New implementation of wham normalization
This is more stable and should work in all cases, making the normalize
option effectively unnecessary.
GiovanniBussi committed Sep 24, 2023
1 parent 866ed47 commit ac2af24
Showing 2 changed files with 26 additions and 21 deletions.
bussilab/wham.py: 44 changes (25 additions & 19 deletions)
@@ -5,7 +5,7 @@
 """
 import sys
-from typing import Optional, cast
+from typing import Optional, Union, cast
 import numpy as np
 from . import coretools

@@ -40,7 +40,7 @@ def wham(bias,
          verbose: bool = False,
          logZ: Optional[np.ndarray] = None,
          logW: Optional[np.ndarray] = None,
-         normalize: bool = True,
+         normalize: Union[bool, str] = "log",
          method: str = "minimize",
          minimize_opt: Optional[dict] = None):
     """Compute weights according to binless WHAM.
@@ -54,13 +54,9 @@
     It is crucial however to compute the potential according to each of the employed
     Hamiltonians on **all the frames**, not only on those simulated using that Hamiltonian.
-    Notice that, unless one passes `normalize=False`, the returned weights are normalized.
-    In most practical cases, this means that one should decide a priori on which Hamiltonian
-    weights should be computed and subtract it first. E.g., to obtain the weights corresponding
-    to the first Hamiltonian, one should likely replace `bias` with `bias-bias[:,0][:,np.newaxis]`.
-    Not doing so, unless bias fluctuations are very small, will result in numerical issues.
-    If using `normalize=False`, weights are **not** normalized and biases are pre-shifted so as
-    to decrease numerical issues.
+    Notice that by default weights are normalized. This behavior can be overridden with
+    normalize=False, but starting with v0.0.40 this should not be necessary: the new
+    implementation of the normalization is numerically stable in all cases.
 
     Bugs
     ----
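
To make the new behavior concrete, here is a minimal editorial sketch (not part of the commit) of a call with the new default. The bias layout and the pre-v0.0.40 workaround are taken from the docstring above; the logW/logZ result attributes appear in the tests below:

import numpy as np
from bussilab.wham import wham

# bias[i, j]: potential of Hamiltonian j evaluated on frame i,
# computed on *all* frames as the docstring requires
bias = np.array([[0.0, 1.0],
                 [0.5, 0.2],
                 [2.0, 0.0]])

res = wham(bias)   # new default, normalize="log"
print(res.logW)    # normalized log-weights: np.sum(np.exp(res.logW)) is 1
print(res.logZ)    # relative log partition functions

# pre-v0.0.40 workaround quoted in the removed docstring text above:
# shift all biases relative to the first Hamiltonian before calling wham
res_shifted = wham(bias - bias[:, 0][:, np.newaxis])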
@@ -190,9 +186,9 @@
         the bias. Providing an initial guess that is close to the converged value can speed up
         significantly the convergence. *If logW is provided, logZ is ignored*.
-    normalize: bool, optional
-        If False, do not normalize resulting weights. Useful when biases fluctuate a lot and one
-        does not want to choose first on which of the Hamiltonians they should be normalized.
+    normalize: bool or str, optional
+        By default, "log", which properly normalizes weights in all cases.
+        normalize=True or False is accepted for backward compatibility.
     method: str, optional
         If "substitute", solve self-consistent equations by substitution.
@@ -299,14 +295,24 @@ def func(x):
     else:
         raise ValueError("method should be 'minimize' or 'substitute'")
 
-    if normalize:
-        weight *= np.exp((shifts1-np.max(shifts1)))
-        # normalized weights
-        weight /= np.sum(weight)
-        with np.errstate(divide = 'ignore'):
-            logW = np.log(weight)
+    # this is the traditional implementation, either normalized or not
+    if isinstance(normalize,bool):
+        if normalize:
+            weight *= np.exp((shifts1-np.max(shifts1)))
+            # normalized weights
+            weight /= np.sum(weight)
+            with np.errstate(divide = 'ignore'):
+                logW = np.log(weight)
+        else:
+            logW = np.log(weight) + shifts1
+    # this is the new default, which also normalizes weights that would underflow if exponentiated directly
     else:
-        logW = np.log(weight) + shifts1
+        if normalize == "log":
+            logW = np.log(weight) + shifts1
+            logW -= np.max(logW)
+            logW -= np.log(np.sum(np.exp(logW)))
+        else:
+            raise ValueError("normalize should be True, False, or 'log'")
 
     if verbose:
         sys.stderr.write("WHAM: end")
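For intuition, this editorial sketch (not the library's code) contrasts the log-sum-exp normalization performed by the new "log" branch above with the old exponentiate-then-normalize route, which maps very small weights to zero and their logs to -inf:

import numpy as np

logW = np.array([0.0, -1e6])   # log-weights with a huge spread

# old normalized path: exponentiating first underflows the small weight to 0,
# so its log becomes -inf and the information is lost
with np.errstate(divide='ignore'):
    old = np.log(np.exp(logW) / np.sum(np.exp(logW)))
print(old)   # [  0. -inf]

# new "log" path: stay in log space, subtract the max, then the log of the sum
new = logW - np.max(logW)
new -= np.log(np.sum(np.exp(new)))
print(new)   # [ 0.e+00 -1.e+06], the difference is preserved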
tests/test_wham.py: 3 changes (1 addition & 2 deletions)
@@ -110,9 +110,8 @@ def test_wham4s(self):
     def test_wham5(self):
         import numpy as np
         # check if the algorithm is numerically robust if one frame has weight much smaller than another
-        # requires weights to be non normalized
         for diffW in (0.0,1e1,1e2,1e3,1e4,1e5,1e6):
-            w=wham(2.0*np.array([[0.0,0.0],[diffW,diffW]]),T=2.0, normalize=False)
+            w=wham(2.0*np.array([[0.0,0.0],[diffW,diffW]]),T=2.0)
             self.assertAlmostEqual(w.logW[1]-w.logW[0],diffW)
             self.assertAlmostEqual(w.logZ[1]-w.logZ[0],0.0)

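As a quick editorial check of what the updated test now exercises (same call as the test, with diffW = 1e6): under the new default the log-weight spread survives normalization, which is why normalize=False is no longer needed:

import numpy as np
from bussilab.wham import wham

w = wham(2.0 * np.array([[0.0, 0.0], [1e6, 1e6]]), T=2.0)
print(w.logW[1] - w.logW[0])    # ~1e6: spread preserved in log space
print(np.sum(np.exp(w.logW)))   # ~1.0: weights nonetheless sum to one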
