Skip to content

Commit

Permalink
Remove local_sampler_tuning method
Browse files Browse the repository at this point in the history
  • Loading branch information
kazewong committed Apr 5, 2024
1 parent ec39499 commit 8bbc9c4
Showing 1 changed file with 0 additions and 32 deletions.
32 changes: 0 additions & 32 deletions src/flowMC/Sampler.py
Original file line number Diff line number Diff line change
Expand Up @@ -198,38 +198,6 @@ def sample(self, initial_position: Float[Array, "n_chains n_dim"], data: dict):
)
self.summary[strategy.__name__] = summary

def local_sampler_tuning(
    self,
    initial_position: Float[Array, "n_chain n_dim"],
    data: dict,
    max_iter: int = 100,
):
    """
    Tune the local sampler.

    Runs a number of iterations of the local sampler and uses the
    acceptance rate to adjust the local sampler parameters. Since this is
    mostly a fast adaptation, the sample state is NOT carried forward —
    only the sampler parameters are adapted, using the initial position.

    Args:
        initial_position (Float[Array, "n_chain n_dim"]): Initial position
            for the local sampler.
        data (dict): Data dictionary forwarded to the tuning call —
            presumably the same auxiliary data passed to the sampler
            kernels; semantics not shown here, verify against callers.
        max_iter (int): Maximum number of tuning iterations to run.
    """
    if self.local_autotune:
        print("Tuning local sampler")
        # NOTE(review): the actual autotune invocation below is commented
        # out, so this branch currently only prints — the local sampler
        # parameters are never adapted. Confirm whether this is
        # intentional (the method is slated for removal in this commit).
        # kernel_vmap = self.local_sampler.kernel_vmap
        # self.local_sampler.params = self.local_autotune(
        #     kernel_vmap,
        #     self.rng_keys_mcmc,
        #     initial_position,
        #     self.likelihood_vec(initial_position),
        #     data,
        #     self.local_sampler.params,
        #     max_iter,
        # )
    else:
        # No autotune callable configured: keep the user-supplied
        # sampler parameters unchanged.
        print("No autotune found, use input sampler_params")

def get_sampler_state(self, training: bool = False) -> dict:
"""
Get the sampler state. There are two sets of sampler outputs one can get,
Expand Down

0 comments on commit 8bbc9c4

Please sign in to comment.