Polychord 1.20.0 #232

Merged

18 changes: 9 additions & 9 deletions cobaya/samplers/polychord/polychord.py
@@ -35,7 +35,7 @@ class polychord(Sampler):
     """
     # Name of the PolyChord repo and version to download
     _pc_repo_name = "PolyChord/PolyChordLite"
-    _pc_repo_version = "1.18.2"
+    _pc_repo_version = "1.20.0"
     _base_dir_suffix = "polychord_raw"
     _clusters_dir = "clusters"
     _at_resume_prefer_old = Sampler._at_resume_prefer_old + ["blocking"]
@@ -82,7 +82,7 @@ def initialize(self):
             if getattr(self, p) is not None:
                 setattr(self, p, NumberWithUnits(
                     getattr(self, p), "d", scale=self.nDims, dtype=int).value)
-        self._quants_nlive_units = ["nprior"]
+        self._quants_nlive_units = ["nprior", "nfail"]
         for p in self._quants_nlive_units:
             if getattr(self, p) is not None:
                 setattr(self, p, NumberWithUnits(
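
With "nfail" added to `_quants_nlive_units`, its value is parsed by the same unit machinery already used for `nprior`. A minimal sketch of that resolution, assuming `NumberWithUnits` is importable from `cobaya.tools` and that the nlive-unit branch scales by `self.nlive` (neither is shown in this hunk):

```python
# Sketch only: mirrors the NumberWithUnits call pattern in the hunk above,
# with the "nlive" unit in place of "d" (dimension).
from cobaya.tools import NumberWithUnits  # assumed location of the helper

nlive = 500  # e.g. the result of "nlive: 25d" in 20 dimensions
nfail = NumberWithUnits("2nlive", "nlive", scale=nlive, dtype=int).value
print(nfail)  # 1000 -> sampling stops after 1000 failed spawns
```
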
@@ -136,13 +136,13 @@ def initialize(self):
             int(o * read_dnumber(self.num_repeats, dim_block))
             for o, dim_block in zip(oversampling_factors, self.grade_dims)]
         # Assign settings
-        pc_args = ["nlive", "num_repeats", "nprior", "do_clustering",
-                   "precision_criterion", "max_ndead", "boost_posterior", "feedback",
-                   "logzero", "posteriors", "equals", "compression_factor",
-                   "cluster_posteriors", "write_resume", "read_resume", "write_stats",
-                   "write_live", "write_dead", "base_dir", "grade_frac", "grade_dims",
-                   "feedback", "read_resume", "base_dir", "file_root", "grade_frac",
-                   "grade_dims"]
+        pc_args = ["nlive", "num_repeats", "nprior", "nfail", "do_clustering",
+                   "feedback", "precision_criterion", "logzero",
+                   "max_ndead", "boost_posterior", "posteriors", "equals",
+                   "cluster_posteriors", "write_resume", "read_resume",
+                   "write_stats", "write_live", "write_dead", "write_prior",
+                   "maximise", "compression_factor", "synchronous", "base_dir",
+                   "file_root", "seed", "grade_dims", "grade_frac", "nlives"]
         # As stated above, num_repeats is ignored, so let's not pass it
         pc_args.pop(pc_args.index("num_repeats"))
         settings: Any = load_module('pypolychord.settings', path=self._poly_build_path,
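The rewritten `pc_args` list adds the settings new in PolyChord 1.20 ("nfail", "write_prior", "maximise", "synchronous", "nlives") plus "seed", and these names end up as attributes of pypolychord's settings object. A rough sketch of that mapping, not cobaya's actual code; `make_settings` and `options` are illustrative names, and pypolychord 1.20 is assumed to be installed:

```python
# Illustrative only: turn a list of option names (like pc_args) into keyword
# arguments for pypolychord's PolyChordSettings.
from pypolychord.settings import PolyChordSettings

def make_settings(nDims, nDerived, options, pc_args):
    # Pass only the options that were actually set; unset (None) values fall
    # back to PolyChord's own defaults.
    kwargs = {name: options[name] for name in pc_args
              if options.get(name) is not None}
    return PolyChordSettings(nDims, nDerived, **kwargs)

settings = make_settings(20, 1, {"nlive": 500, "nfail": 1000, "maximise": True},
                         ["nlive", "nfail", "maximise"])
```
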
19 changes: 18 additions & 1 deletion cobaya/samplers/polychord/polychord.yaml
@@ -16,6 +16,8 @@ num_repeats: 2d
 # Number of prior samples drawn before starting compression
 # Can be in units of nlive (but not dimension) as Xnlive
 nprior: 10nlive
+# Number of failed spawns before stopping nested sampling.
+nfail : nlive
 # Whether to check for and explore multi-modality on the posterior
 do_clustering: True
 # Stopping criterion: fraction of the total evidence contained in the live points
@@ -35,6 +37,20 @@ logzero: -1e30
 boost_posterior: 0 # increase up to `num_repeats`
 # Verbosity during the sampling process. Set to one of [0,1,2,3]
 feedback: # default: Same as global `verbosity`
+# Parallelise with synchronous workers, rather than asynchronous ones.
+# This can be set to False if the likelihood speed is known to be
+# approximately constant across the parameter space. Synchronous
+# parallelisation is less effective than asynchronous by a factor ~O(1)
+# for large parallelisation.
+synchronous : True
+# Variable number of live points option. This dictionary is a mapping
+# between loglike contours and nlive.
+# You should still set nlive to be a sensible number, as this indicates
+# how often to update the clustering, and to define the default value.
+nlives : {}
+# Perform maximisation at the end of the run to find the maximum
+# likelihood point and value
+maximise : False
 # Exploiting speed hierarchy
 # --------------------------
 # whether to measure actual speeds for your machine/threading at starting rather
@@ -57,7 +73,7 @@ blocking:
 confidence_for_unbounded: 0.9999995 # 5 sigmas of the prior
 # Seeding runs
 # ------------
-seed: # postitive integer
+seed: # positive integer
 # Raw output of PolyChord (no need to change them, normally)
 # ----------------------------------------------------------
 file_root:
@@ -69,3 +85,4 @@ read_resume: True
 write_stats: True
 write_live: True
 write_dead: True
+write_prior: True
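
The options added above can be overridden per run from a cobaya input. A hedged example in Python dictionary form: the values are illustrative, not the defaults shipped in polychord.yaml, and `toy_loglike` is a made-up external likelihood.

```python
# Illustrative cobaya input exercising the new PolyChord 1.20 options.
def toy_loglike(x):
    """Toy external likelihood: a Gaussian centred at 0.5."""
    return -0.5 * (x - 0.5) ** 2

info = {
    "likelihood": {"toy": toy_loglike},
    "params": {"x": {"prior": {"min": 0, "max": 1}}},
    "sampler": {"polychord": {
        "nfail": "2nlive",                   # stop after 2*nlive failed spawns
        "synchronous": False,                # asynchronous parallelisation
        "nlives": {-1e5: 1000, -10.0: 200},  # loglike contour -> nlive
        "maximise": True,                    # maximise the likelihood at the end
        "write_prior": True,                 # also write the initial prior samples
    }},
}

# from cobaya.run import run
# updated_info, sampler = run(info)
```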