From 192ce399c644d72f2382cd1c4654981a88993ff7 Mon Sep 17 00:00:00 2001 From: "Michael J. Williams" Date: Wed, 18 Feb 2026 16:21:46 +0000 Subject: [PATCH 01/15] MAINT: remove deprecated sampler plugins --- bilby/core/sampler/cpnest.py | 260 ------ bilby/core/sampler/dnest4.py | 259 ------ bilby/core/sampler/kombine.py | 239 ----- bilby/core/sampler/nessai.py | 343 ------- bilby/core/sampler/nestle.py | 132 --- bilby/core/sampler/polychord.py | 173 ---- bilby/core/sampler/proposal.py | 358 ------- bilby/core/sampler/ptemcee.py | 1436 ----------------------------- bilby/core/sampler/ptmcmc.py | 256 ----- bilby/core/sampler/pymc.py | 1013 -------------------- bilby/core/sampler/pymultinest.py | 201 ---- bilby/core/sampler/ultranest.py | 304 ------ bilby/core/sampler/zeus.py | 195 ---- 13 files changed, 5169 deletions(-) delete mode 100644 bilby/core/sampler/cpnest.py delete mode 100644 bilby/core/sampler/dnest4.py delete mode 100644 bilby/core/sampler/kombine.py delete mode 100644 bilby/core/sampler/nessai.py delete mode 100644 bilby/core/sampler/nestle.py delete mode 100644 bilby/core/sampler/polychord.py delete mode 100644 bilby/core/sampler/proposal.py delete mode 100644 bilby/core/sampler/ptemcee.py delete mode 100644 bilby/core/sampler/ptmcmc.py delete mode 100644 bilby/core/sampler/pymc.py delete mode 100644 bilby/core/sampler/pymultinest.py delete mode 100644 bilby/core/sampler/ultranest.py delete mode 100644 bilby/core/sampler/zeus.py diff --git a/bilby/core/sampler/cpnest.py b/bilby/core/sampler/cpnest.py deleted file mode 100644 index 187333101..000000000 --- a/bilby/core/sampler/cpnest.py +++ /dev/null @@ -1,260 +0,0 @@ -import array -import copy -import sys -import warnings - -import numpy as np -from numpy.lib.recfunctions import structured_to_unstructured -from pandas import DataFrame - -from ..utils import check_directory_exists_and_if_not_mkdir, logger -from .base_sampler import NestedSampler, signal_wrapper -from .proposal import JumpProposalCycle, 
Sample - - -class Cpnest(NestedSampler): - """bilby wrapper of cpnest (https://github.com/johnveitch/cpnest) - - All positional and keyword arguments (i.e., the args and kwargs) passed to - `run_sampler` will be propagated to `cpnest.CPNest`, see documentation - for that class for further help. Under Other Parameters, we list commonly - used kwargs and the bilby defaults. - - Parameters - ========== - nlive: int - The number of live points, note this can also equivalently be given as - one of [npoints, nlives, n_live_points] - seed: int (1234) - Initialised random seed - nthreads: int, (1) - Number of threads to use - maxmcmc: int (1000) - The maximum number of MCMC steps to take - verbose: Bool (True) - If true, print information information about the convergence during - resume: Bool (True) - Whether or not to resume from a previous run - output: str - Where to write the CPNest, by default this is - {self.outdir}/cpnest_{self.label}/ - - """ - - msg = ( - "The CPNest sampler interface in bilby is deprecated and will" - " be removed in Bilby version 3. Please use the `cpnest-bilby`" - "sampler plugin instead: https://github.com/bilby-dev/cpnest-bilby." 
- ) - warnings.warn(msg, FutureWarning) - - sampler_name = "cpnest" - default_kwargs = dict( - verbose=3, - nthreads=1, - nlive=500, - maxmcmc=1000, - seed=None, - poolsize=100, - nhamiltonian=0, - resume=True, - output=None, - proposals=None, - n_periodic_checkpoint=8000, - ) - hard_exit = True - sampling_seed_key = "seed" - - def _translate_kwargs(self, kwargs): - kwargs = super()._translate_kwargs(kwargs) - if "nlive" not in kwargs: - for equiv in self.npoints_equiv_kwargs: - if equiv in kwargs: - kwargs["nlive"] = kwargs.pop(equiv) - if "nthreads" not in kwargs: - for equiv in self.npool_equiv_kwargs: - if equiv in kwargs: - kwargs["nthreads"] = kwargs.pop(equiv) - - if "seed" not in kwargs: - logger.warning("No seed provided, cpnest will use 1234.") - - @signal_wrapper - def run_sampler(self): - from cpnest import CPNest - from cpnest import model as cpmodel - from cpnest.nest2pos import compute_weights - from cpnest.parameter import LivePoint - - class Model(cpmodel.Model): - """A wrapper class to pass our log_likelihood into cpnest""" - - def __init__(self, names, priors): - self.names = names - self.priors = priors - self._update_bounds() - - @staticmethod - def log_likelihood(x, **kwargs): - theta = [x[n] for n in self.search_parameter_keys] - return self.log_likelihood(theta) - - @staticmethod - def log_prior(x, **kwargs): - theta = [x[n] for n in self.search_parameter_keys] - return self.log_prior(theta) - - def _update_bounds(self): - self.bounds = [ - [self.priors[key].minimum, self.priors[key].maximum] - for key in self.names - ] - - def new_point(self): - """Draw a point from the prior""" - prior_samples = self.priors.sample() - self._update_bounds() - point = LivePoint( - self.names, - array.array("d", [prior_samples[name] for name in self.names]), - ) - return point - - self._resolve_proposal_functions() - model = Model(self.search_parameter_keys, self.priors) - out = None - remove_kwargs = ["proposals", "n_periodic_checkpoint"] - while out is 
None: - try: - out = CPNest(model, **self.kwargs) - except TypeError as e: - if len(remove_kwargs) > 0: - kwarg = remove_kwargs.pop(0) - else: - raise TypeError("Unable to initialise cpnest sampler") - logger.info(f"CPNest init. failed with error {e}, please update") - logger.info(f"Attempting to rerun with kwarg {kwarg} removed") - self.kwargs.pop(kwarg) - try: - out.run() - except SystemExit: - out.checkpoint() - self.write_current_state_and_exit() - - if self.plot: - out.plot() - - self.calc_likelihood_count() - self.result.samples = structured_to_unstructured( - out.posterior_samples[self.search_parameter_keys] - ) - self.result.log_likelihood_evaluations = out.posterior_samples["logL"] - self.result.nested_samples = DataFrame(out.get_nested_samples(filename="")) - self.result.nested_samples.rename( - columns=dict(logL="log_likelihood"), inplace=True - ) - _, log_weights = compute_weights( - np.array(self.result.nested_samples.log_likelihood), - np.array(out.NS.state.nlive), - ) - self.result.nested_samples["weights"] = np.exp(log_weights) - self.result.log_evidence = out.NS.state.logZ - self.result.log_evidence_err = np.sqrt(out.NS.state.info / out.NS.state.nlive) - self.result.information_gain = out.NS.state.info - return self.result - - def write_current_state_and_exit(self, signum=None, frame=None): - """ - Overwrites the base class to make sure that :code:`CPNest` terminates - properly as :code:`CPNest` handles all the multiprocessing internally. - """ - self._log_interruption(signum=signum) - sys.exit(self.exit_code) - - def _verify_kwargs_against_default_kwargs(self): - """ - Set the directory where the output will be written - and check resume and checkpoint status. 
- """ - if not self.kwargs["output"]: - self.kwargs["output"] = f"{self.outdir}/cpnest_{self.label}/" - if self.kwargs["output"].endswith("/") is False: - self.kwargs["output"] = f"{self.kwargs['output']}/" - check_directory_exists_and_if_not_mkdir(self.kwargs["output"]) - if self.kwargs["n_periodic_checkpoint"] and not self.kwargs["resume"]: - self.kwargs["n_periodic_checkpoint"] = None - NestedSampler._verify_kwargs_against_default_kwargs(self) - - def _resolve_proposal_functions(self): - from cpnest.proposal import ProposalCycle - - if "proposals" in self.kwargs: - if self.kwargs["proposals"] is None: - return - if isinstance(self.kwargs["proposals"], JumpProposalCycle): - self.kwargs["proposals"] = dict( - mhs=self.kwargs["proposals"], hmc=self.kwargs["proposals"] - ) - for key, proposal in self.kwargs["proposals"].items(): - if isinstance(proposal, JumpProposalCycle): - self.kwargs["proposals"][key] = cpnest_proposal_cycle_factory( - proposal - ) - elif isinstance(proposal, ProposalCycle): - pass - else: - raise TypeError("Unknown proposal type") - - -def cpnest_proposal_factory(jump_proposal): - import cpnest.proposal - - class CPNestEnsembleProposal(cpnest.proposal.EnsembleProposal): - def __init__(self, jp): - self.jump_proposal = jp - self.ensemble = None - - def __call__(self, sample, **kwargs): - return self.get_sample(sample, **kwargs) - - def get_sample(self, cpnest_sample, **kwargs): - sample = Sample.from_cpnest_live_point(cpnest_sample) - self.ensemble = kwargs.get("coordinates", self.ensemble) - sample = self.jump_proposal(sample=sample, sampler_name="cpnest", **kwargs) - self.log_J = self.jump_proposal.log_j - return self._update_cpnest_sample(cpnest_sample, sample) - - @staticmethod - def _update_cpnest_sample(cpnest_sample, sample): - cpnest_sample.names = list(sample.keys()) - for i, value in enumerate(sample.values()): - cpnest_sample.values[i] = value - return cpnest_sample - - return CPNestEnsembleProposal(jump_proposal) - - -def 
cpnest_proposal_cycle_factory(jump_proposals): - import cpnest.proposal - - class CPNestProposalCycle(cpnest.proposal.ProposalCycle): - def __init__(self): - self.jump_proposals = copy.deepcopy(jump_proposals) - for i, prop in enumerate(self.jump_proposals.proposal_functions): - self.jump_proposals.proposal_functions[i] = cpnest_proposal_factory( - prop - ) - self.jump_proposals.update_cycle() - super(CPNestProposalCycle, self).__init__( - proposals=self.jump_proposals.proposal_functions, - weights=self.jump_proposals.weights, - cyclelength=self.jump_proposals.cycle_length, - ) - - def get_sample(self, old, **kwargs): - return self.jump_proposals(sample=old, coordinates=self.ensemble, **kwargs) - - def set_ensemble(self, ensemble): - self.ensemble = ensemble - - return CPNestProposalCycle diff --git a/bilby/core/sampler/dnest4.py b/bilby/core/sampler/dnest4.py deleted file mode 100644 index 616b8f32a..000000000 --- a/bilby/core/sampler/dnest4.py +++ /dev/null @@ -1,259 +0,0 @@ -import datetime -import time -import warnings - -import numpy as np - -from ..utils import logger -from .base_sampler import NestedSampler, _TemporaryFileSamplerMixin, signal_wrapper - - -class _DNest4Model(object): - def __init__( - self, log_likelihood_func, from_prior_func, widths, centers, highs, lows - ): - """Initialize the DNest4 model. - Args: - log_likelihood_func: function - The loglikelihood function to use during the Nested Sampling run. - from_prior_func: function - The function to use when randomly selecting parameter vectors from the prior space. - widths: array_like - The approximate widths of the prior distrbutions. - centers: array_like - The approximate center points of the prior distributions. 
- """ - self._log_likelihood = log_likelihood_func - self._from_prior = from_prior_func - self._widths = widths - self._centers = centers - self._highs = highs - self._lows = lows - self._n_dim = len(widths) - return - - def log_likelihood(self, coords): - """The model's log_likelihood function""" - return self._log_likelihood(coords) - - def from_prior(self): - """The model's function to select random points from the prior space.""" - return self._from_prior() - - def perturb(self, coords): - """The perturb function to perform Monte Carlo trial moves.""" - from ..utils import random - - idx = random.rng.integers(self._n_dim) - - coords[idx] += self._widths[idx] * (random.rng.uniform(size=1) - 0.5) - cw = self._widths[idx] - cc = self._centers[idx] - - coords[idx] = self.wrap(coords[idx], (cc - 0.5 * cw), cc + 0.5 * cw) - - return 0.0 - - @staticmethod - def wrap(x, minimum, maximum): - if maximum <= minimum: - raise ValueError( - f"maximum {maximum} <= minimum {minimum}, when trying to wrap coordinates" - ) - return (x - minimum) % (maximum - minimum) + minimum - - -class DNest4(_TemporaryFileSamplerMixin, NestedSampler): - """ - Bilby wrapper of DNest4 - - Parameters - ========== - TBD - - Other Parameters - ------========== - num_particles: int - The number of points to use in the Nested Sampling active population. - max_num_levels: int - The max number of diffusive likelihood levels that DNest4 should initialize - during the Diffusive Nested Sampling run. - backend: str - The python DNest4 backend for storing the output. - Options are: 'memory' and 'csv'. If 'memory' the - DNest4 outputs are stored in memory during the run. If 'csv' the - DNest4 outputs are written out to files with a CSV format during - the run. 
- CSV backend may not be functional right now (October 2020) - num_steps: int - The number of MCMC iterations to run - new_level_interval: int - The number of moves to run before creating a new diffusive likelihood level - lam: float - Set the backtracking scale length - beta: float - Set the strength of effect to force the histogram to equal bin counts - seed: int - Set the seed for the C++ random number generator - verbose: Bool - If True, prints information during run - """ - - msg = ( - "The DNest4 sampler interface in bilby is deprecated and will" - " be removed in Bilby version 3. Please use the `dnest4-bilby`" - "sampler plugin instead: https://github.com/bilby-dev/dnest4-bilby." - ) - warnings.warn(msg, FutureWarning) - - sampler_name = "d4nest" - default_kwargs = dict( - max_num_levels=20, - num_steps=500, - new_level_interval=10000, - num_per_step=10000, - thread_steps=1, - num_particles=1000, - lam=10.0, - beta=100, - seed=None, - verbose=True, - outputfiles_basename=None, - backend="memory", - ) - short_name = "dn4" - hard_exit = True - sampling_seed_key = "seed" - - def __init__( - self, - likelihood, - priors, - outdir="outdir", - label="label", - use_ratio=False, - plot=False, - exit_code=77, - skip_import_verification=False, - temporary_directory=True, - **kwargs, - ): - super(DNest4, self).__init__( - likelihood=likelihood, - priors=priors, - outdir=outdir, - label=label, - use_ratio=use_ratio, - plot=plot, - skip_import_verification=skip_import_verification, - temporary_directory=temporary_directory, - exit_code=exit_code, - **kwargs, - ) - - self.num_particles = self.kwargs["num_particles"] - self.max_num_levels = self.kwargs["max_num_levels"] - self._verbose = self.kwargs["verbose"] - self._backend = self.kwargs["backend"] - - self.start_time = np.nan - self.sampler = None - self._information = np.nan - - # Get the estimates of the prior distributions' widths and centers. 
- widths = [] - centers = [] - highs = [] - lows = [] - - samples = self.priors.sample(size=10000) - - for key in self.search_parameter_keys: - pts = samples[key] - low = pts.min() - high = pts.max() - width = high - low - center = (high + low) / 2.0 - widths.append(width) - centers.append(center) - - highs.append(high) - lows.append(low) - - self._widths = np.array(widths) - self._centers = np.array(centers) - self._highs = np.array(highs) - self._lows = np.array(lows) - - self._dnest4_model = _DNest4Model( - self.log_likelihood, - self.get_random_draw_from_prior, - self._widths, - self._centers, - self._highs, - self._lows, - ) - - def _set_backend(self): - import dnest4 - - if self._backend == "csv": - return dnest4.backends.CSVBackend( - f"{self.outdir}/dnest4{self.label}/", sep=" " - ) - else: - return dnest4.backends.MemoryBackend() - - def _set_dnest4_kwargs(self): - dnest4_keys = ["num_steps", "new_level_interval", "lam", "beta", "seed"] - self.dnest4_kwargs = {key: self.kwargs[key] for key in dnest4_keys} - - @signal_wrapper - def run_sampler(self): - import dnest4 - - self._set_dnest4_kwargs() - backend = self._set_backend() - - self._verify_kwargs_against_default_kwargs() - self._setup_run_directory() - self._check_and_load_sampling_time_file() - self.start_time = time.time() - - self.sampler = dnest4.DNest4Sampler(self._dnest4_model, backend=backend) - out = self.sampler.sample( - self.max_num_levels, num_particles=self.num_particles, **self.dnest4_kwargs - ) - - for i, sample in enumerate(out): - if self._verbose and ((i + 1) % 100 == 0): - stats = self.sampler.postprocess() - logger.info(f"Iteration: {i + 1} log(Z): {stats['log_Z']}") - - self._calculate_and_save_sampling_time() - self._clean_up_run_directory() - - stats = self.sampler.postprocess(resample=1) - self.result.log_evidence = stats["log_Z"] - self._information = stats["H"] - self.result.log_evidence_err = np.sqrt(self._information / self.num_particles) - self.result.samples = 
np.array(self.sampler.backend.posterior_samples) - - self.result.sampler_output = out - self.result.outputfiles_basename = self.outputfiles_basename - self.result.sampling_time = datetime.timedelta(seconds=self.total_sampling_time) - - self.calc_likelihood_count() - - return self.result - - def _translate_kwargs(self, kwargs): - kwargs = super()._translate_kwargs(kwargs) - if "num_steps" not in kwargs: - for equiv in self.walks_equiv_kwargs: - if equiv in kwargs: - kwargs["num_steps"] = kwargs.pop(equiv) - - def _verify_kwargs_against_default_kwargs(self): - self.outputfiles_basename = self.kwargs.pop("outputfiles_basename", None) - super(DNest4, self)._verify_kwargs_against_default_kwargs() diff --git a/bilby/core/sampler/kombine.py b/bilby/core/sampler/kombine.py deleted file mode 100644 index 24d256b43..000000000 --- a/bilby/core/sampler/kombine.py +++ /dev/null @@ -1,239 +0,0 @@ -import os -import warnings - -import numpy as np - -from ..utils import logger -from .base_sampler import LikePriorEvaluator, signal_wrapper -from .emcee import Emcee - -_evaluator = LikePriorEvaluator() - - -class Kombine(Emcee): - """bilby wrapper kombine (https://github.com/bfarr/kombine) - - All positional and keyword arguments (i.e., the args and kwargs) passed to - `run_sampler` will be propagated to `kombine.Sampler`, see - documentation for that class for further help. Under Other Parameters, we - list commonly used kwargs and the bilby defaults. - - Parameters - ========== - nwalkers: int, (500) - The number of walkers - iterations: int, (100) - The number of iterations - auto_burnin: bool (False) - Use `kombine`'s automatic burnin (at your own risk) - nburn: int (None) - If given, the fixed number of steps to discard as burn-in. These will - be discarded from the total number of steps set by `nsteps` and - therefore the value must be greater than `nsteps`. 
Else, nburn is - estimated from the autocorrelation time - burn_in_fraction: float, (0.25) - The fraction of steps to discard as burn-in in the event that the - autocorrelation time cannot be calculated - burn_in_act: float (3.) - The number of autocorrelation times to discard as burn-in - - """ - - msg = ( - "The Kombine sampler interface in bilby is deprecated and will" - " be removed in Bilby version 3. Please use the `kombine-bilby`" - "sampler plugin instead: https://github.com/bilby-dev/kombine-bilby." - ) - warnings.warn(msg, FutureWarning) - - sampler_name = "kombine" - default_kwargs = dict( - nwalkers=500, - args=[], - pool=None, - transd=False, - lnpost0=None, - blob0=None, - iterations=500, - storechain=True, - processes=1, - update_interval=None, - kde=None, - kde_size=None, - spaces=None, - freeze_transd=False, - test_steps=16, - critical_pval=0.05, - max_steps=None, - burnin_verbose=False, - ) - - def __init__( - self, - likelihood, - priors, - outdir="outdir", - label="label", - use_ratio=False, - plot=False, - skip_import_verification=False, - pos0=None, - nburn=None, - burn_in_fraction=0.25, - resume=True, - burn_in_act=3, - autoburnin=False, - **kwargs, - ): - super(Kombine, self).__init__( - likelihood=likelihood, - priors=priors, - outdir=outdir, - label=label, - use_ratio=use_ratio, - plot=plot, - skip_import_verification=skip_import_verification, - pos0=pos0, - nburn=nburn, - burn_in_fraction=burn_in_fraction, - burn_in_act=burn_in_act, - resume=resume, - **kwargs, - ) - - if self.kwargs["nwalkers"] > self.kwargs["iterations"]: - raise ValueError("Kombine Sampler requires Iterations be > nWalkers") - self.autoburnin = autoburnin - - def _check_version(self): - # set prerelease to False to prevent checks for newer emcee versions in parent class - self.prerelease = False - - @property - def sampler_function_kwargs(self): - keys = [ - "lnpost0", - "blob0", - "iterations", - "storechain", - "lnprop0", - "update_interval", - "kde", - "kde_size", - 
"spaces", - "freeze_transd", - ] - function_kwargs = {key: self.kwargs[key] for key in keys if key in self.kwargs} - function_kwargs["p0"] = self.pos0 - return function_kwargs - - @property - def sampler_burnin_kwargs(self): - extra_keys = ["test_steps", "critical_pval", "max_steps", "burnin_verbose"] - removal_keys = ["iterations", "spaces", "freeze_transd"] - burnin_kwargs = self.sampler_function_kwargs.copy() - for key in extra_keys: - if key in self.kwargs: - burnin_kwargs[key] = self.kwargs[key] - if "burnin_verbose" in burnin_kwargs.keys(): - burnin_kwargs["verbose"] = burnin_kwargs.pop("burnin_verbose") - for key in removal_keys: - if key in burnin_kwargs.keys(): - burnin_kwargs.pop(key) - return burnin_kwargs - - @property - def sampler_init_kwargs(self): - init_kwargs = { - key: value - for key, value in self.kwargs.items() - if key not in self.sampler_function_kwargs - and key not in self.sampler_burnin_kwargs - } - init_kwargs.pop("burnin_verbose") - init_kwargs["lnpostfn"] = _evaluator.call_emcee - init_kwargs["ndim"] = self.ndim - - return init_kwargs - - def _initialise_sampler(self): - import kombine - - self._sampler = kombine.Sampler(**self.sampler_init_kwargs) - self._init_chain_file() - - def _set_pos0_for_resume(self): - # take last iteration - self.pos0 = self.sampler.chain[-1, :, :] - - @property - def sampler_chain(self): - # remove last iterations when resuming - nsteps = self._previous_iterations - return self.sampler.chain[:nsteps, :, :] - - def check_resume(self): - return ( - self.resume - and os.path.isfile(self.checkpoint_info.sampler_file) - and os.path.getsize(self.checkpoint_info.sampler_file) > 0 - ) - - @signal_wrapper - def run_sampler(self): - self._setup_pool() - if self.autoburnin: - if self.check_resume(): - logger.info("Resuming with autoburnin=True skips burnin process:") - else: - logger.info("Running kombine sampler's automatic burnin process") - self.sampler.burnin(**self.sampler_burnin_kwargs) - 
self.kwargs["iterations"] += self._previous_iterations - self.nburn = self._previous_iterations - logger.info( - f"Kombine auto-burnin complete. Removing {self.nburn} samples from chains" - ) - self._set_pos0_for_resume() - - from tqdm.auto import tqdm - - sampler_function_kwargs = self.sampler_function_kwargs - iterations = sampler_function_kwargs.pop("iterations") - iterations -= self._previous_iterations - sampler_function_kwargs["p0"] = self.pos0 - for sample in tqdm( - self.sampler.sample(iterations=iterations, **sampler_function_kwargs), - total=iterations, - ): - self.write_chains_to_file(sample) - self.write_current_state() - self.result.sampler_output = np.nan - if not self.autoburnin: - tmp_chain = self.sampler.chain.copy() - self.calculate_autocorrelation(tmp_chain.reshape((-1, self.ndim))) - self.print_nburn_logging_info() - self._close_pool() - - self._generate_result() - self.result.log_evidence_err = np.nan - - tmp_chain = self.sampler.chain[self.nburn :, :, :].copy() - self.result.samples = tmp_chain.reshape((-1, self.ndim)) - self.result.walkers = self.sampler.chain.reshape( - (self.nwalkers, self.nsteps, self.ndim) - ) - return self.result - - def _setup_pool(self): - from kombine import SerialPool - - super(Kombine, self)._setup_pool() - if self.pool is None: - self.pool = SerialPool() - - def _close_pool(self): - from kombine import SerialPool - - if isinstance(self.pool, SerialPool): - self.pool = None - super(Kombine, self)._close_pool() diff --git a/bilby/core/sampler/nessai.py b/bilby/core/sampler/nessai.py deleted file mode 100644 index da04163f7..000000000 --- a/bilby/core/sampler/nessai.py +++ /dev/null @@ -1,343 +0,0 @@ -import os -import sys -import warnings - -import numpy as np -from pandas import DataFrame -from scipy.special import logsumexp - -from ..utils import check_directory_exists_and_if_not_mkdir, load_json, logger -from .base_sampler import NestedSampler, signal_wrapper - - -class Nessai(NestedSampler): - """bilby wrapper of 
nessai (https://github.com/mj-will/nessai) - - .. warning:: - The nessai sampler interface in bilby is deprecated and will be - removed in future release. Please use the :code`nessai-bilby` - sampler plugin instead: https://github.com/bilby-dev/nessai-bilby - - All positional and keyword arguments passed to `run_sampler` are propagated - to `nessai.flowsampler.FlowSampler` - - See the documentation for an explanation of the different kwargs. - - Documentation: https://nessai.readthedocs.io/ - """ - - sampler_name = "nessai" - _default_kwargs = None - _run_kwargs_list = None - sampling_seed_key = "seed" - - msg = ( - "The nessai sampler interface in bilby is deprecated and will" - " be removed in Bilby version 3. Please use the `nessai-bilby`" - "sampler plugin instead: https://github.com/bilby-dev/nessai-bilby." - ) - warnings.warn(msg, FutureWarning) - - @property - def run_kwargs_list(self): - """List of kwargs used in the run method of :code:`FlowSampler`""" - if not self._run_kwargs_list: - from nessai.utils.bilbyutils import get_run_kwargs_list - - self._run_kwargs_list = get_run_kwargs_list() - ignored_kwargs = ["save"] - for ik in ignored_kwargs: - if ik in self._run_kwargs_list: - self._run_kwargs_list.remove(ik) - return self._run_kwargs_list - - @property - def default_kwargs(self): - """Default kwargs for nessai. - - Retrieves default values from nessai directly and then includes any - bilby specific defaults. This avoids the need to update bilby when the - defaults change or new kwargs are added to nessai. 
- - Includes the following kwargs that are specific to bilby: - - - :code:`nessai_log_level`: allows setting the logging level in nessai - - :code:`nessai_logging_stream`: allows setting the logging stream - - :code:`nessai_plot`: allows toggling the plotting in FlowSampler.run - """ - if not self._default_kwargs: - from nessai.utils.bilbyutils import get_all_kwargs - - kwargs = get_all_kwargs() - - # Defaults for bilby that will override nessai defaults - bilby_defaults = dict( - output=None, - exit_code=self.exit_code, - nessai_log_level=None, - nessai_logging_stream="stdout", - nessai_plot=True, - plot_posterior=False, # bilby already produces a posterior plot - log_on_iteration=False, # Use periodic logging by default - logging_interval=60, # Log every 60 seconds - ) - kwargs.update(bilby_defaults) - # Kwargs that cannot be set in bilby - remove = [ - "save", - "signal_handling", - ] - for k in remove: - if k in kwargs: - kwargs.pop(k) - self._default_kwargs = kwargs - return self._default_kwargs - - def log_prior(self, theta): - """ - - Parameters - ---------- - theta: list - List of sampled values on a unit interval - - Returns - ------- - float: Joint ln prior probability of theta - - """ - return self.priors.ln_prob(theta, axis=0) - - def get_nessai_model(self): - """Get the model for nessai.""" - from nessai.livepoint import dict_to_live_points - from nessai.model import Model as BaseModel - - class Model(BaseModel): - """A wrapper class to pass our log_likelihood and priors into nessai - - Parameters - ---------- - names : list of str - List of parameters to sample - priors : :obj:`bilby.core.prior.PriorDict` - Priors to use for sampling. Needed for the bounds and the - `sample` method. 
- """ - - def __init__(self, names, priors): - self.names = names - self.priors = priors - self._update_bounds() - - @staticmethod - def log_likelihood(x, **kwargs): - """Compute the log likelihood""" - theta = [x[n].item() for n in self.search_parameter_keys] - return self.log_likelihood(theta) - - @staticmethod - def log_prior(x, **kwargs): - """Compute the log prior""" - theta = {n: x[n] for n in self._search_parameter_keys} - return self.log_prior(theta) - - def _update_bounds(self): - self.bounds = { - key: [self.priors[key].minimum, self.priors[key].maximum] - for key in self.names - } - - def new_point(self, N=1): - """Draw a point from the prior""" - prior_samples = self.priors.sample(size=N) - samples = {n: prior_samples[n] for n in self.names} - return dict_to_live_points(samples) - - def new_point_log_prob(self, x): - """Proposal probability for new the point""" - return self.log_prior(x) - - @staticmethod - def from_unit_hypercube(x): - """Map samples from the unit hypercube to the prior.""" - theta = {} - for n in self._search_parameter_keys: - theta[n] = self.priors[n].rescale(x[n]) - return dict_to_live_points(theta) - - @staticmethod - def to_unit_hypercube(x): - """Map samples from the prior to the unit hypercube.""" - theta = {n: x[n] for n in self._search_parameter_keys} - return dict_to_live_points(self.priors.cdf(theta)) - - model = Model(self.search_parameter_keys, self.priors) - return model - - def split_kwargs(self): - """Split kwargs into configuration and run time kwargs""" - kwargs = self.kwargs.copy() - run_kwargs = {} - for k in self.run_kwargs_list: - run_kwargs[k] = kwargs.pop(k) - run_kwargs["plot"] = kwargs.pop("nessai_plot") - return kwargs, run_kwargs - - def get_posterior_weights(self): - """Get the posterior weights for the nested samples""" - from nessai.posterior import compute_weights - - _, log_weights = compute_weights( - np.array(self.fs.nested_samples["logL"]), - np.array(self.fs.ns.state.nlive), - ) - w = 
np.exp(log_weights - logsumexp(log_weights)) - return w - - def get_nested_samples(self): - """Get the nested samples dataframe""" - ns = DataFrame(self.fs.nested_samples) - ns.rename( - columns=dict(logL="log_likelihood", logP="log_prior", it="iteration"), - inplace=True, - ) - return ns - - def update_result(self): - """Update the result object.""" - from nessai.livepoint import live_points_to_array - - # Manually set likelihood evaluations because parallelisation breaks the counter - self.result.num_likelihood_evaluations = self.fs.ns.total_likelihood_evaluations - - self.result.sampling_time = self.fs.ns.sampling_time - self.result.samples = live_points_to_array( - self.fs.posterior_samples, self.search_parameter_keys - ) - self.result.log_likelihood_evaluations = self.fs.posterior_samples["logL"] - self.result.nested_samples = self.get_nested_samples() - self.result.nested_samples["weights"] = self.get_posterior_weights() - self.result.log_evidence = self.fs.log_evidence - self.result.log_evidence_err = self.fs.log_evidence_error - - @signal_wrapper - def run_sampler(self): - """Run the sampler. - - Nessai is designed to be ran in two stages, initialise the sampler - and then call the run method with additional configuration. This means - there are effectively two sets of keyword arguments: one for - initializing the sampler and the other for the run function. - """ - from nessai.flowsampler import FlowSampler - from nessai.utils import setup_logger - - kwargs, run_kwargs = self.split_kwargs() - - # Setup the logger for nessai, use nessai_log_level if specified, else use - # the level of the bilby logger. 
- nessai_log_level = kwargs.pop("nessai_log_level") - if nessai_log_level is None or nessai_log_level == "bilby": - nessai_log_level = logger.getEffectiveLevel() - nessai_logging_stream = kwargs.pop("nessai_logging_stream") - - setup_logger( - self.outdir, - label=self.label, - log_level=nessai_log_level, - stream=nessai_logging_stream, - ) - - # Get the nessai model - model = self.get_nessai_model() - - # Configure the sampler - self.fs = FlowSampler( - model, - signal_handling=False, # Disable signal handling so it can be handled by bilby - **kwargs, - ) - # Run the sampler - self.fs.run(**run_kwargs) - - # Update the result - self.update_result() - - return self.result - - def _translate_kwargs(self, kwargs): - """Translate the keyword arguments""" - super()._translate_kwargs(kwargs) - if "nlive" not in kwargs: - for equiv in self.npoints_equiv_kwargs: - if equiv in kwargs: - kwargs["nlive"] = kwargs.pop(equiv) - if "n_pool" not in kwargs: - for equiv in self.npool_equiv_kwargs: - if equiv in kwargs: - kwargs["n_pool"] = kwargs.pop(equiv) - if "n_pool" not in kwargs: - kwargs["n_pool"] = self._npool - - def _verify_kwargs_against_default_kwargs(self): - """Verify the keyword arguments""" - if "config_file" in self.kwargs: - d = load_json(self.kwargs["config_file"], None) - self.kwargs.update(d) - self.kwargs.pop("config_file") - - if not self.kwargs["plot"]: - self.kwargs["plot"] = self.plot - - if not self.kwargs["output"]: - self.kwargs["output"] = os.path.join( - self.outdir, f"{self.label}_nessai", "" - ) - - check_directory_exists_and_if_not_mkdir(self.kwargs["output"]) - NestedSampler._verify_kwargs_against_default_kwargs(self) - - def write_current_state(self): - """Write the current state of the sampler""" - self.fs.ns.checkpoint() - - def write_current_state_and_exit(self, signum=None, frame=None): - """ - Overwrites the base class to make sure that :code:`Nessai` terminates - properly. 
- """ - if hasattr(self, "fs"): - self.fs.terminate_run(code=signum) - else: - logger.warning("Sampler is not initialized") - self._log_interruption(signum=signum) - sys.exit(self.exit_code) - - @classmethod - def get_expected_outputs(cls, outdir=None, label=None): - """Get lists of the expected outputs directories and files. - - These are used by :code:`bilby_pipe` when transferring files via HTCondor. - - Parameters - ---------- - outdir : str - The output directory. - label : str - The label for the run. - - Returns - ------- - list - List of file names. This will be empty for nessai. - list - List of directory names. - """ - dirs = [os.path.join(outdir, f"{label}_{cls.sampler_name}", "")] - dirs += [os.path.join(dirs[0], d, "") for d in ["proposal", "diagnostics"]] - filenames = [] - return filenames, dirs - - def _setup_pool(self): - pass diff --git a/bilby/core/sampler/nestle.py b/bilby/core/sampler/nestle.py deleted file mode 100644 index a706503ea..000000000 --- a/bilby/core/sampler/nestle.py +++ /dev/null @@ -1,132 +0,0 @@ -import warnings - -from pandas import DataFrame - -from .base_sampler import NestedSampler, signal_wrapper - - -class Nestle(NestedSampler): - """bilby wrapper `nestle.Sampler` (http://kylebarbary.com/nestle/) - - All positional and keyword arguments (i.e., the args and kwargs) passed to - `run_sampler` will be propagated to `nestle.sample`, see documentation for - that function for further help. Under Other Parameters, we list commonly - used kwargs and the bilby defaults - - Parameters - ========== - npoints: int - The number of live points, note this can also equivalently be given as - one of [nlive, nlives, n_live_points] - method: {'classic', 'single', 'multi'} ('multi') - Method used to select new points - verbose: Bool - If true, print information information about the convergence during - sampling - - """ - - msg = ( - "The Nestle sampler interface in bilby is deprecated and will" - " be removed in Bilby version 3. 
Please use the `nestle-bilby`" - "sampler plugin instead: https://github.com/bilby-dev/nestle-bilby." - ) - warnings.warn(msg, FutureWarning) - - sampler_name = "nestle" - default_kwargs = dict( - verbose=True, - method="multi", - npoints=500, - update_interval=None, - npdim=None, - maxiter=None, - maxcall=None, - dlogz=None, - decline_factor=None, - rstate=None, - callback=None, - steps=20, - enlarge=1.2, - ) - - def _translate_kwargs(self, kwargs): - kwargs = super()._translate_kwargs(kwargs) - if "npoints" not in kwargs: - for equiv in self.npoints_equiv_kwargs: - if equiv in kwargs: - kwargs["npoints"] = kwargs.pop(equiv) - if "steps" not in kwargs: - for equiv in self.walks_equiv_kwargs: - if equiv in kwargs: - kwargs["steps"] = kwargs.pop(equiv) - - def _verify_kwargs_against_default_kwargs(self): - if self.kwargs["verbose"]: - import nestle - - self.kwargs["callback"] = nestle.print_progress - self.kwargs.pop("verbose") - NestedSampler._verify_kwargs_against_default_kwargs(self) - - @signal_wrapper - def run_sampler(self): - """Runs Nestle sampler with given kwargs and returns the result - - Returns - ======= - bilby.core.result.Result: Packaged information about the result - - """ - import nestle - - if nestle.__version__ == "0.2.0": - # This is a very ugly hack to support numpy>=1.24 - nestle.np.float = float - nestle.np.int = int - - out = nestle.sample( - loglikelihood=self.log_likelihood, - prior_transform=self.prior_transform, - ndim=self.ndim, - **self.kwargs - ) - print("") - - self.result.sampler_output = out - self.result.samples = nestle.resample_equal(out.samples, out.weights) - self.result.nested_samples = DataFrame( - out.samples, columns=self.search_parameter_keys - ) - self.result.nested_samples["weights"] = out.weights - self.result.nested_samples["log_likelihood"] = out.logl - self.result.log_likelihood_evaluations = self.reorder_loglikelihoods( - unsorted_loglikelihoods=out.logl, - unsorted_samples=out.samples, - 
sorted_samples=self.result.samples, - ) - self.result.log_evidence = out.logz - self.result.log_evidence_err = out.logzerr - self.result.information_gain = out.h - self.calc_likelihood_count() - return self.result - - def _run_test(self): - """ - Runs to test whether the sampler is properly running with the given - kwargs without actually running to the end - - Returns - ======= - bilby.core.result.Result: Dummy container for sampling results. - - """ - self.kwargs["maxiter"] = 2 - return self.run_sampler() - - def write_current_state(self): - """ - Nestle doesn't support checkpointing so no current state will be - written on interrupt. - """ - pass diff --git a/bilby/core/sampler/polychord.py b/bilby/core/sampler/polychord.py deleted file mode 100644 index dc163942f..000000000 --- a/bilby/core/sampler/polychord.py +++ /dev/null @@ -1,173 +0,0 @@ -import os -import warnings - -import numpy as np - -from .base_sampler import NestedSampler, signal_wrapper - - -class PyPolyChord(NestedSampler): - - """ - Bilby wrapper of PyPolyChord - https://arxiv.org/abs/1506.00171 - - .. warning:: - The PyPolyChord sampler interface in bilby is deprecated and will be - removed in future release. Please use the :code`pypolychord-bilby` - sampler plugin instead: https://github.com/bilby-dev/pypolychord-bilby - - PolyChordLite is available at: - https://github.com/PolyChord/PolyChordLite - - Follow the installation instructions at their github page. - - Keyword arguments will be passed into `pypolychord.run_polychord` into the `settings` - argument. See the PolyChord documentation for what all of those mean. - - To see what the keyword arguments are for, see the docstring of PyPolyChordSettings - """ - - msg = ( - "The PyPolyChord sampler interface in bilby is deprecated and will" - " be removed in Bilby version 3. Please use the `pypolychord-bilby`" - "sampler plugin instead: https://github.com/bilby-dev/pypolychord-bilby." 
- ) - warnings.warn(msg, FutureWarning) - - sampler_name = "pypolychord" - default_kwargs = dict( - use_polychord_defaults=False, - nlive=None, - num_repeats=None, - nprior=-1, - do_clustering=True, - feedback=1, - precision_criterion=0.001, - logzero=-1e30, - max_ndead=-1, - boost_posterior=0.0, - posteriors=True, - equals=True, - cluster_posteriors=True, - write_resume=True, - write_paramnames=False, - read_resume=True, - write_stats=True, - write_live=True, - write_dead=True, - write_prior=True, - compression_factor=np.exp(-1), - base_dir="outdir", - file_root="polychord", - seed=-1, - grade_dims=None, - grade_frac=None, - nlives={}, - ) - hard_exit = True - sampling_seed_key = "seed" - - @signal_wrapper - def run_sampler(self): - import pypolychord - from pypolychord.settings import PolyChordSettings - - if self.kwargs["use_polychord_defaults"]: - settings = PolyChordSettings( - nDims=self.ndim, - nDerived=self.ndim, - base_dir=self._sample_file_directory, - file_root=self.label, - ) - else: - self._setup_dynamic_defaults() - pc_kwargs = self.kwargs.copy() - pc_kwargs["base_dir"] = self._sample_file_directory - pc_kwargs["file_root"] = self.label - pc_kwargs.pop("use_polychord_defaults") - settings = PolyChordSettings( - nDims=self.ndim, nDerived=self.ndim, **pc_kwargs - ) - self._verify_kwargs_against_default_kwargs() - out = pypolychord.run_polychord( - loglikelihood=self.log_likelihood, - nDims=self.ndim, - nDerived=self.ndim, - settings=settings, - prior=self.prior_transform, - ) - self.result.log_evidence = out.logZ - self.result.log_evidence_err = out.logZerr - log_likelihoods, physical_parameters = self._read_sample_file() - self.result.log_likelihood_evaluations = log_likelihoods - self.result.samples = physical_parameters - self.calc_likelihood_count() - return self.result - - def _setup_dynamic_defaults(self): - """Sets up some interdependent default argument if none are given by the user""" - if not self.kwargs["grade_dims"]: - 
self.kwargs["grade_dims"] = [self.ndim] - if not self.kwargs["grade_frac"]: - self.kwargs["grade_frac"] = [1.0] * len(self.kwargs["grade_dims"]) - if not self.kwargs["nlive"]: - self.kwargs["nlive"] = self.ndim * 25 - if not self.kwargs["num_repeats"]: - self.kwargs["num_repeats"] = self.ndim * 5 - - def _translate_kwargs(self, kwargs): - kwargs = super()._translate_kwargs(kwargs) - if "nlive" not in kwargs: - for equiv in self.npoints_equiv_kwargs: - if equiv in kwargs: - kwargs["nlive"] = kwargs.pop(equiv) - - def log_likelihood(self, theta): - """Overrides the log_likelihood so that PolyChord understands it""" - return super(PyPolyChord, self).log_likelihood(theta), theta - - def _read_sample_file(self): - """ - This method reads out the _equal_weights.txt file that polychord produces. - The first column is omitted since it is just composed of 1s, i.e. the equal weights/ - The second column are the log likelihoods, the remaining columns are the physical parameters - - Returns - ======= - array_like, array_like: The log_likelihoods and the associated parameters - - """ - sample_file = ( - self._sample_file_directory + "/" + self.label + "_equal_weights.txt" - ) - samples = np.loadtxt(sample_file) - log_likelihoods = -0.5 * samples[:, 1] - physical_parameters = samples[:, -self.ndim :] - return log_likelihoods, physical_parameters - - @classmethod - def get_expected_outputs(cls, outdir=None, label=None): - """Get lists of the expected outputs directories and files. - - These are used by :code:`bilby_pipe` when transferring files via HTCondor. - - Parameters - ---------- - outdir : str - The output directory. - label : str - Ignored for pypolychord. - - Returns - ------- - list - List of file names. This will always be empty for pypolychord. - list - List of directory names. 
- """ - return [], [os.path.join(outdir, "chains", "")] - - @property - def _sample_file_directory(self): - return self.outdir + "/chains" diff --git a/bilby/core/sampler/proposal.py b/bilby/core/sampler/proposal.py deleted file mode 100644 index d23d19b4c..000000000 --- a/bilby/core/sampler/proposal.py +++ /dev/null @@ -1,358 +0,0 @@ -from inspect import isclass - -import numpy as np - -from ..prior import Uniform -from ..utils import random - - -class Sample(dict): - def __init__(self, dictionary=None): - if dictionary is None: - dictionary = dict() - super(Sample, self).__init__(dictionary) - - def __add__(self, other): - return Sample({key: self[key] + other[key] for key in self.keys()}) - - def __sub__(self, other): - return Sample({key: self[key] - other[key] for key in self.keys()}) - - def __mul__(self, other): - return Sample({key: self[key] * other for key in self.keys()}) - - @classmethod - def from_cpnest_live_point(cls, cpnest_live_point): - res = cls(dict()) - for i, key in enumerate(cpnest_live_point.names): - res[key] = cpnest_live_point.values[i] - return res - - @classmethod - def from_external_type(cls, external_sample, sampler_name): - if sampler_name == "cpnest": - return cls.from_cpnest_live_point(external_sample) - return external_sample - - -class JumpProposal(object): - def __init__(self, priors=None): - """A generic class for jump proposals - - Parameters - ========== - priors: bilby.core.prior.PriorDict - Dictionary of priors used in this sampling run - - Attributes - ========== - log_j: float - Log Jacobian of the proposal. Characterises whether or not detailed balance - is preserved. If not, log_j needs to be adjusted accordingly. 
- """ - self.priors = priors - self.log_j = 0.0 - - def __call__(self, sample, **kwargs): - """A generic wrapper for the jump proposal function - - Parameters - ========== - args: Arguments that are going to be passed into the proposal function - kwargs: Keyword arguments that are going to be passed into the proposal function - - Returns - ======= - dict: A dictionary with the new samples. Boundary conditions are applied. - - """ - return self._apply_boundaries(sample) - - def _move_reflecting_keys(self, sample): - keys = [ - key for key in sample.keys() if self.priors[key].boundary == "reflective" - ] - for key in keys: - if ( - sample[key] > self.priors[key].maximum - or sample[key] < self.priors[key].minimum - ): - r = self.priors[key].maximum - self.priors[key].minimum - delta = (sample[key] - self.priors[key].minimum) % (2 * r) - if delta > r: - sample[key] = ( - 2 * self.priors[key].maximum - self.priors[key].minimum - delta - ) - elif delta < r: - sample[key] = self.priors[key].minimum + delta - return sample - - def _move_periodic_keys(self, sample): - keys = [key for key in sample.keys() if self.priors[key].boundary == "periodic"] - for key in keys: - if ( - sample[key] > self.priors[key].maximum - or sample[key] < self.priors[key].minimum - ): - sample[key] = self.priors[key].minimum + ( - (sample[key] - self.priors[key].minimum) - % (self.priors[key].maximum - self.priors[key].minimum) - ) - return sample - - def _apply_boundaries(self, sample): - sample = self._move_periodic_keys(sample) - sample = self._move_reflecting_keys(sample) - return sample - - -class JumpProposalCycle(object): - def __init__(self, proposal_functions, weights, cycle_length=100): - """A generic wrapper class for proposal cycles - - Parameters - ========== - proposal_functions: list - A list of callable proposal functions/objects - weights: list - A list of integer weights for the respective proposal functions - cycle_length: int, optional - Length of the proposal cycle - """ - 
self.proposal_functions = proposal_functions - self.weights = weights - self.cycle_length = cycle_length - self._index = 0 - self._cycle = np.array([]) - self.update_cycle() - - def __call__(self, **kwargs): - proposal = self._cycle[self.index] - self._index = (self.index + 1) % self.cycle_length - return proposal(**kwargs) - - def __len__(self): - return len(self.proposal_functions) - - def update_cycle(self): - self._cycle = random.rng.choice( - self.proposal_functions, - size=self.cycle_length, - p=self.weights, - replace=True, - ) - - @property - def proposal_functions(self): - return self._proposal_functions - - @proposal_functions.setter - def proposal_functions(self, proposal_functions): - for i, proposal in enumerate(proposal_functions): - if isclass(proposal): - proposal_functions[i] = proposal() - self._proposal_functions = proposal_functions - - @property - def index(self): - return self._index - - @property - def weights(self): - """ - - Returns - ======= - Normalised proposal weights - - """ - return np.array(self._weights) / np.sum(np.array(self._weights)) - - @weights.setter - def weights(self, weights): - assert len(weights) == len(self.proposal_functions) - self._weights = weights - - @property - def unnormalised_weights(self): - return self._weights - - -class NormJump(JumpProposal): - def __init__(self, step_size, priors=None): - """ - A normal distributed step centered around the old sample - - Parameters - ========== - step_size: float - The scalable step size - priors: - See superclass - """ - super(NormJump, self).__init__(priors) - self.step_size = step_size - - def __call__(self, sample, **kwargs): - for key in sample.keys(): - sample[key] = random.rng.normal(sample[key], self.step_size) - return super(NormJump, self).__call__(sample) - - -class EnsembleWalk(JumpProposal): - def __init__( - self, - random_number_generator=random.rng.uniform, - n_points=3, - priors=None, - **random_number_generator_args - ): - """ - An ensemble walk - - 
Parameters - ========== - random_number_generator: func, optional - A random number generator. Default is random.random - n_points: int, optional - Number of points in the ensemble to average over. Default is 3. - priors: - See superclass - random_number_generator_args: - Additional keyword arguments for the random number generator - """ - super(EnsembleWalk, self).__init__(priors) - self.random_number_generator = random_number_generator - self.n_points = n_points - self.random_number_generator_args = random_number_generator_args - - def __call__(self, sample, **kwargs): - subset = random.rng.choice(kwargs["coordinates"], self.n_points, replace=False) - for i in range(len(subset)): - subset[i] = Sample.from_external_type( - subset[i], kwargs.get("sampler_name", None) - ) - center_of_mass = self.get_center_of_mass(subset) - for x in subset: - sample += (x - center_of_mass) * self.random_number_generator( - **self.random_number_generator_args - ) - return super(EnsembleWalk, self).__call__(sample) - - @staticmethod - def get_center_of_mass(subset): - return {key: np.mean([c[key] for c in subset]) for key in subset[0].keys()} - - -class EnsembleStretch(JumpProposal): - def __init__(self, scale=2.0, priors=None): - """ - Stretch move. Calculates the log Jacobian which can be used in cpnest to bias future moves. - - Parameters - ========== - scale: float, optional - Stretching scale. Default is 2.0. 
- """ - super(EnsembleStretch, self).__init__(priors) - self.scale = scale - - def __call__(self, sample, **kwargs): - second_sample = random.rng.choice(kwargs["coordinates"]) - second_sample = Sample.from_external_type( - second_sample, kwargs.get("sampler_name", None) - ) - step = random.rng.uniform(-1, 1) * np.log(self.scale) - sample = second_sample + (sample - second_sample) * np.exp(step) - self.log_j = len(sample) * step - return super(EnsembleStretch, self).__call__(sample) - - -class DifferentialEvolution(JumpProposal): - def __init__(self, sigma=1e-4, mu=1.0, priors=None): - """ - Differential evolution step. Takes two elements from the existing coordinates and differentially evolves the - old sample based on them using some Gaussian randomisation in the step. - - Parameters - ========== - sigma: float, optional - Random spread in the evolution step. Default is 1e-4 - mu: float, optional - Scale of the randomization. Default is 1.0 - """ - super(DifferentialEvolution, self).__init__(priors) - self.sigma = sigma - self.mu = mu - - def __call__(self, sample, **kwargs): - a, b = random.rng.choice(kwargs["coordinates"], 2, replace=False) - sample = sample + (b - a) * random.rng.normal(self.mu, self.sigma) - return super(DifferentialEvolution, self).__call__(sample) - - -class EnsembleEigenVector(JumpProposal): - def __init__(self, priors=None): - """ - Ensemble step based on the ensemble eigenvectors. 
- - Parameters - ========== - priors: - See superclass - """ - super(EnsembleEigenVector, self).__init__(priors) - self.eigen_values = None - self.eigen_vectors = None - self.covariance = None - - def update_eigenvectors(self, coordinates): - if coordinates is None: - return - elif len(coordinates[0]) == 1: - self._set_1_d_eigenvectors(coordinates) - else: - self._set_n_d_eigenvectors(coordinates) - - def _set_1_d_eigenvectors(self, coordinates): - n_samples = len(coordinates) - key = list(coordinates[0].keys())[0] - variance = np.var([coordinates[j][key] for j in range(n_samples)]) - self.eigen_values = np.atleast_1d(variance) - self.covariance = self.eigen_values - self.eigen_vectors = np.eye(1) - - def _set_n_d_eigenvectors(self, coordinates): - n_samples = len(coordinates) - dim = len(coordinates[0]) - cov_array = np.zeros((dim, n_samples)) - for i, key in enumerate(coordinates[0].keys()): - for j in range(n_samples): - cov_array[i, j] = coordinates[j][key] - self.covariance = np.cov(cov_array) - self.eigen_values, self.eigen_vectors = np.linalg.eigh(self.covariance) - - def __call__(self, sample, **kwargs): - self.update_eigenvectors(kwargs["coordinates"]) - i = random.rng.integers(len(sample)) - jump_size = np.sqrt(np.fabs(self.eigen_values[i])) * random.rng.normal(0, 1) - for j, key in enumerate(sample.keys()): - sample[key] += jump_size * self.eigen_vectors[j, i] - return super(EnsembleEigenVector, self).__call__(sample) - - -class DrawFlatPrior(JumpProposal): - """ - Draws a proposal from the flattened prior distribution. 
- """ - - def __call__(self, sample, **kwargs): - sample = _draw_from_flat_priors(sample, self.priors) - return super(DrawFlatPrior, self).__call__(sample) - - -def _draw_from_flat_priors(sample, priors): - for key in sample.keys(): - flat_prior = Uniform(priors[key].minimum, priors[key].maximum, priors[key].name) - sample[key] = flat_prior.sample() - return sample diff --git a/bilby/core/sampler/ptemcee.py b/bilby/core/sampler/ptemcee.py deleted file mode 100644 index 529feb4cb..000000000 --- a/bilby/core/sampler/ptemcee.py +++ /dev/null @@ -1,1436 +0,0 @@ -import copy -import datetime -import logging -import os -import time -import warnings -from collections import namedtuple - -import numpy as np -import pandas as pd -from scipy.integrate import trapezoid - -from ..likelihood import _safe_likelihood_call -from ..utils import check_directory_exists_and_if_not_mkdir, logger, safe_file_dump -from .base_sampler import LikePriorEvaluator, MCMCSampler, SamplerError, signal_wrapper - -ConvergenceInputs = namedtuple( - "ConvergenceInputs", - [ - "autocorr_c", - "autocorr_tol", - "autocorr_tau", - "gradient_tau", - "gradient_mean_log_posterior", - "Q_tol", - "safety", - "burn_in_nact", - "burn_in_fixed_discard", - "mean_logl_frac", - "thin_by_nact", - "nsamples", - "ignore_keys_for_tau", - "min_tau", - "niterations_per_check", - ], -) - - -class Ptemcee(MCMCSampler): - """bilby wrapper ptemcee (https://github.com/willvousden/ptemcee) - - All positional and keyword arguments (i.e., the args and kwargs) passed to - `run_sampler` will be propagated to `ptemcee.Sampler`, see - documentation for that class for further help. Under Other Parameters, we - list commonly used kwargs and the bilby defaults. - - Parameters - ---------- - nsamples: int, (5000) - The requested number of samples. Note, in cases where the - autocorrelation parameter is difficult to measure, it is possible to - end up with more than nsamples. 
- burn_in_nact, thin_by_nact: int, (50, 1) - The number of burn-in autocorrelation times to discard and the thin-by - factor. Increasing burn_in_nact increases the time required for burn-in. - Increasing thin_by_nact increases the time required to obtain nsamples. - burn_in_fixed_discard: int (0) - A fixed number of samples to discard for burn-in - mean_logl_frac: float, (0.0.1) - The maximum fractional change the mean log-likelihood to accept - autocorr_tol: int, (50) - The minimum number of autocorrelation times needed to trust the - estimate of the autocorrelation time. - autocorr_c: int, (5) - The step size for the window search used by emcee.autocorr.integrated_time - safety: int, (1) - A multiplicative factor for the estimated autocorrelation. Useful for - cases where non-convergence can be observed by eye but the automated - tools are failing. - autocorr_tau: int, (1) - The number of autocorrelation times to use in assessing if the - autocorrelation time is stable. - gradient_tau: float, (0.1) - The maximum (smoothed) local gradient of the ACT estimate to allow. - This ensures the ACT estimate is stable before finishing sampling. - gradient_mean_log_posterior: float, (0.1) - The maximum (smoothed) local gradient of the logliklilhood to allow. - This ensures the ACT estimate is stable before finishing sampling. - Q_tol: float (1.01) - The maximum between-chain to within-chain tolerance allowed (akin to - the Gelman-Rubin statistic). - min_tau: int, (1) - A minimum tau (autocorrelation time) to accept. - check_point_delta_t: float, (600) - The period with which to checkpoint (in seconds). - threads: int, (1) - If threads > 1, a MultiPool object is setup and used. - exit_code: int, (77) - The code on which the sampler exits. - store_walkers: bool (False) - If true, store the unthinned, unburnt chains in the result. Note, this - is not recommended for cases where tau is large. 
- ignore_keys_for_tau: str - A pattern used to ignore keys in estimating the autocorrelation time. - pos0: str, list, np.ndarray, dict - If a string, one of "prior" or "minimize". For "prior", the initial - positions of the sampler are drawn from the sampler. If "minimize", - a scipy.optimize step is applied to all parameters a number of times. - The walkers are then initialized from the range of values obtained. - If a list, for the keys in the list the optimization step is applied, - otherwise the initial points are drawn from the prior. - If a :code:`numpy` array the shape should be - :code:`(ntemps, nwalkers, ndim)`. - If a :code:`dict`, this should be a dictionary with keys matching the - :code:`search_parameter_keys`. Each entry should be an array with - shape :code:`(ntemps, nwalkers)`. - - niterations_per_check: int (5) - The number of iteration steps to take before checking ACT. This - effectively pre-thins the chains. Larger values reduce the per-eval - timing due to improved efficiency. But, if it is made too large the - pre-thinning may be overly aggressive effectively wasting compute-time. - If you see tau=1, then niterations_per_check is likely too large. - - - Other Parameters - ---------------- - nwalkers: int, (200) - The number of walkers - nsteps: int, (100) - The number of steps to take - ntemps: int (10) - The number of temperatures used by ptemcee - Tmax: float - The maximum temperature - - """ - - msg = ( - "The ptemcee sampler interface in bilby is deprecated and will" - " be removed in Bilby version 3. Please use the `ptemcee-bilby`" - "sampler plugin instead: https://github.com/bilby-dev/ptemcee-bilby." 
- ) - warnings.warn(msg, FutureWarning) - - sampler_name = "ptemcee" - # Arguments used by ptemcee - default_kwargs = dict( - ntemps=10, - nwalkers=100, - Tmax=None, - betas=None, - a=2.0, - adaptation_lag=10000, - adaptation_time=100, - random=None, - adapt=False, - swap_ratios=False, - ) - - def __init__( - self, - likelihood, - priors, - outdir="outdir", - label="label", - use_ratio=False, - check_point_plot=True, - skip_import_verification=False, - resume=True, - nsamples=5000, - burn_in_nact=50, - burn_in_fixed_discard=0, - mean_logl_frac=0.01, - thin_by_nact=0.5, - autocorr_tol=50, - autocorr_c=5, - safety=1, - autocorr_tau=1, - gradient_tau=0.1, - gradient_mean_log_posterior=0.1, - Q_tol=1.02, - min_tau=1, - check_point_delta_t=600, - threads=1, - exit_code=77, - plot=False, - store_walkers=False, - ignore_keys_for_tau=None, - pos0="prior", - niterations_per_check=5, - log10beta_min=None, - verbose=True, - **kwargs, - ): - super(Ptemcee, self).__init__( - likelihood=likelihood, - priors=priors, - outdir=outdir, - label=label, - use_ratio=use_ratio, - plot=plot, - skip_import_verification=skip_import_verification, - exit_code=exit_code, - **kwargs, - ) - - self.nwalkers = self.sampler_init_kwargs["nwalkers"] - self.ntemps = self.sampler_init_kwargs["ntemps"] - self.max_steps = 500 - - # Checkpointing inputs - self.resume = resume - self.check_point_delta_t = check_point_delta_t - self.check_point_plot = check_point_plot - self.resume_file = f"{self.outdir}/{self.label}_checkpoint_resume.pickle" - - # Store convergence checking inputs in a named tuple - convergence_inputs_dict = dict( - autocorr_c=autocorr_c, - autocorr_tol=autocorr_tol, - autocorr_tau=autocorr_tau, - safety=safety, - burn_in_nact=burn_in_nact, - burn_in_fixed_discard=burn_in_fixed_discard, - mean_logl_frac=mean_logl_frac, - thin_by_nact=thin_by_nact, - gradient_tau=gradient_tau, - gradient_mean_log_posterior=gradient_mean_log_posterior, - Q_tol=Q_tol, - nsamples=nsamples, - 
ignore_keys_for_tau=ignore_keys_for_tau, - min_tau=min_tau, - niterations_per_check=niterations_per_check, - ) - self.convergence_inputs = ConvergenceInputs(**convergence_inputs_dict) - logger.info(f"Using convergence inputs: {self.convergence_inputs}") - - # Check if threads was given as an equivalent arg - if threads == 1: - for equiv in self.npool_equiv_kwargs: - if equiv in kwargs: - threads = kwargs.pop(equiv) - - # Store threads - self.threads = threads - - # Misc inputs - self.store_walkers = store_walkers - self.pos0 = pos0 - - self._periodic = [ - self.priors[key].boundary == "periodic" - for key in self.search_parameter_keys - ] - self.priors.sample() - self._minima = np.array( - [self.priors[key].minimum for key in self.search_parameter_keys] - ) - self._range = ( - np.array([self.priors[key].maximum for key in self.search_parameter_keys]) - - self._minima - ) - - self.log10beta_min = log10beta_min - if self.log10beta_min is not None: - betas = np.logspace(0, self.log10beta_min, self.ntemps) - logger.warning(f"Using betas {betas}") - self.kwargs["betas"] = betas - self.verbose = verbose - - self.iteration = 0 - self.chain_array = self.get_zero_chain_array() - self.log_likelihood_array = self.get_zero_array() - self.log_posterior_array = self.get_zero_array() - self.beta_list = list() - self.tau_list = list() - self.tau_list_n = list() - self.Q_list = list() - self.time_per_check = list() - - self.nburn = np.nan - self.thin = np.nan - self.tau_int = np.nan - self.nsamples_effective = 0 - self.discard = 0 - - @property - def sampler_function_kwargs(self): - """Kwargs passed to samper.sampler()""" - keys = ["adapt", "swap_ratios"] - return {key: self.kwargs[key] for key in keys} - - @property - def sampler_init_kwargs(self): - """Kwargs passed to initialize ptemcee.Sampler()""" - return { - key: value - for key, value in self.kwargs.items() - if key not in self.sampler_function_kwargs - } - - def _translate_kwargs(self, kwargs): - """Translate kwargs""" - 
kwargs = super()._translate_kwargs(kwargs) - if "nwalkers" not in kwargs: - for equiv in self.nwalkers_equiv_kwargs: - if equiv in kwargs: - kwargs["nwalkers"] = kwargs.pop(equiv) - - def get_pos0_from_prior(self): - """Draw the initial positions from the prior - - Returns - ------- - pos0: list - The initial postitions of the walkers, with shape (ntemps, nwalkers, ndim) - - """ - logger.info("Generating pos0 samples") - return np.array( - [ - [self.get_random_draw_from_prior() for _ in range(self.nwalkers)] - for _ in range(self.kwargs["ntemps"]) - ] - ) - - def get_pos0_from_minimize(self, minimize_list=None): - """Draw the initial positions using an initial minimization step - - See pos0 in the class initialization for details. - - Returns - ------- - pos0: list - The initial postitions of the walkers, with shape (ntemps, nwalkers, ndim) - - """ - - from scipy.optimize import minimize - - from ..utils import random - - # Set up the minimize list: keys not in this list will have initial - # positions drawn from the prior - if minimize_list is None: - minimize_list = self.search_parameter_keys - pos0 = np.zeros((self.kwargs["ntemps"], self.kwargs["nwalkers"], self.ndim)) - else: - pos0 = np.array(self.get_pos0_from_prior()) - - logger.info(f"Attempting to set pos0 for {minimize_list} from minimize") - - likelihood_copy = copy.copy(self.likelihood) - - def neg_log_like(params): - """Internal function to minimize""" - try: - parameters = copy.copy(draw) - parameters.update({key: val for key, val in zip(minimize_list, params)}) - return -_safe_likelihood_call(likelihood_copy, parameters) - except RuntimeError: - return +np.inf - - # Bounds used in the minimization - bounds = [ - (self.priors[key].minimum, self.priors[key].maximum) - for key in minimize_list - ] - - # Run the minimization step several times to get a range of values - trials = 0 - success = [] - while True: - draw = self.priors.sample() - x0 = [draw[key] for key in minimize_list] - res = minimize( - 
neg_log_like, x0, bounds=bounds, method="L-BFGS-B", tol=1e-15 - ) - if res.success: - success.append(res.x) - if trials > 100: - raise SamplerError("Unable to set pos0 from minimize") - if len(success) >= 10: - break - - # Initialize positions from the range of values - success = np.array(success) - for i, key in enumerate(minimize_list): - pos0_min = np.min(success[:, i]) - pos0_max = np.max(success[:, i]) - logger.info(f"Initialize {key} walkers from {pos0_min}->{pos0_max}") - j = self.search_parameter_keys.index(key) - pos0[:, :, j] = random.rng.uniform( - pos0_min, - pos0_max, - size=(self.kwargs["ntemps"], self.kwargs["nwalkers"]), - ) - return pos0 - - def get_pos0_from_array(self): - if self.pos0.shape != (self.ntemps, self.nwalkers, self.ndim): - raise ValueError( - "Shape of starting array should be (ntemps, nwalkers, ndim). " - f"In this case that is ({self.ntemps}, {self.nwalkers}, " - f"{self.ndim}), got {self.pos0.shape}" - ) - else: - return self.pos0 - - def get_pos0_from_dict(self): - """ - Initialize the starting points from a passed dictionary. - - The :code:`pos0` passed to the :code:`Sampler` should be a dictionary - with keys matching the :code:`search_parameter_keys`. - Each entry should have shape :code:`(ntemps, nwalkers)`. 
- """ - pos0 = np.array([self.pos0[key] for key in self.search_parameter_keys]) - self.pos0 = np.moveaxis(pos0, 0, -1) - return self.get_pos0_from_array() - - def setup_sampler(self): - """Either initialize the sampler or read in the resume file""" - import ptemcee - - if ptemcee.__version__ == "1.0.0": - # This is a very ugly hack to support numpy>=1.24 - ptemcee.sampler.np.float = float - - if ( - os.path.isfile(self.resume_file) - and os.path.getsize(self.resume_file) - and self.resume is True - ): - import dill - - logger.info(f"Resume data {self.resume_file} found") - with open(self.resume_file, "rb") as file: - data = dill.load(file) - - # Extract the check-point data - self.sampler = data["sampler"] - self.iteration = data["iteration"] - self.chain_array = data["chain_array"] - self.log_likelihood_array = data["log_likelihood_array"] - self.log_posterior_array = data["log_posterior_array"] - self.pos0 = data["pos0"] - self.beta_list = data["beta_list"] - self.sampler._betas = np.array(self.beta_list[-1]) - self.tau_list = data["tau_list"] - self.tau_list_n = data["tau_list_n"] - self.Q_list = data["Q_list"] - self.time_per_check = data["time_per_check"] - - # Initialize the pool - self.sampler.pool = self.pool - self.sampler.threads = self.threads - - logger.info(f"Resuming from previous run with time={self.iteration}") - - else: - # Initialize the PTSampler - if self.threads == 1: - self.sampler = ptemcee.Sampler( - dim=self.ndim, - logl=self.log_likelihood, - logp=self.log_prior, - **self.sampler_init_kwargs, - ) - else: - self.sampler = ptemcee.Sampler( - dim=self.ndim, - logl=do_nothing_function, - logp=do_nothing_function, - threads=self.threads, - **self.sampler_init_kwargs, - ) - - self.sampler._likeprior = LikePriorEvaluator() - - # Initialize storing results - self.iteration = 0 - self.chain_array = self.get_zero_chain_array() - self.log_likelihood_array = self.get_zero_array() - self.log_posterior_array = self.get_zero_array() - self.beta_list = 
list() - self.tau_list = list() - self.tau_list_n = list() - self.Q_list = list() - self.time_per_check = list() - self.pos0 = self.get_pos0() - - return self.sampler - - def get_zero_chain_array(self): - return np.zeros((self.nwalkers, self.max_steps, self.ndim)) - - def get_zero_array(self): - return np.zeros((self.ntemps, self.nwalkers, self.max_steps)) - - def get_pos0(self): - """Master logic for setting pos0""" - if isinstance(self.pos0, str) and self.pos0.lower() == "prior": - return self.get_pos0_from_prior() - elif isinstance(self.pos0, str) and self.pos0.lower() == "minimize": - return self.get_pos0_from_minimize() - elif isinstance(self.pos0, list): - return self.get_pos0_from_minimize(minimize_list=self.pos0) - elif isinstance(self.pos0, np.ndarray): - return self.get_pos0_from_array() - elif isinstance(self.pos0, dict): - return self.get_pos0_from_dict() - else: - raise SamplerError(f"pos0={self.pos0} not implemented") - - def _close_pool(self): - if getattr(self.sampler, "pool", None) is not None: - self.sampler.pool = None - if "pool" in self.result.sampler_kwargs: - del self.result.sampler_kwargs["pool"] - super(Ptemcee, self)._close_pool() - - @signal_wrapper - def run_sampler(self): - self._setup_pool() - sampler = self.setup_sampler() - - t0 = datetime.datetime.now() - logger.info("Starting to sample") - - while True: - for pos0, log_posterior, log_likelihood in sampler.sample( - self.pos0, - storechain=False, - iterations=self.convergence_inputs.niterations_per_check, - **self.sampler_function_kwargs, - ): - pos0[:, :, self._periodic] = ( - np.mod( - pos0[:, :, self._periodic] - self._minima[self._periodic], - self._range[self._periodic], - ) - + self._minima[self._periodic] - ) - - if self.iteration == self.chain_array.shape[1]: - self.chain_array = np.concatenate( - (self.chain_array, self.get_zero_chain_array()), axis=1 - ) - self.log_likelihood_array = np.concatenate( - (self.log_likelihood_array, self.get_zero_array()), axis=2 - ) - 
self.log_posterior_array = np.concatenate( - (self.log_posterior_array, self.get_zero_array()), axis=2 - ) - - self.pos0 = pos0 - - self.chain_array[:, self.iteration, :] = pos0[0, :, :] - self.log_likelihood_array[:, :, self.iteration] = log_likelihood - self.log_posterior_array[:, :, self.iteration] = log_posterior - self.mean_log_posterior = np.mean( - self.log_posterior_array[:, :, : self.iteration], axis=1 - ) - - # (nwalkers, ntemps, iterations) - # so mean_log_posterior is shaped (nwalkers, iterations) - - # Calculate time per iteration - self.time_per_check.append((datetime.datetime.now() - t0).total_seconds()) - t0 = datetime.datetime.now() - - self.iteration += 1 - - # Calculate minimum iteration step to discard - minimum_iteration = get_minimum_stable_itertion( - self.mean_log_posterior, frac=self.convergence_inputs.mean_logl_frac - ) - logger.debug(f"Minimum iteration = {minimum_iteration}") - - # Calculate the maximum discard number - discard_max = np.max( - [self.convergence_inputs.burn_in_fixed_discard, minimum_iteration] - ) - - if self.iteration > discard_max + self.nwalkers: - # If we have taken more than nwalkers steps after the discard - # then set the discard - self.discard = discard_max - else: - # If haven't discard everything (avoid initialisation bias) - logger.debug("Too few steps to calculate convergence") - self.discard = self.iteration - - ( - stop, - self.nburn, - self.thin, - self.tau_int, - self.nsamples_effective, - ) = check_iteration( - self.iteration, - self.chain_array[:, self.discard : self.iteration, :], - sampler, - self.convergence_inputs, - self.search_parameter_keys, - self.time_per_check, - self.beta_list, - self.tau_list, - self.tau_list_n, - self.Q_list, - self.mean_log_posterior, - verbose=self.verbose, - ) - - if stop: - logger.info("Finished sampling") - break - - # If a checkpoint is due, checkpoint - if os.path.isfile(self.resume_file): - last_checkpoint_s = time.time() - os.path.getmtime(self.resume_file) - else: 
- last_checkpoint_s = np.sum(self.time_per_check) - - if last_checkpoint_s > self.check_point_delta_t: - self.write_current_state(plot=self.check_point_plot) - - # Run a final checkpoint to update the plots and samples - self.write_current_state(plot=self.check_point_plot) - - # Get 0-likelihood samples and store in the result - self.result.samples = self.chain_array[ - :, self.discard + self.nburn : self.iteration : self.thin, : - ].reshape((-1, self.ndim)) - loglikelihood = self.log_likelihood_array[ - 0, :, self.discard + self.nburn : self.iteration : self.thin - ] # nwalkers, nsteps - self.result.log_likelihood_evaluations = loglikelihood.reshape((-1)) - - if self.store_walkers: - self.result.walkers = self.sampler.chain - self.result.nburn = self.nburn - self.result.discard = self.discard - - log_evidence, log_evidence_err = compute_evidence( - sampler, - self.log_likelihood_array, - self.outdir, - self.label, - self.discard, - self.nburn, - self.thin, - self.iteration, - ) - self.result.log_evidence = log_evidence - self.result.log_evidence_err = log_evidence_err - - self.result.sampling_time = datetime.timedelta( - seconds=np.sum(self.time_per_check) - ) - - self._close_pool() - - return self.result - - def write_current_state(self, plot=True): - check_directory_exists_and_if_not_mkdir(self.outdir) - checkpoint( - self.iteration, - self.outdir, - self.label, - self.nsamples_effective, - self.sampler, - self.discard, - self.nburn, - self.thin, - self.search_parameter_keys, - self.resume_file, - self.log_likelihood_array, - self.log_posterior_array, - self.chain_array, - self.pos0, - self.beta_list, - self.tau_list, - self.tau_list_n, - self.Q_list, - self.time_per_check, - ) - - if plot: - try: - # Generate the walkers plot diagnostic - plot_walkers( - self.chain_array[:, : self.iteration, :], - self.nburn, - self.thin, - self.search_parameter_keys, - self.outdir, - self.label, - self.discard, - ) - except Exception as e: - logger.info(f"Walkers plot failed 
with exception {e}") - - try: - # Generate the tau plot diagnostic if DEBUG - if logger.level < logging.INFO: - plot_tau( - self.tau_list_n, - self.tau_list, - self.search_parameter_keys, - self.outdir, - self.label, - self.tau_int, - self.convergence_inputs.autocorr_tau, - ) - except Exception as e: - logger.info(f"tau plot failed with exception {e}") - - try: - plot_mean_log_posterior( - self.mean_log_posterior, - self.outdir, - self.label, - ) - except Exception as e: - logger.info(f"mean_logl plot failed with exception {e}") - - @classmethod - def get_expected_outputs(cls, outdir=None, label=None): - """Get lists of the expected outputs directories and files. - - These are used by :code:`bilby_pipe` when transferring files via HTCondor. - - Parameters - ---------- - outdir : str - The output directory. - label : str - The label for the run. - - Returns - ------- - list - List of file names. - list - List of directory names. Will always be empty for ptemcee. - """ - filenames = [f"{outdir}/{label}_checkpoint_resume.pickle"] - return filenames, [] - - -def get_minimum_stable_itertion(mean_array, frac, nsteps_min=10): - nsteps = mean_array.shape[1] - if nsteps < nsteps_min: - return 0 - - min_it = 0 - for x in mean_array: - maxl = np.max(x) - fracdiff = (maxl - x) / np.abs(maxl) - idxs = fracdiff < frac - if np.sum(idxs) > 0: - min_it = np.max([min_it, np.min(np.arange(len(idxs))[idxs])]) - return min_it - - -def check_iteration( - iteration, - samples, - sampler, - convergence_inputs, - search_parameter_keys, - time_per_check, - beta_list, - tau_list, - tau_list_n, - gelman_rubin_list, - mean_log_posterior, - verbose=True, -): - """Per-iteration logic to calculate the convergence check. - - To check convergence, this function does the following: - 1. Calculate the autocorrelation time (tau) for each dimension for each walker, - corresponding to those dimensions in search_parameter_keys that aren't - specifically excluded in ci.ignore_keys_for_tau. - a. 
Store the average tau for each dimension, averaged over each walker. - 2. Calculate the Gelman-Rubin statistic (see `get_Q_convergence`), measuring - the convergence of the ensemble of walkers. - 3. Calculate the number of effective samples; we aggregate the total number - of burned-in samples (amongst all walkers), divided by a multiple of the - current maximum average autocorrelation time. Tuned by `ci.burn_in_nact` - and `ci.thin_by_nact`. - 4. If the Gelman-Rubin statistic < `ci.Q_tol` and `ci.nsamples` < the - number of effective samples, we say that our ensemble is converged, - setting `converged = True`. - 5. For some number of the latest steps (set by the autocorrelation time - and the GRAD_WINDOW_LENGTH parameter), we find the maxmium gradient - of the autocorrelation time over all of our dimensions, over all walkers - (autocorrelation time is already averaged over walkers) and the maximum - value of the gradient of the mean log posterior over iterations, over - all walkers. - 6. If the maximum gradient in tau is less than `ci.gradient_tau` and the - maximum gradient in the mean log posterior is less than - `ci.gradient_mean_log_posterior`, we set `tau_usable = True`. - 7. If both `converged` and `tau_usable` are true, we return `stop = True`, - indicating that our ensemble is converged + burnt in on this - iteration. - 8. Also prints progress! (see `print_progress`) - - Notes - ----- - The gradient of tau is computed with a Savgol-Filter, over windows in - sample number of length `GRAD_WINDOW_LENGTH`. This value must be an odd integer. - For `ndim > 3`, we calculate this as the nearest odd integer to ndim. - For `ndim <= 3`, we calculate this as the nearest odd integer to nwalkers, as - typically a much larger window length than polynomial order (default 2) leads - to more stable smoothing. - - Parameters - ---------- - iteration: int - Number indexing the current iteration, at which we are checking - convergence. 
- samples: np.ndarray - Array of ensemble MCMC samples, shaped like (number of walkers, number - of MCMC steps, number of dimensions). - sampler: bilby.core.sampler.Ptemcee - Bilby Ptemcee sampler object; in particular, this function uses the list - of walker temperatures stored in `sampler.betas`. - convergence_inputs: bilby.core.sampler.ptemcee.ConvergenceInputs - A named tuple of the convergence checking inputs - search_parameter_keys: list - A list of the search parameter keys - time_per_check, tau_list, tau_list_n: list - Lists used for tracking the run - beta_list: list - List of floats storing the walker inverse temperatures. - tau_list: list - List of average autocorrelation times for each dimension, averaged - over walkers, at each checked iteration. So, an effective shape - of (number of iterations so far, number of dimensions). - tau_list_n: list - List of iteration numbers, enumerating the first "axis" of tau_list. - E.g. if tau_list_n[1] = 5, this means that the list found at - tau_list[1] was calculated on iteration number 5. - gelman_rubin_list: list (floats) - list of values of the Gelman-Rubin statistic; the value calculated - in this call of check_iteration is appended to the gelman_rubin_list. - mean_log_posterior: np.ndarray - Float array shaped like (number of walkers, number of MCMC steps), - with the log of the posterior, averaged over the dimensions. 
- verbose: bool - Whether to print the output - - Returns - ------- - stop: bool - A boolean flag, True if the stopping criteria has been met - burn: int - The number of burn-in steps to discard - thin: int - The thin-by factor to apply - tau_int: int - The integer estimated ACT - nsamples_effective: int - The effective number of samples after burning and thinning - """ - - ci = convergence_inputs - - nwalkers, nsteps, ndim = samples.shape - tau_array = calculate_tau_array(samples, search_parameter_keys, ci) - tau = np.max(np.mean(tau_array, axis=0)) - - # Apply multiplicitive safety factor - tau = ci.safety * tau - - # Store for convergence checking and plotting - beta_list.append(list(sampler.betas)) - tau_list.append(list(np.mean(tau_array, axis=0))) - tau_list_n.append(iteration) - - gelman_rubin_statistic = get_Q_convergence(samples) - gelman_rubin_list.append(gelman_rubin_statistic) - - if np.isnan(tau) or np.isinf(tau): - if verbose: - print_progress( - iteration, - sampler, - time_per_check, - np.nan, - np.nan, - np.nan, - np.nan, - np.nan, - False, - convergence_inputs, - gelman_rubin_statistic, - ) - return False, np.nan, np.nan, np.nan, np.nan - - # Convert to an integer - tau_int = int(np.ceil(tau)) - - # Calculate the effective number of samples available - nburn = int(ci.burn_in_nact * tau_int) - thin = int(np.max([1, ci.thin_by_nact * tau_int])) - samples_per_check = nwalkers / thin - nsamples_effective = int(nwalkers * (nsteps - nburn) / thin) - - # Calculate convergence boolean - converged = gelman_rubin_statistic < ci.Q_tol and ci.nsamples < nsamples_effective - logger.debug( - "Convergence: Q GRAD_WINDOW_LENGTH: - gradient_tau = get_max_gradient( - check_taus, axis=0, window_length=GRAD_WINDOW_LENGTH - ) - - if gradient_tau < ci.gradient_tau: - logger.debug( - f"tau usable as {gradient_tau} < gradient_tau={ci.gradient_tau}" - ) - tau_usable = True - else: - logger.debug( - f"tau not usable as {gradient_tau} > gradient_tau={ci.gradient_tau}" - ) - 
tau_usable = False - - check_mean_log_posterior = mean_log_posterior[:, -nsteps_to_check:] - gradient_mean_log_posterior = get_max_gradient( - check_mean_log_posterior, - axis=1, - window_length=GRAD_WINDOW_LENGTH, - smooth=True, - ) - - if gradient_mean_log_posterior < ci.gradient_mean_log_posterior: - logger.debug( - f"tau usable as {gradient_mean_log_posterior} < " - f"gradient_mean_log_posterior={ci.gradient_mean_log_posterior}" - ) - tau_usable *= True - else: - logger.debug( - f"tau not usable as {gradient_mean_log_posterior} > " - f"gradient_mean_log_posterior={ci.gradient_mean_log_posterior}" - ) - tau_usable = False - - else: - logger.debug("ACT is nan") - gradient_tau = np.nan - gradient_mean_log_posterior = np.nan - tau_usable = False - - if nsteps < tau_int * ci.autocorr_tol: - logger.debug("ACT less than autocorr_tol") - tau_usable = False - elif tau_int < ci.min_tau: - logger.debug("ACT less than min_tau") - tau_usable = False - - # Print an update on the progress - if verbose: - print_progress( - iteration, - sampler, - time_per_check, - nsamples_effective, - samples_per_check, - tau_int, - gradient_tau, - gradient_mean_log_posterior, - tau_usable, - convergence_inputs, - gelman_rubin_statistic, - ) - - stop = converged and tau_usable - return stop, nburn, thin, tau_int, nsamples_effective - - -def get_max_gradient(x, axis=0, window_length=11, polyorder=2, smooth=False): - """Calculate the maximum value of the gradient in the input data. - - Applies a Savitzky-Golay filter (`scipy.signal.savgol_filter`) to the input - data x, along a particular axis. This filter smooths the data and, as configured - in this function, simultaneously calculates the derivative of the smoothed data. - If smooth=True is provided, it will apply a Savitzky-Golay filter with a - polynomial order of 3 to the input data before applying this filter a second - time and calculating the derivative. 
This function will return the maximum value - of the derivative returned by the filter. - - See https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.savgol_filter.html - for more information on the Savitzky-Golay filter that we use. Some parameter - documentation has been borrowed from this source. - - Parameters - ---------- - x : np.ndarray - Array of input data (can be int or float, as `savgol_filter` casts to float). - axis : int, default = 0 - The axis of the input array x over which to calculate the gradient. - window_length : int, default = 11 - The length of the filter window (i.e., the number of coefficients to use - in approximating the data). - polyorder : int, default = 2 - The order of the polynomial used to fit the samples. polyorder must be less - than window_length. - smooth : bool, default = False - If true, this will smooth the data with a Savitzky-Golay filter before - providing it to the Savitzky-Golay filter for calculating the derviative. - Probably useful if you think your input data is especially noisy. - - Returns - ------- - max_gradient : float - Maximum value of the gradient. - """ - - from scipy.signal import savgol_filter - - if smooth: - x = savgol_filter(x, axis=axis, window_length=window_length, polyorder=3) - return np.max( - savgol_filter( - x, axis=axis, window_length=window_length, polyorder=polyorder, deriv=1 - ) - ) - - -def get_Q_convergence(samples): - """Calculate the Gelman-Rubin statistic as an estimate of convergence for - an ensemble of MCMC walkers. - - Calculates the Gelman-Rubin statistic, from Gelman and Rubin (1992). - See section 2.2 of Gelman and Rubin (1992), at - https://doi.org/10.1214/ss/1177011136. - - There is also a good description of this statistic in section 7.4.2 - of "Advanced Statistical Computing" (Peng 2021), in-progress course notes, - currently found at - https://bookdown.org/rdpeng/advstatcomp/monitoring-convergence.html. 
- As of this writing, L in this resource refers to the number of sampling - steps remaining after some have been discarded to achieve burn-in, - equivalent to nsteps here. Paraphrasing, we compare the variance between - our walkers (chains) to the variance within each walker (compare - inter-walker vs. intra-walker variance). We do this because our walkers - should be indistinguishable from one another when they reach a steady-state, - i.e. convergence. Looking at V-hat in the definition of this function, we - can see that as nsteps -> infinity, B (inter-chain variance) -> 0, - R -> 1; so, R >~ 1 is a good condition for the convergence of our ensemble. - - In practice, this function calculates the Gelman-Rubin statistic for - each dimension, and then returns the largest of these values. This - means that we can be sure that, once the walker with the largest such value - achieves a Gelman-Rubin statistic of >~ 1, the others have as well. - - Parameters - ---------- - samples: np.ndarray - Array of ensemble MCMC samples, shaped like (number of walkers, number - of MCMC steps, number of dimensions). - - Returns - ------- - Q: float - The largest value of the Gelman-Rubin statistic, from those values - calculated for each dimension. If only one step is represented in - samples, this returns np.inf. 
- """ - - nwalkers, nsteps, ndim = samples.shape - if nsteps > 1: - W = np.mean(np.var(samples, axis=1), axis=0) - - per_walker_mean = np.mean(samples, axis=1) - mean = np.mean(per_walker_mean, axis=0) - B = nsteps / (nwalkers - 1.0) * np.sum((per_walker_mean - mean) ** 2, axis=0) - - Vhat = (nsteps - 1) / nsteps * W + (nwalkers + 1) / (nwalkers * nsteps) * B - Q_per_dim = np.sqrt(Vhat / W) - return np.max(Q_per_dim) - else: - return np.inf - - -def print_progress( - iteration, - sampler, - time_per_check, - nsamples_effective, - samples_per_check, - tau_int, - gradient_tau, - gradient_mean_log_posterior, - tau_usable, - convergence_inputs, - Q, -): - # Setup acceptance string - acceptance = sampler.acceptance_fraction[0, :] - acceptance_str = f"{np.min(acceptance):1.2f}-{np.max(acceptance):1.2f}" - - # Setup tswap acceptance string - tswap_acceptance_fraction = sampler.tswap_acceptance_fraction - tswap_acceptance_str = f"{np.min(tswap_acceptance_fraction):1.2f}-{np.max(tswap_acceptance_fraction):1.2f}" - - ave_time_per_check = np.mean(time_per_check[-3:]) - time_left = ( - (convergence_inputs.nsamples - nsamples_effective) - * ave_time_per_check - / samples_per_check - ) - if time_left > 0: - time_left = str(datetime.timedelta(seconds=int(time_left))) - else: - time_left = "waiting on convergence" - - sampling_time = datetime.timedelta(seconds=np.sum(time_per_check)) - - tau_str = f"{tau_int}(+{gradient_tau:0.2f},+{gradient_mean_log_posterior:0.2f})" - - if tau_usable: - tau_str = f"={tau_str}" - else: - tau_str = f"!{tau_str}" - - Q_str = f"{Q:0.2f}" - - evals_per_check = ( - sampler.nwalkers * sampler.ntemps * convergence_inputs.niterations_per_check - ) - - approximate_ncalls = ( - convergence_inputs.niterations_per_check - * iteration - * sampler.nwalkers - * sampler.ntemps - ) - ncalls = f"{approximate_ncalls:1.1e}" - eval_timing = f"{1000.0 * ave_time_per_check / evals_per_check:1.2f}ms/ev" - - try: - print( - 
f"{iteration}|{str(sampling_time).split('.')[0]}|nc:{ncalls}|" - f"a0:{acceptance_str}|swp:{tswap_acceptance_str}|" - f"n:{nsamples_effective}<{convergence_inputs.nsamples}|t{tau_str}|" - f"q:{Q_str}|{eval_timing}", - flush=True, - ) - except OSError as e: - logger.debug(f"Failed to print iteration due to :{e}") - - -def calculate_tau_array(samples, search_parameter_keys, ci): - """Calculate the autocorrelation time for zero-temperature chains. - - Calculates the autocorrelation time for each chain, for those parameters/ - dimensions that are not explicitly excluded in ci.ignore_keys_for_tau. - - Parameters - ---------- - samples: np.ndarray - Array of ensemble MCMC samples, shaped like (number of walkers, number - of MCMC steps, number of dimensions). - search_parameter_keys: list - A list of the search parameter keys - ci : collections.namedtuple - Collection of settings for convergence tests, including autocorrelation - calculation. If a value in search_parameter_keys is included in - ci.ignore_keys_for_tau, this function will not calculate an - autocorrelation time for any walker along that particular dimension. - - Returns - ------- - tau_array: np.ndarray - Float array shaped like (nwalkers, ndim) (with all np.inf for any - dimension that is excluded by ci.ignore_keys_for_tau). 
- """ - - import emcee - - nwalkers, nsteps, ndim = samples.shape - tau_array = np.zeros((nwalkers, ndim)) + np.inf - if nsteps > 1: - for ii in range(nwalkers): - for jj, key in enumerate(search_parameter_keys): - if ci.ignore_keys_for_tau and ci.ignore_keys_for_tau in key: - continue - try: - tau_array[ii, jj] = emcee.autocorr.integrated_time( - samples[ii, :, jj], c=ci.autocorr_c, tol=0 - )[0] - except emcee.autocorr.AutocorrError: - tau_array[ii, jj] = np.inf - return tau_array - - -def checkpoint( - iteration, - outdir, - label, - nsamples_effective, - sampler, - discard, - nburn, - thin, - search_parameter_keys, - resume_file, - log_likelihood_array, - log_posterior_array, - chain_array, - pos0, - beta_list, - tau_list, - tau_list_n, - Q_list, - time_per_check, -): - logger.info("Writing checkpoint and diagnostics") - ndim = sampler.dim - - # Store the samples if possible - if nsamples_effective > 0: - filename = f"{outdir}/{label}_samples.txt" - samples = np.array(chain_array)[ - :, discard + nburn : iteration : thin, : - ].reshape((-1, ndim)) - df = pd.DataFrame(samples, columns=search_parameter_keys) - df.to_csv(filename, index=False, header=True, sep=" ") - - # Pickle the resume artefacts - pool = sampler.pool - sampler.pool = None - sampler_copy = copy.deepcopy(sampler) - sampler.pool = pool - - data = dict( - iteration=iteration, - sampler=sampler_copy, - beta_list=beta_list, - tau_list=tau_list, - tau_list_n=tau_list_n, - Q_list=Q_list, - time_per_check=time_per_check, - log_likelihood_array=log_likelihood_array, - log_posterior_array=log_posterior_array, - chain_array=chain_array, - pos0=pos0, - ) - - safe_file_dump(data, resume_file, "dill") - del data, sampler_copy - logger.info("Finished writing checkpoint") - - -def plot_walkers(walkers, nburn, thin, parameter_labels, outdir, label, discard=0): - """Method to plot the trace of the walkers in an ensemble MCMC plot""" - import matplotlib.pyplot as plt - - nwalkers, nsteps, ndim = walkers.shape - if 
np.isnan(nburn): - nburn = nsteps - if np.isnan(thin): - thin = 1 - idxs = np.arange(nsteps) - fig, axes = plt.subplots(nrows=ndim, ncols=2, figsize=(8, 3 * ndim)) - scatter_kwargs = dict( - lw=0, - marker="o", - markersize=1, - alpha=0.1, - ) - - # Plot the fixed burn-in - if discard > 0: - for i, (ax, axh) in enumerate(axes): - ax.plot( - idxs[:discard], - walkers[:, :discard, i].T, - color="gray", - **scatter_kwargs, - ) - - # Plot the burn-in - for i, (ax, axh) in enumerate(axes): - ax.plot( - idxs[discard : discard + nburn + 1], - walkers[:, discard : discard + nburn + 1, i].T, - color="C1", - **scatter_kwargs, - ) - - # Plot the thinned posterior samples - for i, (ax, axh) in enumerate(axes): - ax.plot( - idxs[discard + nburn :: thin], - walkers[:, discard + nburn :: thin, i].T, - color="C0", - **scatter_kwargs, - ) - axh.hist( - walkers[:, discard + nburn :: thin, i].reshape((-1)), bins=50, alpha=0.8 - ) - - for i, (ax, axh) in enumerate(axes): - axh.set_xlabel(parameter_labels[i]) - ax.set_ylabel(parameter_labels[i]) - - fig.tight_layout() - filename = f"{outdir}/{label}_checkpoint_trace.png" - fig.savefig(filename) - plt.close(fig) - - -def plot_tau( - tau_list_n, - tau_list, - search_parameter_keys, - outdir, - label, - tau, - autocorr_tau, -): - import matplotlib.pyplot as plt - - fig, ax = plt.subplots() - for i, key in enumerate(search_parameter_keys): - ax.plot(tau_list_n, np.array(tau_list)[:, i], label=key) - ax.set_xlabel("Iteration") - ax.set_ylabel(r"$\langle \tau \rangle$") - ax.legend() - fig.tight_layout() - fig.savefig(f"{outdir}/{label}_checkpoint_tau.png") - plt.close(fig) - - -def plot_mean_log_posterior(mean_log_posterior, outdir, label): - import matplotlib.pyplot as plt - - mean_log_posterior[mean_log_posterior < -1e100] = np.nan - - ntemps, nsteps = mean_log_posterior.shape - ymax = np.nanmax(mean_log_posterior) - ymin = np.nanmin(mean_log_posterior[:, -100:]) - ymax += 0.1 * (ymax - ymin) - ymin -= 0.1 * (ymax - ymin) - - fig, ax = 
plt.subplots() - idxs = np.arange(nsteps) - ax.plot(idxs, mean_log_posterior.T) - ax.set( - xlabel="Iteration", - ylabel=r"$\langle\mathrm{log-posterior}\rangle$", - ylim=(ymin, ymax), - ) - fig.tight_layout() - fig.savefig(f"{outdir}/{label}_checkpoint_meanlogposterior.png") - plt.close(fig) - - -def compute_evidence( - sampler, - log_likelihood_array, - outdir, - label, - discard, - nburn, - thin, - iteration, - make_plots=True, -): - """Computes the evidence using thermodynamic integration""" - import matplotlib.pyplot as plt - - betas = sampler.betas - # We compute the evidence without the burnin samples, but we do not thin - lnlike = log_likelihood_array[:, :, discard + nburn : iteration] - mean_lnlikes = np.mean(np.mean(lnlike, axis=1), axis=1) - - mean_lnlikes = mean_lnlikes[::-1] - betas = betas[::-1] - - if any(np.isinf(mean_lnlikes)): - logger.warning( - "mean_lnlikes contains inf: recalculating without" - f" the {len(betas[np.isinf(mean_lnlikes)])} infs" - ) - idxs = np.isinf(mean_lnlikes) - mean_lnlikes = mean_lnlikes[~idxs] - betas = betas[~idxs] - - lnZ = trapezoid(mean_lnlikes, betas) - z1 = trapezoid(mean_lnlikes, betas) - z2 = trapezoid(mean_lnlikes[::-1][::2][::-1], betas[::-1][::2][::-1]) - lnZerr = np.abs(z1 - z2) - - if make_plots: - fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(6, 8)) - ax1.semilogx(betas, mean_lnlikes, "-o") - ax1.set_xlabel(r"$\beta$") - ax1.set_ylabel(r"$\langle \log(\mathcal{L}) \rangle$") - min_betas = [] - evidence = [] - for i in range(int(len(betas) / 2.0)): - min_betas.append(betas[i]) - evidence.append(trapezoid(mean_lnlikes[i:], betas[i:])) - - ax2.semilogx(min_betas, evidence, "-o") - ax2.set_ylabel( - r"$\int_{\beta_{min}}^{\beta=1}" - + r"\langle \log(\mathcal{L})\rangle d\beta$", - size=16, - ) - ax2.set_xlabel(r"$\beta_{min}$") - plt.tight_layout() - fig.savefig(f"{outdir}/{label}_beta_lnl.png") - plt.close(fig) - - return lnZ, lnZerr - - -def do_nothing_function(): - """This is a do-nothing function, we 
overwrite the likelihood and prior elsewhere""" - pass diff --git a/bilby/core/sampler/ptmcmc.py b/bilby/core/sampler/ptmcmc.py deleted file mode 100644 index c8a790953..000000000 --- a/bilby/core/sampler/ptmcmc.py +++ /dev/null @@ -1,256 +0,0 @@ -import glob -import shutil -import warnings - -import numpy as np - -from ..utils import logger -from .base_sampler import MCMCSampler, SamplerNotInstalledError, signal_wrapper - - -class PTMCMCSampler(MCMCSampler): - """bilby wrapper of PTMCMC (https://github.com/jellis18/PTMCMCSampler/) - - All positional and keyword arguments (i.e., the args and kwargs) passed to - `run_sampler` will be propagated to `PTMCMCSampler.PTMCMCSampler`, see - documentation for that class for further help. Under Other Parameters, we - list commonly used kwargs and the bilby defaults. - - Parameters - ========== - Niter: int (2*10**4 + 1) - The number of mcmc steps - burn: int (5 * 10**3) - If given, the fixed number of steps to discard as burn-in - thin: int (1) - The number of steps before saving the sample to the chain - custom_proposals: dict (None) - Add dictionary of proposals to the array of proposals, this must be in - the form of a dictionary with the name of the proposal, then a list - containing the jump function and the weight e.g {'name' : [function , - weight]} see - (https://github.com/rgreen1995/PTMCMCSampler/blob/master/examples/simple.ipynb) - and - (http://jellis18.github.io/PTMCMCSampler/PTMCMCSampler.html#ptmcmcsampler-ptmcmcsampler-module) - for examples and more info. - logl_grad: func (None) - Gradient of likelihood if known (default = None) - logp_grad: func (None) - Gradient of prior if known (default = None) - verbose: bool (True) - Update current run-status to the screen - - """ - - msg = ( - "The PTMCMC sampler interface in bilby is deprecated and will" - " be removed in Bilby version 3. Please use the `ptmcmc-bilby`" - "sampler plugin instead: https://github.com/bilby-dev/ptmcmc-bilby." 
- ) - warnings.warn(msg, FutureWarning) - - sampler_name = "ptmcmcsampler" - abbreviation = "ptmcmc_temp" - default_kwargs = { - "p0": None, - "Niter": 2 * 10**4 + 1, - "neff": 10**4, - "burn": 5 * 10**3, - "verbose": True, - "ladder": None, - "Tmin": 1, - "Tmax": None, - "Tskip": 100, - "isave": 1000, - "thin": 1, - "covUpdate": 1000, - "SCAMweight": 1, - "AMweight": 1, - "DEweight": 1, - "HMCweight": 0, - "MALAweight": 0, - "NUTSweight": 0, - "HMCstepsize": 0.1, - "HMCsteps": 300, - "groups": None, - "custom_proposals": None, - "loglargs": {}, - "loglkwargs": {}, - "logpargs": {}, - "logpkwargs": {}, - "logl_grad": None, - "logp_grad": None, - "outDir": None, - } - hard_exit = True - - def __init__( - self, - likelihood, - priors, - outdir="outdir", - label="label", - use_ratio=False, - plot=False, - skip_import_verification=False, - **kwargs, - ): - - super(PTMCMCSampler, self).__init__( - likelihood=likelihood, - priors=priors, - outdir=outdir, - label=label, - use_ratio=use_ratio, - plot=plot, - skip_import_verification=skip_import_verification, - **kwargs, - ) - - if self.kwargs["p0"] is None: - self.p0 = self.get_random_draw_from_prior() - else: - self.p0 = self.kwargs["p0"] - self.likelihood = likelihood - self.priors = priors - - def _verify_external_sampler(self): - # PTMCMC is imported with Caps so need to overwrite the parent function - # which forces `__name__.lower() - external_sampler_name = self.__class__.__name__ - try: - __import__(external_sampler_name) - except (ImportError, SystemExit): - raise SamplerNotInstalledError( - f"Sampler {external_sampler_name} is not installed on this system" - ) - - def _translate_kwargs(self, kwargs): - kwargs = super()._translate_kwargs(kwargs) - if "Niter" not in kwargs: - for equiv in self.nwalkers_equiv_kwargs: - if equiv in kwargs: - kwargs["Niter"] = kwargs.pop(equiv) - if "burn" not in kwargs: - for equiv in self.nburn_equiv_kwargs: - if equiv in kwargs: - kwargs["burn"] = kwargs.pop(equiv) - - @property - 
def custom_proposals(self): - return self.kwargs["custom_proposals"] - - @property - def sampler_init_kwargs(self): - keys = [ - "groups", - "loglargs", - "logp_grad", - "logpkwargs", - "loglkwargs", - "logl_grad", - "logpargs", - "outDir", - "verbose", - ] - init_kwargs = {key: self.kwargs[key] for key in keys} - if init_kwargs["outDir"] is None: - init_kwargs["outDir"] = f"{self.outdir}/ptmcmc_temp_{self.label}/" - return init_kwargs - - @property - def sampler_function_kwargs(self): - keys = [ - "Niter", - "neff", - "Tmin", - "HMCweight", - "covUpdate", - "SCAMweight", - "ladder", - "burn", - "NUTSweight", - "AMweight", - "MALAweight", - "thin", - "HMCstepsize", - "isave", - "Tskip", - "HMCsteps", - "Tmax", - "DEweight", - ] - sampler_kwargs = {key: self.kwargs[key] for key in keys} - return sampler_kwargs - - @staticmethod - def _import_external_sampler(): - from PTMCMCSampler import PTMCMCSampler - - return PTMCMCSampler - - @signal_wrapper - def run_sampler(self): - PTMCMCSampler = self._import_external_sampler() - sampler = PTMCMCSampler.PTSampler( - ndim=self.ndim, - logp=self.log_prior, - logl=self.log_likelihood, - cov=np.eye(self.ndim), - **self.sampler_init_kwargs, - ) - if self.custom_proposals is not None: - for proposal in self.custom_proposals: - logger.info( - f"Adding {proposal} to proposals with weight {self.custom_proposals[proposal][1]}" - ) - sampler.addProposalToCycle( - self.custom_proposals[proposal][0], - self.custom_proposals[proposal][1], - ) - sampler.sample(p0=self.p0, **self.sampler_function_kwargs) - samples, meta, loglike = self.__read_in_data() - - self.calc_likelihood_count() - self.result.nburn = self.sampler_function_kwargs["burn"] - self.result.samples = samples[self.result.nburn :] - self.meta_data["sampler_meta"] = meta - self.result.log_likelihood_evaluations = loglike[self.result.nburn :] - self.result.sampler_output = np.nan - self.result.walkers = np.nan - self.result.log_evidence = np.nan - self.result.log_evidence_err = 
np.nan - return self.result - - def __read_in_data(self): - """Read the data stored by PTMCMC to disk""" - temp_outDir = self.sampler_init_kwargs["outDir"] - try: - data = np.loadtxt(f"{temp_outDir}chain_1.txt") - except OSError: - data = np.loadtxt(f"{temp_outDir}chain_1.0.txt") - jumpfiles = glob.glob(f"{temp_outDir}/*jump.txt") - jumps = map(np.loadtxt, jumpfiles) - samples = data[:, :-4] - loglike = data[:, -3] - - jump_accept = {} - for ct, j in enumerate(jumps): - label = jumpfiles[ct].split("/")[-1].split("_jump.txt")[0] - jump_accept[label] = j - PT_swap = {"swap_accept": data[:, -1]} - tot_accept = {"tot_accept": data[:, -2]} - log_post = {"log_post": data[:, -4]} - meta = {} - meta["tot_accept"] = tot_accept - meta["PT_swap"] = PT_swap - meta["proposals"] = jump_accept - meta["log_post"] = log_post - - shutil.rmtree(temp_outDir) - - return samples, meta, loglike - - def write_current_state(self): - """TODO: implement a checkpointing method""" - pass diff --git a/bilby/core/sampler/pymc.py b/bilby/core/sampler/pymc.py deleted file mode 100644 index 90a607c4d..000000000 --- a/bilby/core/sampler/pymc.py +++ /dev/null @@ -1,1013 +0,0 @@ -import warnings - -import numpy as np - -from ...gw.likelihood import BasicGravitationalWaveTransient, GravitationalWaveTransient -from ..likelihood import ( - ExponentialLikelihood, - GaussianLikelihood, - PoissonLikelihood, - StudentTLikelihood, - _safe_likelihood_call, -) -from ..prior import Cosine, DeltaFunction, MultivariateGaussian, PowerLaw, Sine -from ..utils import derivatives, infer_args_from_method -from .base_sampler import MCMCSampler - - -class Pymc(MCMCSampler): - """bilby wrapper of the PyMC sampler (https://www.pymc.io/) - - All keyword arguments (i.e., the kwargs) passed to `run_sampler` will be - propapated to `pymc.sample` where appropriate, see documentation for that - class for further help. Under Other Parameters, we list commonly used - kwargs and the bilby, or where appropriate, PyMC defaults. 
- - Parameters - ========== - draws: int, (1000) - The number of sample draws from the posterior per chain. - chains: int, (2) - The number of independent MCMC chains to run. - cores: int, (1) - The number of CPU cores to use. - tune: int, (500) - The number of tuning (or burn-in) samples per chain. - discard_tuned_samples: bool, True - Set whether to automatically discard the tuning samples from the final - chains. - step: str, dict - Provide a step method name, or dictionary of step method names keyed to - particular variable names (these are case insensitive). If passing a - dictionary of methods, the values keyed on particular variables can be - lists of methods to form compound steps. If no method is provided for - any particular variable then PyMC will automatically decide upon a - default, with the first option being the NUTS sampler. The currently - allowed methods are 'NUTS', 'HamiltonianMC', 'Metropolis', - 'BinaryMetropolis', 'BinaryGibbsMetropolis', 'Slice', and - 'CategoricalGibbsMetropolis'. Note: you cannot provide a PyMC step - method function itself here as it is outside of the model context - manager. - step_kwargs: dict - Options for steps methods other than NUTS. The dictionary is keyed on - lowercase step method names with values being dictionaries of keywords - for the given step method. - - """ - - msg = ( - "The PyMC sampler interface in bilby is deprecated and will" - " be removed in Bilby version 3. Please use the `pymc-bilby`" - "sampler plugin instead: https://github.com/bilby-dev/pymc-bilby." 
- ) - warnings.warn(msg, FutureWarning) - - sampler_name = "pymc" - default_kwargs = dict( - draws=500, - step=None, - init="auto", - n_init=200000, - initvals=None, - trace=None, - chains=2, - cores=1, - tune=500, - progressbar=True, - model=None, - random_seed=None, - discard_tuned_samples=True, - compute_convergence_checks=True, - nuts_kwargs=None, - step_kwargs=None, - ) - - default_nuts_kwargs = dict( - target_accept=None, - max_treedepth=None, - step_scale=None, - Emax=None, - gamma=None, - k=None, - t0=None, - adapt_step_size=None, - early_max_treedepth=None, - scaling=None, - is_cov=None, - potential=None, - ) - - default_kwargs.update(default_nuts_kwargs) - - sampling_seed_key = "random_seed" - - def __init__( - self, - likelihood, - priors, - outdir="outdir", - label="label", - use_ratio=False, - plot=False, - skip_import_verification=False, - **kwargs, - ): - # add default step kwargs - _, STEP_METHODS, _ = self._import_external_sampler() - self.default_step_kwargs = {m.__name__.lower(): None for m in STEP_METHODS} - self.default_kwargs.update(self.default_step_kwargs) - - super(Pymc, self).__init__( - likelihood=likelihood, - priors=priors, - outdir=outdir, - label=label, - use_ratio=use_ratio, - plot=plot, - skip_import_verification=skip_import_verification, - **kwargs, - ) - self.draws = self._kwargs["draws"] - self.chains = self._kwargs["chains"] - - @staticmethod - def _import_external_sampler(): - import pymc - from pymc import floatX - from pymc.step_methods import STEP_METHODS - - return pymc, STEP_METHODS, floatX - - @staticmethod - def _import_tensor(): - try: - import pytensor as tensor # noqa - import pytensor.tensor as tt - from pytensor.compile.ops import as_op # noqa - except ImportError: - import aesara as tensor # noqa - import aesara.tensor as tt - from aesara.compile.ops import as_op # noqa - - return tensor, tt, as_op - - def _verify_parameters(self): - """ - Change `_verify_parameters()` to just pass, i.e., don't try and - evaluate 
the likelihood for PyMC. - """ - pass - - def _verify_use_ratio(self): - """ - Change `_verify_use_ratio() to just pass. - """ - pass - - def setup_prior_mapping(self): - """ - Set the mapping between predefined bilby priors and the equivalent - PyMC distributions. - """ - - prior_map = {} - self.prior_map = prior_map - - # predefined PyMC distributions - prior_map["Gaussian"] = { - "pymc": "Normal", - "argmap": {"mu": "mu", "sigma": "sigma"}, - } - prior_map["TruncatedGaussian"] = { - "pymc": "TruncatedNormal", - "argmap": { - "mu": "mu", - "sigma": "sigma", - "minimum": "lower", - "maximum": "upper", - }, - } - prior_map["HalfGaussian"] = {"pymc": "HalfNormal", "argmap": {"sigma": "sigma"}} - prior_map["Uniform"] = { - "pymc": "Uniform", - "argmap": {"minimum": "lower", "maximum": "upper"}, - } - prior_map["LogNormal"] = { - "pymc": "Lognormal", - "argmap": {"mu": "mu", "sigma": "sigma"}, - } - prior_map["Exponential"] = { - "pymc": "Exponential", - "argmap": {"mu": "lam"}, - "argtransform": {"mu": lambda mu: 1.0 / mu}, - } - prior_map["StudentT"] = { - "pymc": "StudentT", - "argmap": {"df": "nu", "mu": "mu", "scale": "sigma"}, - } - prior_map["Beta"] = { - "pymc": "Beta", - "argmap": {"alpha": "alpha", "beta": "beta"}, - } - prior_map["Logistic"] = { - "pymc": "Logistic", - "argmap": {"mu": "mu", "scale": "s"}, - } - prior_map["Cauchy"] = { - "pymc": "Cauchy", - "argmap": {"alpha": "alpha", "beta": "beta"}, - } - prior_map["Gamma"] = { - "pymc": "Gamma", - "argmap": {"k": "alpha", "theta": "beta"}, - "argtransform": {"theta": lambda theta: 1.0 / theta}, - } - prior_map["ChiSquared"] = {"pymc": "ChiSquared", "argmap": {"nu": "nu"}} - prior_map["Interped"] = { - "pymc": "Interpolated", - "argmap": {"xx": "x_points", "yy": "pdf_points"}, - } - prior_map["Normal"] = prior_map["Gaussian"] - prior_map["TruncatedNormal"] = prior_map["TruncatedGaussian"] - prior_map["HalfNormal"] = prior_map["HalfGaussian"] - prior_map["LogGaussian"] = prior_map["LogNormal"] - 
prior_map["Lorentzian"] = prior_map["Cauchy"] - prior_map["FromFile"] = prior_map["Interped"] - - # GW specific priors - prior_map["UniformComovingVolume"] = prior_map["Interped"] - - # internally defined mappings for bilby priors - prior_map["DeltaFunction"] = {"internal": self._deltafunction_prior} - prior_map["Sine"] = {"internal": self._sine_prior} - prior_map["Cosine"] = {"internal": self._cosine_prior} - prior_map["PowerLaw"] = {"internal": self._powerlaw_prior} - prior_map["LogUniform"] = {"internal": self._powerlaw_prior} - prior_map["MultivariateGaussian"] = { - "internal": self._multivariate_normal_prior - } - prior_map["MultivariateNormal"] = {"internal": self._multivariate_normal_prior} - - def _deltafunction_prior(self, key, **kwargs): - """ - Map the bilby delta function prior to a single value for PyMC. - """ - - # check prior is a DeltaFunction - if isinstance(self.priors[key], DeltaFunction): - return self.priors[key].peak - else: - raise ValueError(f"Prior for '{key}' is not a DeltaFunction") - - def _sine_prior(self, key): - """ - Map the bilby Sine prior to a PyMC style function - """ - - # check prior is a Sine - pymc, _, floatX = self._import_external_sampler() - _, tt, _ = self._import_tensor() - if isinstance(self.priors[key], Sine): - - class PymcSine(pymc.Continuous): - def __init__(self, lower=0.0, upper=np.pi): - if lower >= upper: - raise ValueError("Lower bound is above upper bound!") - - # set the mode - self.lower = lower = tt.as_tensor_variable(floatX(lower)) - self.upper = upper = tt.as_tensor_variable(floatX(upper)) - self.norm = tt.cos(lower) - tt.cos(upper) - self.mean = ( - tt.sin(upper) - + lower * tt.cos(lower) - - tt.sin(lower) - - upper * tt.cos(upper) - ) / self.norm - - transform = pymc.distributions.transforms.interval(lower, upper) - - super(PymcSine, self).__init__(transform=transform) - - def logp(self, value): - upper = self.upper - lower = self.lower - return pymc.distributions.dist_math.bound( - 
tt.log(tt.sin(value) / self.norm), - lower <= value, - value <= upper, - ) - - return PymcSine( - key, lower=self.priors[key].minimum, upper=self.priors[key].maximum - ) - else: - raise ValueError(f"Prior for '{key}' is not a Sine") - - def _cosine_prior(self, key): - """ - Map the bilby Cosine prior to a PyMC style function - """ - - # check prior is a Cosine - pymc, _, floatX = self._import_external_sampler() - _, tt, _ = self._import_tensor() - if isinstance(self.priors[key], Cosine): - - class PymcCosine(pymc.Continuous): - def __init__(self, lower=-np.pi / 2.0, upper=np.pi / 2.0): - if lower >= upper: - raise ValueError("Lower bound is above upper bound!") - - self.lower = lower = tt.as_tensor_variable(floatX(lower)) - self.upper = upper = tt.as_tensor_variable(floatX(upper)) - self.norm = tt.sin(upper) - tt.sin(lower) - self.mean = ( - upper * tt.sin(upper) - + tt.cos(upper) - - lower * tt.sin(lower) - - tt.cos(lower) - ) / self.norm - - transform = pymc.distributions.transforms.interval(lower, upper) - - super(PymcCosine, self).__init__(transform=transform) - - def logp(self, value): - upper = self.upper - lower = self.lower - return pymc.distributions.dist_math.bound( - tt.log(tt.cos(value) / self.norm), - lower <= value, - value <= upper, - ) - - return PymcCosine( - key, lower=self.priors[key].minimum, upper=self.priors[key].maximum - ) - else: - raise ValueError(f"Prior for '{key}' is not a Cosine") - - def _powerlaw_prior(self, key): - """ - Map the bilby PowerLaw prior to a PyMC style function - """ - - # check prior is a PowerLaw - pymc, _, floatX = self._import_external_sampler() - _, tt, _ = self._import_tensor() - if isinstance(self.priors[key], PowerLaw): - - # check power law is set - if not hasattr(self.priors[key], "alpha"): - raise AttributeError("No 'alpha' attribute set for PowerLaw prior") - - if self.priors[key].alpha < -1.0: - # use Pareto distribution - palpha = -(1.0 + self.priors[key].alpha) - - return pymc.Bound(pymc.Pareto, 
upper=self.priors[key].minimum)( - key, alpha=palpha, m=self.priors[key].maximum - ) - else: - - class PymcPowerLaw(pymc.Continuous): - def __init__(self, lower, upper, alpha, testval=1): - falpha = alpha - self.lower = lower = tt.as_tensor_variable(floatX(lower)) - self.upper = upper = tt.as_tensor_variable(floatX(upper)) - self.alpha = alpha = tt.as_tensor_variable(floatX(alpha)) - - if falpha == -1: - self.norm = 1.0 / (tt.log(self.upper / self.lower)) - else: - beta = 1.0 + self.alpha - self.norm = 1.0 / ( - beta - * (tt.pow(self.upper, beta) - tt.pow(self.lower, beta)) - ) - - transform = pymc.distributions.transforms.interval(lower, upper) - - super(PymcPowerLaw, self).__init__( - transform=transform, testval=testval - ) - - def logp(self, value): - upper = self.upper - lower = self.lower - alpha = self.alpha - - return pymc.distributions.dist_math.bound( - alpha * tt.log(value) + tt.log(self.norm), - lower <= value, - value <= upper, - ) - - return PymcPowerLaw( - key, - lower=self.priors[key].minimum, - upper=self.priors[key].maximum, - alpha=self.priors[key].alpha, - ) - else: - raise ValueError(f"Prior for '{key}' is not a Power Law") - - def _multivariate_normal_prior(self, key): - """ - Map the bilby MultivariateNormal prior to a PyMC style function. 
- """ - - # check prior is a PowerLaw - pymc, _, _ = self._import_external_sampler() - if isinstance(self.priors[key], MultivariateGaussian): - # get names of multivariate Gaussian parameters - mvpars = self.priors[key].mvg.names - - # set the prior on multiple parameters if not present yet - if not np.all([p in self.multivariate_normal_sets for p in mvpars]): - mvg = self.priors[key].mvg - - # get bounds - lower = [bound[0] for bound in mvg.bounds.values()] - upper = [bound[1] for bound in mvg.bounds.values()] - - # test values required for mixture - testvals = [] - for bound in mvg.bounds.values(): - if np.isinf(bound[0]) and np.isinf(bound[1]): - testvals.append(0.0) - elif np.isinf(bound[0]): - testvals.append(bound[1] - 1.0) - elif np.isinf(bound[1]): - testvals.append(bound[0] + 1.0) - else: - # half-way between the two bounds - testvals.append(bound[0] + (bound[1] - bound[0]) / 2.0) - - # if bounds are at +/-infinity set to 100 sigmas as infinities - # cause problems for the Bound class - maxmu = np.max(mvg.mus, axis=0) - minmu = np.min(mvg.mus, axis=0) - maxsigma = np.max(mvg.sigmas, axis=0) - for i in range(len(mvpars)): - if np.isinf(lower[i]): - lower[i] = minmu[i] - 100.0 * maxsigma[i] - if np.isinf(upper[i]): - upper[i] = maxmu[i] + 100.0 * maxsigma[i] - - # create a bounded MultivariateNormal distribution - BoundedMvN = pymc.Bound(pymc.MvNormal, lower=lower, upper=upper) - - comp_dists = [] # list of any component modes - for i in range(mvg.nmodes): - comp_dists.append( - BoundedMvN( - f"comp{i}", - mu=mvg.mus[i], - cov=mvg.covs[i], - shape=len(mvpars), - ).distribution - ) - - # create a Mixture model - setname = f"mixture{self.multivariate_normal_num_sets}" - mix = pymc.Mixture( - setname, - w=mvg.weights, - comp_dists=comp_dists, - shape=len(mvpars), - testval=testvals, - ) - - for i, p in enumerate(mvpars): - self.multivariate_normal_sets[p] = {} - self.multivariate_normal_sets[p]["prior"] = mix[i] - self.multivariate_normal_sets[p]["set"] = 
setname - self.multivariate_normal_sets[p]["index"] = i - - self.multivariate_normal_num_sets += 1 - - # return required parameter - return self.multivariate_normal_sets[key]["prior"] - - else: - raise ValueError(f"Prior for '{key}' is not a MultivariateGaussian") - - def run_sampler(self): - # set the step method - pymc, STEP_METHODS, floatX = self._import_external_sampler() - step_methods = {m.__name__.lower(): m.__name__ for m in STEP_METHODS} - if "step" in self._kwargs: - self.step_method = self._kwargs.pop("step") - - # 'step' could be a dictionary of methods for different parameters, - # so check for this - if self.step_method is None: - pass - elif isinstance(self.step_method, dict): - for key in self.step_method: - if key not in self._search_parameter_keys: - raise ValueError( - f"Setting a step method for an unknown parameter '{key}'" - ) - else: - # check if using a compound step (a list of step - # methods for a particular parameter) - if isinstance(self.step_method[key], list): - sms = self.step_method[key] - else: - sms = [self.step_method[key]] - for sm in sms: - if sm.lower() not in step_methods: - raise ValueError( - f"Using invalid step method '{self.step_method[key]}'" - ) - else: - # check if using a compound step (a list of step - # methods for a particular parameter) - if isinstance(self.step_method, list): - sms = self.step_method - else: - sms = [self.step_method] - - for i in range(len(sms)): - if sms[i].lower() not in step_methods: - raise ValueError(f"Using invalid step method '{sms[i]}'") - else: - self.step_method = None - - # initialise the PyMC model - self.pymc_model = pymc.Model() - - # set the prior - self.set_prior() - - # if a custom log_likelihood function requires a `sampler` argument - # then use that log_likelihood function, with the assumption that it - # takes in a Pymc Sampler, with a pymc_model attribute, and defines - # the likelihood within that context manager - likeargs = 
infer_args_from_method(self.likelihood.log_likelihood) - if "sampler" in likeargs: - self.likelihood.log_likelihood(sampler=self) - else: - # set the likelihood function from predefined functions - self.set_likelihood() - - # get the step method keyword arguments - step_kwargs = self.kwargs.pop("step_kwargs") - if step_kwargs is not None: - # remove all individual default step kwargs if passed together using - # step_kwargs keywords - for key in self.default_step_kwargs: - self.kwargs.pop(key) - else: - # remove any None default step keywords and place others in step_kwargs - step_kwargs = {} - for key in self.default_step_kwargs: - if self.kwargs[key] is None: - self.kwargs.pop(key) - else: - step_kwargs[key] = self.kwargs.pop(key) - - nuts_kwargs = self.kwargs.pop("nuts_kwargs") - if nuts_kwargs is not None: - # remove all individual default nuts kwargs if passed together using - # nuts_kwargs keywords - for key in self.default_nuts_kwargs: - self.kwargs.pop(key) - else: - # remove any None default nuts keywords and place others in nut_kwargs - nuts_kwargs = {} - for key in self.default_nuts_kwargs: - if self.kwargs[key] is None: - self.kwargs.pop(key) - else: - nuts_kwargs[key] = self.kwargs.pop(key) - methodslist = [] - - # set the step method - if isinstance(self.step_method, dict): - # create list of step methods (any not given will default to NUTS) - self.kwargs["step"] = [] - with self.pymc_model: - for key in self.step_method: - # check for a compound step list - if isinstance(self.step_method[key], list): - for sms in self.step_method[key]: - curmethod = sms.lower() - methodslist.append(curmethod) - nuts_kwargs = self._create_nuts_kwargs( - curmethod, - key, - nuts_kwargs, - pymc, - step_kwargs, - step_methods, - ) - else: - curmethod = self.step_method[key].lower() - methodslist.append(curmethod) - nuts_kwargs = self._create_nuts_kwargs( - curmethod, - key, - nuts_kwargs, - pymc, - step_kwargs, - step_methods, - ) - else: - with self.pymc_model: - # 
check for a compound step list - if isinstance(self.step_method, list): - compound = [] - for sms in self.step_method: - curmethod = sms.lower() - methodslist.append(curmethod) - args, nuts_kwargs = self._create_args_and_nuts_kwargs( - curmethod, nuts_kwargs, step_kwargs - ) - compound.append(pymc.__dict__[step_methods[curmethod]](**args)) - self.kwargs["step"] = compound - else: - self.kwargs["step"] = None - if self.step_method is not None: - curmethod = self.step_method.lower() - methodslist.append(curmethod) - args, nuts_kwargs = self._create_args_and_nuts_kwargs( - curmethod, nuts_kwargs, step_kwargs - ) - self.kwargs["step"] = pymc.__dict__[step_methods[curmethod]]( - **args - ) - - # check whether only NUTS step method has been assigned - if np.all([sm.lower() == "nuts" for sm in methodslist]): - # in this case we can let PyMC autoinitialise NUTS, so remove the step methods and re-add nuts_kwargs - self.kwargs["step"] = None - - if len(nuts_kwargs) > 0: - # add NUTS kwargs to standard kwargs - self.kwargs.update(nuts_kwargs) - - with self.pymc_model: - # perform the sampling and then convert to inference data - trace = pymc.sample(**self.kwargs, return_inferencedata=False) - ikwargs = dict( - model=self.pymc_model, - save_warmup=not self.kwargs["discard_tuned_samples"], - log_likelihood=True, - ) - trace = pymc.to_inference_data(trace, **ikwargs) - - posterior = trace.posterior.to_dataframe().reset_index() - self.result.samples = posterior[self.search_parameter_keys] - self.result.log_likelihood_evaluations = np.sum( - trace.log_likelihood.likelihood.values, axis=-1 - ).flatten() - self.result.sampler_output = np.nan - self.calculate_autocorrelation(self.result.samples) - self.result.log_evidence = np.nan - self.result.log_evidence_err = np.nan - self.calc_likelihood_count() - return self.result - - def _create_args_and_nuts_kwargs(self, curmethod, nuts_kwargs, step_kwargs): - if curmethod == "nuts": - args, nuts_kwargs = self._get_nuts_args(nuts_kwargs, 
step_kwargs) - else: - args = step_kwargs.get(curmethod, {}) - return args, nuts_kwargs - - def _create_nuts_kwargs( - self, curmethod, key, nuts_kwargs, pymc, step_kwargs, step_methods - ): - if curmethod == "nuts": - args, nuts_kwargs = self._get_nuts_args(nuts_kwargs, step_kwargs) - else: - if step_kwargs is not None: - args = step_kwargs.get(curmethod, {}) - else: - args = {} - self.kwargs["step"].append( - pymc.__dict__[step_methods[curmethod]](vars=[self.pymc_priors[key]], **args) - ) - return nuts_kwargs - - @staticmethod - def _get_nuts_args(nuts_kwargs, step_kwargs): - if nuts_kwargs is not None: - args = nuts_kwargs - elif step_kwargs is not None: - args = step_kwargs.pop("nuts", {}) - # add values into nuts_kwargs - nuts_kwargs = args - else: - args = {} - return args, nuts_kwargs - - def set_prior(self): - """ - Set the PyMC prior distributions. - """ - - self.setup_prior_mapping() - - self.pymc_priors = dict() - pymc, _, _ = self._import_external_sampler() - - # initialise a dictionary of multivariate Gaussian parameters - self.multivariate_normal_sets = {} - self.multivariate_normal_num_sets = 0 - - # set the parameter prior distributions (in the model context manager) - with self.pymc_model: - for key in self.priors: - # if the prior contains ln_prob method that takes a 'sampler' argument - # then try using that - lnprobargs = infer_args_from_method(self.priors[key].ln_prob) - if "sampler" in lnprobargs: - try: - self.pymc_priors[key] = self.priors[key].ln_prob(sampler=self) - except RuntimeError: - raise RuntimeError((f"Problem setting PyMC prior for '{key}'")) - else: - # use Prior distribution name - distname = self.priors[key].__class__.__name__ - - if distname in self.prior_map: - # check if we have a predefined PyMC distribution - if ( - "pymc" in self.prior_map[distname] - and "argmap" in self.prior_map[distname] - ): - # check the required arguments for the PyMC distribution - pymcdistname = self.prior_map[distname]["pymc"] - - if 
pymcdistname not in pymc.__dict__: - raise ValueError( - f"Prior '{pymcdistname}' is not a known PyMC distribution." - ) - - reqargs = infer_args_from_method( - pymc.__dict__[pymcdistname].dist - ) - - # set keyword arguments - priorkwargs = {} - for (targ, parg) in self.prior_map[distname][ - "argmap" - ].items(): - if hasattr(self.priors[key], targ): - if parg in reqargs: - if "argtransform" in self.prior_map[distname]: - if ( - targ - in self.prior_map[distname][ - "argtransform" - ] - ): - tfunc = self.prior_map[distname][ - "argtransform" - ][targ] - else: - - def tfunc(x): - return x - - else: - - def tfunc(x): - return x - - priorkwargs[parg] = tfunc( - getattr(self.priors[key], targ) - ) - else: - raise ValueError(f"Unknown argument {parg}") - else: - if parg in reqargs: - priorkwargs[parg] = None - self.pymc_priors[key] = pymc.__dict__[pymcdistname]( - key, **priorkwargs - ) - elif "internal" in self.prior_map[distname]: - self.pymc_priors[key] = self.prior_map[distname][ - "internal" - ](key) - else: - raise ValueError( - f"Prior '{distname}' is not a known distribution." - ) - else: - raise ValueError( - f"Prior '{distname}' is not a known distribution." - ) - - def set_likelihood(self): - """ - Convert any bilby likelihoods to PyMC distributions. 
- """ - - # create Op for the log likelihood if not using a predefined model - pymc, _, _ = self._import_external_sampler() - _, tt, _ = self._import_tensor() - - class LogLike(tt.Op): - - itypes = [tt.dvector] - otypes = [tt.dscalar] - - def __init__(self, parameters, loglike, priors): - self.parameters = parameters - self.likelihood = loglike - self.priors = priors - - # set the fixed parameters - for key in self.priors.keys(): - if isinstance(self.priors[key], float): - self.parameters[key] = self.priors[key] - - self.logpgrad = LogLikeGrad( - self.parameters, self.likelihood, self.priors - ) - - def perform(self, node, inputs, outputs): - (theta,) = inputs - for i, key in enumerate(self.parameters): - self.parameters[key] = theta[i] - - logl = _safe_likelihood_call( - self.likelihood.log_likelihood, self.parameters - ) - outputs[0][0] = np.array(logl) - - def grad(self, inputs, g): - (theta,) = inputs - return [g[0] * self.logpgrad(theta)] - - # create Op for calculating the gradient of the log likelihood - class LogLikeGrad(tt.Op): - - itypes = [tt.dvector] - otypes = [tt.dvector] - - def __init__(self, parameters, loglike, priors): - self.parameters = parameters - self.Nparams = len(parameters) - self.likelihood = loglike - self.priors = priors - - # set the fixed parameters - for key in self.priors.keys(): - if isinstance(self.priors[key], float): - self.parameters[key] = self.priors[key] - - def perform(self, node, inputs, outputs): - (theta,) = inputs - - # define version of likelihood function to pass to derivative function - def lnlike(values): - for i, key in enumerate(self.parameters): - self.parameters[key] = values[i] - return _safe_likelihood_call( - self.likelihood.log_likelihood, self.parameters - ) - - # calculate gradients - grads = derivatives( - theta, lnlike, abseps=1e-5, mineps=1e-12, reltol=1e-2 - ) - - outputs[0][0] = grads - - with self.pymc_model: - # check if it is a predefined likelhood function - if isinstance(self.likelihood, 
GaussianLikelihood): - # check required attributes exist - if ( - not hasattr(self.likelihood, "sigma") - or not hasattr(self.likelihood, "x") - or not hasattr(self.likelihood, "y") - ): - raise ValueError( - "Gaussian Likelihood does not have all the correct attributes!" - ) - - if "sigma" in self.pymc_priors: - # if sigma is suppled use that value - if self.likelihood.sigma is None: - self.likelihood.sigma = self.pymc_priors.pop("sigma") - else: - del self.pymc_priors["sigma"] - - for key in self.pymc_priors: - if key not in self.likelihood.function_keys: - raise ValueError(f"Prior key '{key}' is not a function key!") - - model = self.likelihood.func(self.likelihood.x, **self.pymc_priors) - - # set the distribution - pymc.Normal( - "likelihood", - mu=model, - sigma=self.likelihood.sigma, - observed=self.likelihood.y, - ) - elif isinstance(self.likelihood, PoissonLikelihood): - # check required attributes exist - if not hasattr(self.likelihood, "x") or not hasattr( - self.likelihood, "y" - ): - raise ValueError( - "Poisson Likelihood does not have all the correct attributes!" - ) - - for key in self.pymc_priors: - if key not in self.likelihood.function_keys: - raise ValueError(f"Prior key '{key}' is not a function key!") - - # get rate function - model = self.likelihood.func(self.likelihood.x, **self.pymc_priors) - - # set the distribution - pymc.Poisson("likelihood", mu=model, observed=self.likelihood.y) - elif isinstance(self.likelihood, ExponentialLikelihood): - # check required attributes exist - if not hasattr(self.likelihood, "x") or not hasattr( - self.likelihood, "y" - ): - raise ValueError( - "Exponential Likelihood does not have all the correct attributes!" 
- ) - - for key in self.pymc_priors: - if key not in self.likelihood.function_keys: - raise ValueError(f"Prior key '{key}' is not a function key!") - - # get mean function - model = self.likelihood.func(self.likelihood.x, **self.pymc_priors) - - # set the distribution - pymc.Exponential( - "likelihood", lam=1.0 / model, observed=self.likelihood.y - ) - elif isinstance(self.likelihood, StudentTLikelihood): - # check required attributes exist - if ( - not hasattr(self.likelihood, "x") - or not hasattr(self.likelihood, "y") - or not hasattr(self.likelihood, "nu") - or not hasattr(self.likelihood, "sigma") - ): - raise ValueError( - "StudentT Likelihood does not have all the correct attributes!" - ) - - if "nu" in self.pymc_priors: - # if nu is suppled use that value - if self.likelihood.nu is None: - self.likelihood.nu = self.pymc_priors.pop("nu") - else: - del self.pymc_priors["nu"] - - for key in self.pymc_priors: - if key not in self.likelihood.function_keys: - raise ValueError(f"Prior key '{key}' is not a function key!") - - model = self.likelihood.func(self.likelihood.x, **self.pymc_priors) - - # set the distribution - pymc.StudentT( - "likelihood", - nu=self.likelihood.nu, - mu=model, - sigma=self.likelihood.sigma, - observed=self.likelihood.y, - ) - elif isinstance( - self.likelihood, - (GravitationalWaveTransient, BasicGravitationalWaveTransient), - ): - # set theano Op - pass _search_parameter_keys, which only contains non-fixed variables - logl = LogLike( - self._search_parameter_keys, self.likelihood, self.pymc_priors - ) - - parameters = dict() - for key in self._search_parameter_keys: - try: - parameters[key] = self.pymc_priors[key] - except KeyError: - raise KeyError( - f"Unknown key '{key}' when setting GravitationalWaveTransient likelihood" - ) - - # convert to tensor variable - values = tt.as_tensor_variable(list(parameters.values())) - - pymc.DensityDist( - "likelihood", lambda v: logl(v), observed={"v": values} - ) - else: - raise 
ValueError("Unknown likelihood has been provided") diff --git a/bilby/core/sampler/pymultinest.py b/bilby/core/sampler/pymultinest.py deleted file mode 100644 index aee289cd3..000000000 --- a/bilby/core/sampler/pymultinest.py +++ /dev/null @@ -1,201 +0,0 @@ -import datetime -import importlib -import os -import time -import warnings - -import numpy as np - -from .base_sampler import NestedSampler, _TemporaryFileSamplerMixin, signal_wrapper - - -class Pymultinest(_TemporaryFileSamplerMixin, NestedSampler): - """ - bilby wrapper of pymultinest - (https://github.com/JohannesBuchner/PyMultiNest) - - All positional and keyword arguments (i.e., the args and kwargs) passed to - `run_sampler` will be propagated to `pymultinest.run`, see documentation - for that class for further help. Under Other Parameters, we list commonly - used kwargs and the bilby defaults. - - Parameters - ========== - npoints: int - The number of live points, note this can also equivalently be given as - one of [nlive, nlives, n_live_points] - importance_nested_sampling: bool, (False) - If true, use importance nested sampling - sampling_efficiency: float or {'parameter', 'model'}, ('parameter') - Defines the sampling efficiency - verbose: Bool - If true, print information information about the convergence during - resume: bool - If true, resume run from checkpoint (if available) - - """ - - msg = ( - "The PyMultiNest sampler interface in bilby is deprecated and will" - " be removed in Bilby version 3. Please use the `pymultinest-bilby`" - "sampler plugin instead: https://github.com/bilby-dev/pymultinest-bilby." 
- ) - warnings.warn(msg, FutureWarning) - - sampler_name = "pymultinest" - abbreviation = "pm" - default_kwargs = dict( - importance_nested_sampling=False, - resume=True, - verbose=True, - sampling_efficiency="parameter", - n_live_points=500, - n_params=None, - n_clustering_params=None, - wrapped_params=None, - multimodal=True, - const_efficiency_mode=False, - evidence_tolerance=0.5, - n_iter_before_update=100, - null_log_evidence=-1e90, - max_modes=100, - mode_tolerance=-1e90, - outputfiles_basename=None, - seed=-1, - context=0, - write_output=True, - log_zero=-1e100, - max_iter=0, - init_MPI=False, - dump_callback=None, - ) - short_name = "pm" - hard_exit = True - sampling_seed_key = "seed" - - def __init__( - self, - likelihood, - priors, - outdir="outdir", - label="label", - use_ratio=False, - plot=False, - exit_code=77, - skip_import_verification=False, - temporary_directory=True, - **kwargs - ): - super(Pymultinest, self).__init__( - likelihood=likelihood, - priors=priors, - outdir=outdir, - label=label, - use_ratio=use_ratio, - plot=plot, - skip_import_verification=skip_import_verification, - exit_code=exit_code, - temporary_directory=temporary_directory, - **kwargs - ) - self._apply_multinest_boundaries() - - def _translate_kwargs(self, kwargs): - kwargs = super()._translate_kwargs(kwargs) - if "n_live_points" not in kwargs: - for equiv in self.npoints_equiv_kwargs: - if equiv in kwargs: - kwargs["n_live_points"] = kwargs.pop(equiv) - - def _verify_kwargs_against_default_kwargs(self): - """Check the kwargs""" - - self.outputfiles_basename = self.kwargs.pop("outputfiles_basename", None) - - # for PyMultiNest >=2.9 the n_params kwarg cannot be None - if self.kwargs["n_params"] is None: - self.kwargs["n_params"] = self.ndim - if self.kwargs["dump_callback"] is None: - self.kwargs["dump_callback"] = self._dump_callback - NestedSampler._verify_kwargs_against_default_kwargs(self) - - def _dump_callback(self, *args, **kwargs): - if self.use_temporary_directory: - 
self._copy_temporary_directory_contents_to_proper_path() - self._calculate_and_save_sampling_time() - - def _apply_multinest_boundaries(self): - if self.kwargs["wrapped_params"] is None: - self.kwargs["wrapped_params"] = list() - for param in self.search_parameter_keys: - if self.priors[param].boundary == "periodic": - self.kwargs["wrapped_params"].append(1) - else: - self.kwargs["wrapped_params"].append(0) - - @signal_wrapper - def run_sampler(self): - import pymultinest - - self._verify_kwargs_against_default_kwargs() - - self._setup_run_directory() - self._check_and_load_sampling_time_file() - if not self.kwargs["resume"]: - self.total_sampling_time = 0.0 - - # Overwrite pymultinest's signal handling function - pm_run = importlib.import_module("pymultinest.run") - pm_run.interrupt_handler = self.write_current_state_and_exit - - self.start_time = time.time() - out = pymultinest.solve( - LogLikelihood=self.log_likelihood, - Prior=self.prior_transform, - n_dims=self.ndim, - **self.kwargs - ) - self._calculate_and_save_sampling_time() - - self._clean_up_run_directory() - - post_equal_weights = os.path.join( - self.outputfiles_basename, "post_equal_weights.dat" - ) - post_equal_weights_data = np.loadtxt(post_equal_weights) - self.result.log_likelihood_evaluations = post_equal_weights_data[:, -1] - self.result.sampler_output = out - self.result.samples = post_equal_weights_data[:, :-1] - self.result.log_evidence = out["logZ"] - self.result.log_evidence_err = out["logZerr"] - self.calc_likelihood_count() - self.result.outputfiles_basename = self.outputfiles_basename - self.result.sampling_time = datetime.timedelta(seconds=self.total_sampling_time) - self.result.nested_samples = self._nested_samples - return self.result - - @property - def _nested_samples(self): - """ - Extract nested samples from the pymultinest files. - This requires combining the "dead" points from `ev.dat` and the "live" - points from `phys_live.points`. 
- The prior volume associated with the current live points is the simple - estimate of `remaining_prior_volume / N`. - """ - import pandas as pd - - dir_ = self.kwargs["outputfiles_basename"] - dead_points = np.genfromtxt(dir_ + "/ev.dat") - live_points = np.genfromtxt(dir_ + "/phys_live.points") - - nlive = self.kwargs["n_live_points"] - final_log_prior_volume = -len(dead_points) / nlive - np.log(nlive) - live_points = np.insert(live_points, -1, final_log_prior_volume, axis=-1) - - nested_samples = pd.DataFrame( - np.vstack([dead_points, live_points]).copy(), - columns=self.search_parameter_keys - + ["log_likelihood", "log_prior_volume", "mode"], - ) - return nested_samples diff --git a/bilby/core/sampler/ultranest.py b/bilby/core/sampler/ultranest.py deleted file mode 100644 index fb17b21c1..000000000 --- a/bilby/core/sampler/ultranest.py +++ /dev/null @@ -1,304 +0,0 @@ -import datetime -import inspect -import time -import warnings - -import numpy as np -from pandas import DataFrame - -from ..utils import logger -from .base_sampler import NestedSampler, _TemporaryFileSamplerMixin, signal_wrapper - - -class Ultranest(_TemporaryFileSamplerMixin, NestedSampler): - """ - bilby wrapper of ultranest - (https://johannesbuchner.github.io/UltraNest/index.html) - - All positional and keyword arguments (i.e., the args and kwargs) passed to - `run_sampler` will be propagated to `ultranest.ReactiveNestedSampler.run` - or `ultranest.NestedSampler.run`, see documentation for those classes for - further help. Under Other Parameters, we list commonly used kwargs and the - bilby defaults. If the number of live points is specified the - `ultranest.NestedSampler` will be used, otherwise the - `ultranest.ReactiveNestedSampler` will be used. - - Parameters - ========== - num_live_points: int - The number of live points, note this can also equivalently be given as - one of [nlive, nlives, n_live_points, num_live_points]. 
If not given - then the `ultranest.ReactiveNestedSampler` will be used, which does not - require the number of live points to be specified. - show_status: Bool - If true, print information information about the convergence during - resume: bool - If true, resume run from checkpoint (if available) - step_sampler: - An UltraNest step sampler object. This defaults to None, so the default - stepping behaviour is used. - """ - - msg = ( - "The Ultranest sampler interface in bilby is deprecated and will" - " be removed in Bilby version 3. Please use the `ultranest-bilby`" - "sampler plugin instead: https://github.com/bilby-dev/ultranest-bilby." - ) - warnings.warn(msg, FutureWarning) - - sampler_name = "ultranest" - abbreviation = "ultra" - default_kwargs = dict( - resume=True, - show_status=True, - num_live_points=None, - wrapped_params=None, - log_dir=None, - derived_param_names=[], - run_num=None, - vectorized=False, - num_test_samples=2, - draw_multiple=True, - num_bootstraps=30, - update_interval_iter=None, - update_interval_ncall=None, - log_interval=None, - dlogz=None, - max_iters=None, - update_interval_volume_fraction=0.2, - viz_callback=None, - dKL=0.5, - frac_remain=0.01, - Lepsilon=0.001, - min_ess=400, - max_ncalls=None, - max_num_improvement_loops=-1, - min_num_live_points=400, - cluster_num_live_points=40, - step_sampler=None, - ) - - short_name = "ultra" - - def __init__( - self, - likelihood, - priors, - outdir="outdir", - label="label", - use_ratio=False, - plot=False, - exit_code=77, - skip_import_verification=False, - temporary_directory=True, - callback_interval=10, - **kwargs, - ): - super(Ultranest, self).__init__( - likelihood=likelihood, - priors=priors, - outdir=outdir, - label=label, - use_ratio=use_ratio, - plot=plot, - skip_import_verification=skip_import_verification, - exit_code=exit_code, - temporary_directory=temporary_directory, - **kwargs, - ) - self._apply_ultranest_boundaries() - - if self.use_temporary_directory: - # set callback 
interval, so copying of results does not thrash the - # disk (ultranest will call viz_callback quite a lot) - self.callback_interval = callback_interval - - def _translate_kwargs(self, kwargs): - kwargs = super()._translate_kwargs(kwargs) - if "num_live_points" not in kwargs: - for equiv in self.npoints_equiv_kwargs: - if equiv in kwargs: - kwargs["num_live_points"] = kwargs.pop(equiv) - if "verbose" in kwargs and "show_status" not in kwargs: - kwargs["show_status"] = kwargs.pop("verbose") - resume = kwargs.get("resume", False) - if resume is True: - kwargs["resume"] = "overwrite" - elif resume is False: - kwargs["resume"] = "overwrite" - - def _verify_kwargs_against_default_kwargs(self): - """Check the kwargs""" - - self.outputfiles_basename = self.kwargs.pop("log_dir", None) - if self.kwargs["viz_callback"] is None: - self.kwargs["viz_callback"] = self._viz_callback - - NestedSampler._verify_kwargs_against_default_kwargs(self) - - def _viz_callback(self, *args, **kwargs): - if self.use_temporary_directory: - if not (self._viz_callback_counter % self.callback_interval): - self._copy_temporary_directory_contents_to_proper_path() - self._calculate_and_save_sampling_time() - self._viz_callback_counter += 1 - - def _apply_ultranest_boundaries(self): - if ( - self.kwargs["wrapped_params"] is None - or len(self.kwargs.get("wrapped_params", [])) == 0 - ): - self.kwargs["wrapped_params"] = [] - for param, value in self.priors.items(): - if param in self.search_parameter_keys: - if value.boundary == "periodic": - self.kwargs["wrapped_params"].append(1) - else: - self.kwargs["wrapped_params"].append(0) - - def _copy_temporary_directory_contents_to_proper_path(self): - """ - Copy the temporary back to the proper path. - Do not delete the temporary directory. 
- """ - if inspect.stack()[1].function != "_viz_callback": - super(Ultranest, self)._copy_temporary_directory_contents_to_proper_path() - - @property - def sampler_function_kwargs(self): - if self.kwargs.get("num_live_points", None) is not None: - keys = [ - "update_interval_iter", - "update_interval_ncall", - "log_interval", - "dlogz", - "max_iters", - ] - else: - keys = [ - "update_interval_volume_fraction", - "update_interval_ncall", - "log_interval", - "show_status", - "viz_callback", - "dlogz", - "dKL", - "frac_remain", - "Lepsilon", - "min_ess", - "max_iters", - "max_ncalls", - "max_num_improvement_loops", - "min_num_live_points", - "cluster_num_live_points", - ] - - function_kwargs = {key: self.kwargs[key] for key in keys if key in self.kwargs} - - return function_kwargs - - @property - def sampler_init_kwargs(self): - keys = [ - "derived_param_names", - "resume", - "run_num", - "vectorized", - "log_dir", - "wrapped_params", - ] - if self.kwargs.get("num_live_points", None) is not None: - keys += ["num_live_points"] - else: - keys += ["num_test_samples", "draw_multiple", "num_bootstraps"] - - init_kwargs = {key: self.kwargs[key] for key in keys if key in self.kwargs} - - return init_kwargs - - @signal_wrapper - def run_sampler(self): - import ultranest - import ultranest.stepsampler - - if self.kwargs["dlogz"] is None: - # remove dlogz, so ultranest defaults (which are different for - # NestedSampler and ReactiveNestedSampler) are used - self.kwargs.pop("dlogz") - - self._verify_kwargs_against_default_kwargs() - - stepsampler = self.kwargs.pop("step_sampler", None) - - self._setup_run_directory() - self.kwargs["log_dir"] = self.kwargs["outputfiles_basename"] - self._check_and_load_sampling_time_file() - - # use reactive nested sampler when no live points are given - if self.kwargs.get("num_live_points", None) is not None: - integrator = ultranest.integrator.NestedSampler - else: - integrator = ultranest.integrator.ReactiveNestedSampler - - sampler = 
integrator( - self.search_parameter_keys, - self.log_likelihood, - transform=self.prior_transform, - **self.sampler_init_kwargs, - ) - - if stepsampler is not None: - if isinstance(stepsampler, ultranest.stepsampler.StepSampler): - sampler.stepsampler = stepsampler - else: - logger.warning( - "The supplied step sampler is not the correct type. " - "The default step sampling will be used instead." - ) - - if self.use_temporary_directory: - self._viz_callback_counter = 1 - - self.start_time = time.time() - results = sampler.run(**self.sampler_function_kwargs) - self._calculate_and_save_sampling_time() - - self._clean_up_run_directory() - - self._generate_result(results) - self.calc_likelihood_count() - - return self.result - - def _generate_result(self, out): - # extract results - from ..utils import random - - data = np.array(out["weighted_samples"]["points"]) - weights = np.array(out["weighted_samples"]["weights"]) - - scaledweights = weights / weights.max() - mask = random.rng.uniform(0, 1, len(scaledweights)) < scaledweights - - nested_samples = DataFrame(data, columns=self.search_parameter_keys) - nested_samples["weights"] = weights - nested_samples["log_likelihood"] = out["weighted_samples"]["logl"] - self.result.log_likelihood_evaluations = np.array( - out["weighted_samples"]["logl"] - )[mask] - self.result.sampler_output = out - self.result.samples = data[mask, :] - self.result.nested_samples = nested_samples - self.result.log_evidence = out["logz"] - self.result.log_evidence_err = out["logzerr"] - if self.kwargs["num_live_points"] is not None: - self.result.information_gain = ( - np.power(out["logzerr"], 2) * self.kwargs["num_live_points"] - ) - - self.result.outputfiles_basename = self.outputfiles_basename - self.result.sampling_time = datetime.timedelta(seconds=self.total_sampling_time) - - def log_likelihood(self, theta): - log_l = super(Ultranest, self).log_likelihood(theta=theta) - return np.nan_to_num(log_l) diff --git a/bilby/core/sampler/zeus.py 
b/bilby/core/sampler/zeus.py deleted file mode 100644 index 18be50fdc..000000000 --- a/bilby/core/sampler/zeus.py +++ /dev/null @@ -1,195 +0,0 @@ -import os -import shutil -import warnings -from shutil import copyfile - -import numpy as np - -from .base_sampler import LikePriorEvaluator, SamplerError, signal_wrapper -from .emcee import Emcee - -_evaluator = LikePriorEvaluator() - - -class Zeus(Emcee): - """bilby wrapper for Zeus (https://zeus-mcmc.readthedocs.io/) - - All positional and keyword arguments (i.e., the args and kwargs) passed to - `run_sampler` will be propagated to `zeus.EnsembleSampler`, see - documentation for that class for further help. Under Other Parameters, we - list commonly used kwargs and the bilby defaults. - - Parameters - ========== - nwalkers: int, (500) - The number of walkers - nsteps: int, (100) - The number of steps - nburn: int (None) - If given, the fixed number of steps to discard as burn-in. These will - be discarded from the total number of steps set by `nsteps` and - therefore the value must be greater than `nsteps`. Else, nburn is - estimated from the autocorrelation time - burn_in_fraction: float, (0.25) - The fraction of steps to discard as burn-in in the event that the - autocorrelation time cannot be calculated - burn_in_act: float - The number of autocorrelation times to discard as burn-in - - """ - - msg = ( - "The Zeus sampler interface in bilby is deprecated and will" - " be removed in Bilby version 3. Please use the `zeus-bilby`" - "sampler plugin instead: https://github.com/bilby-dev/zeus-bilby." 
- ) - warnings.warn(msg, FutureWarning) - - sampler_name = "zeus" - default_kwargs = dict( - nwalkers=500, - args=[], - kwargs={}, - pool=None, - log_prob0=None, - start=None, - blobs0=None, - iterations=100, - thin=1, - ) - - def __init__( - self, - likelihood, - priors, - outdir="outdir", - label="label", - use_ratio=False, - plot=False, - skip_import_verification=False, - pos0=None, - nburn=None, - burn_in_fraction=0.25, - resume=True, - burn_in_act=3, - **kwargs, - ): - super(Zeus, self).__init__( - likelihood=likelihood, - priors=priors, - outdir=outdir, - label=label, - use_ratio=use_ratio, - plot=plot, - skip_import_verification=skip_import_verification, - pos0=pos0, - nburn=nburn, - burn_in_fraction=burn_in_fraction, - resume=resume, - burn_in_act=burn_in_act, - **kwargs, - ) - - def _translate_kwargs(self, kwargs): - super(Zeus, self)._translate_kwargs(kwargs=kwargs) - - # check if using emcee-style arguments - if "start" not in kwargs: - if "rstate0" in kwargs: - kwargs["start"] = kwargs.pop("rstate0") - if "log_prob0" not in kwargs: - if "lnprob0" in kwargs: - kwargs["log_prob0"] = kwargs.pop("lnprob0") - - @property - def sampler_function_kwargs(self): - keys = ["log_prob0", "start", "blobs0", "iterations", "thin", "progress"] - - function_kwargs = {key: self.kwargs[key] for key in keys if key in self.kwargs} - - return function_kwargs - - @property - def sampler_init_kwargs(self): - init_kwargs = { - key: value - for key, value in self.kwargs.items() - if key not in self.sampler_function_kwargs - } - - init_kwargs["logprob_fn"] = _evaluator.call_emcee - init_kwargs["ndim"] = self.ndim - - return init_kwargs - - def write_current_state(self): - self._sampler.distribute = map - super(Zeus, self).write_current_state() - self._sampler.distribute = getattr(self._sampler.pool, "map", map) - - def _initialise_sampler(self): - from zeus import EnsembleSampler - - self._sampler = EnsembleSampler(**self.sampler_init_kwargs) - self._init_chain_file() - - def 
write_chains_to_file(self, sample): - chain_file = self.checkpoint_info.chain_file - temp_chain_file = chain_file + ".temp" - if os.path.isfile(chain_file): - copyfile(chain_file, temp_chain_file) - - points = np.hstack([sample[0], np.array(sample[2])]) - - with open(temp_chain_file, "a") as ff: - for ii, point in enumerate(points): - ff.write(self.checkpoint_info.chain_template.format(ii, *point)) - shutil.move(temp_chain_file, chain_file) - - def _set_pos0_for_resume(self): - self.pos0 = self.sampler.get_last_sample() - - @signal_wrapper - def run_sampler(self): - self._setup_pool() - sampler_function_kwargs = self.sampler_function_kwargs - iterations = sampler_function_kwargs.pop("iterations") - iterations -= self._previous_iterations - - sampler_function_kwargs["start"] = self.pos0 - - # main iteration loop - for sample in self.sampler.sample( - iterations=iterations, **sampler_function_kwargs - ): - self.write_chains_to_file(sample) - self._close_pool() - self.write_current_state() - - self.result.sampler_output = np.nan - self.calculate_autocorrelation(self.sampler.chain.reshape((-1, self.ndim))) - self.print_nburn_logging_info() - - self._generate_result() - - self.result.samples = self.sampler.get_chain(flat=True, discard=self.nburn) - self.result.walkers = self.sampler.chain - return self.result - - def _generate_result(self): - self.result.nburn = self.nburn - self.calc_likelihood_count() - if self.result.nburn > self.nsteps: - raise SamplerError( - "The run has finished, but the chain is not burned in: " - f"`nburn < nsteps` ({self.result.nburn} < {self.nsteps})." - " Try increasing the number of steps." 
- ) - blobs = np.array(self.sampler.get_blobs(flat=True, discard=self.nburn)).reshape( - (-1, 2) - ) - log_likelihoods, log_priors = blobs.T - self.result.log_likelihood_evaluations = log_likelihoods - self.result.log_prior_evaluations = log_priors - self.result.log_evidence = np.nan - self.result.log_evidence_err = np.nan From edb9954bc9d7e8cdb620b98cfeae5c523381a206 Mon Sep 17 00:00:00 2001 From: "Michael J. Williams" Date: Wed, 18 Feb 2026 16:22:02 +0000 Subject: [PATCH 02/15] TST: remove tests for external samplers --- test/core/sampler/cpnest_test.py | 67 ---------------- test/core/sampler/kombine_test.py | 81 ------------------- test/core/sampler/nessai_test.py | 103 ------------------------ test/core/sampler/nestle_test.py | 73 ----------------- test/core/sampler/ptemcee_test.py | 105 ------------------------ test/core/sampler/ptmcmc_test.py | 5 -- test/core/sampler/pymc_test.py | 82 ------------------- test/core/sampler/pymultinest_test.py | 71 ----------------- test/core/sampler/ultranest_test.py | 110 -------------------------- test/core/sampler/zeus_test.py | 63 --------------- 10 files changed, 760 deletions(-) delete mode 100644 test/core/sampler/cpnest_test.py delete mode 100644 test/core/sampler/kombine_test.py delete mode 100644 test/core/sampler/nessai_test.py delete mode 100644 test/core/sampler/nestle_test.py delete mode 100644 test/core/sampler/ptemcee_test.py delete mode 100644 test/core/sampler/ptmcmc_test.py delete mode 100644 test/core/sampler/pymc_test.py delete mode 100644 test/core/sampler/pymultinest_test.py delete mode 100644 test/core/sampler/ultranest_test.py delete mode 100644 test/core/sampler/zeus_test.py diff --git a/test/core/sampler/cpnest_test.py b/test/core/sampler/cpnest_test.py deleted file mode 100644 index 9ad420cf3..000000000 --- a/test/core/sampler/cpnest_test.py +++ /dev/null @@ -1,67 +0,0 @@ -import unittest - -import bilby -import bilby.core.sampler.cpnest - - -class TestCPNest(unittest.TestCase): - def setUp(self): - 
self.likelihood = bilby.core.likelihood.Likelihood() - self.priors = bilby.core.prior.PriorDict( - dict(a=bilby.core.prior.Uniform(0, 1), b=bilby.core.prior.Uniform(0, 1)) - ) - self.sampler = bilby.core.sampler.cpnest.Cpnest( - self.likelihood, - self.priors, - outdir="outdir", - label="label", - use_ratio=False, - plot=False, - skip_import_verification=True, - ) - - def tearDown(self): - del self.likelihood - del self.priors - del self.sampler - - def test_default_kwargs(self): - expected = dict( - verbose=3, - nthreads=1, - nlive=500, - maxmcmc=1000, - seed=None, - poolsize=100, - nhamiltonian=0, - resume=True, - output="outdir/cpnest_label/", - proposals=None, - n_periodic_checkpoint=8000, - ) - self.assertDictEqual(expected, self.sampler.kwargs) - - def test_translate_kwargs(self): - expected = dict( - verbose=3, - nthreads=1, - nlive=250, - maxmcmc=1000, - seed=None, - poolsize=100, - nhamiltonian=0, - resume=True, - output="outdir/cpnest_label/", - proposals=None, - n_periodic_checkpoint=8000, - ) - for equiv in bilby.core.sampler.base_sampler.NestedSampler.npoints_equiv_kwargs: - new_kwargs = self.sampler.kwargs.copy() - del new_kwargs["nlive"] - new_kwargs[equiv] = 250 - self.sampler.kwargs = new_kwargs - self.assertDictEqual(expected, self.sampler.kwargs) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/core/sampler/kombine_test.py b/test/core/sampler/kombine_test.py deleted file mode 100644 index ff2d0a9a8..000000000 --- a/test/core/sampler/kombine_test.py +++ /dev/null @@ -1,81 +0,0 @@ -import unittest - -import bilby -import bilby.core.sampler.kombine - - -class TestKombine(unittest.TestCase): - def setUp(self): - self.likelihood = bilby.core.likelihood.Likelihood() - self.priors = bilby.core.prior.PriorDict( - dict(a=bilby.core.prior.Uniform(0, 1), b=bilby.core.prior.Uniform(0, 1)) - ) - self.sampler = bilby.core.sampler.kombine.Kombine( - self.likelihood, - self.priors, - outdir="outdir", - label="label", - use_ratio=False, - 
plot=False, - skip_import_verification=True, - ) - - def tearDown(self): - del self.likelihood - del self.priors - del self.sampler - - def test_default_kwargs(self): - expected = dict( - nwalkers=500, - args=[], - pool=None, - transd=False, - lnpost0=None, - blob0=None, - iterations=500, - storechain=True, - processes=1, - update_interval=None, - kde=None, - kde_size=None, - spaces=None, - freeze_transd=False, - test_steps=16, - critical_pval=0.05, - max_steps=None, - burnin_verbose=False, - ) - self.assertDictEqual(expected, self.sampler.kwargs) - - def test_translate_kwargs(self): - expected = dict( - nwalkers=400, - args=[], - pool=None, - transd=False, - lnpost0=None, - blob0=None, - iterations=500, - storechain=True, - processes=1, - update_interval=None, - kde=None, - kde_size=None, - spaces=None, - freeze_transd=False, - test_steps=16, - critical_pval=0.05, - max_steps=None, - burnin_verbose=False, - ) - for equiv in bilby.core.sampler.base_sampler.MCMCSampler.nwalkers_equiv_kwargs: - new_kwargs = self.sampler.kwargs.copy() - del new_kwargs["nwalkers"] - new_kwargs[equiv] = 400 - self.sampler.kwargs = new_kwargs - self.assertDictEqual(expected, self.sampler.kwargs) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/core/sampler/nessai_test.py b/test/core/sampler/nessai_test.py deleted file mode 100644 index 6b1c9a870..000000000 --- a/test/core/sampler/nessai_test.py +++ /dev/null @@ -1,103 +0,0 @@ -import unittest -from unittest.mock import patch, mock_open - -import bilby -import bilby.core.sampler.nessai -import os - - -class TestNessai(unittest.TestCase): - maxDiff = None - - def setUp(self): - self.likelihood = bilby.core.likelihood.Likelihood() - self.priors = bilby.core.prior.PriorDict( - dict(a=bilby.core.prior.Uniform(0, 1), b=bilby.core.prior.Uniform(0, 1)) - ) - self.sampler = bilby.core.sampler.nessai.Nessai( - self.likelihood, - self.priors, - outdir="outdir", - label="label", - use_ratio=False, - plot=False, - 
skip_import_verification=True, - sampling_seed=150914, - ) - self.expected = self.sampler.default_kwargs - self.expected["n_pool"] = 1 # Because npool=1 by default - self.expected['output'] = 'outdir/label_nessai/' - self.expected['seed'] = 150914 - - def tearDown(self): - del self.likelihood - del self.priors - del self.sampler - del self.expected - - def test_translate_kwargs_nlive(self): - expected = self.expected.copy() - # nlive in the default kwargs is not a fixed but depends on the - # version of nessai, so get the value here and use it when setting - # the equivalent kwargs. - nlive = expected["nlive"] - for equiv in bilby.core.sampler.base_sampler.NestedSampler.npoints_equiv_kwargs: - new_kwargs = self.sampler.kwargs.copy() - del new_kwargs["nlive"] - new_kwargs[equiv] = nlive - self.sampler.kwargs = new_kwargs - self.assertDictEqual(expected, self.sampler.kwargs) - - def test_translate_kwargs_npool(self): - expected = self.expected.copy() - expected["n_pool"] = 2 - for equiv in bilby.core.sampler.base_sampler.NestedSampler.npool_equiv_kwargs: - new_kwargs = self.sampler.kwargs.copy() - del new_kwargs["n_pool"] - new_kwargs[equiv] = 2 - self.sampler.kwargs = new_kwargs - self.assertDictEqual(expected, self.sampler.kwargs) - - def test_split_kwargs(self): - kwargs, run_kwargs = self.sampler.split_kwargs() - assert "save" not in run_kwargs - assert "plot" in run_kwargs - - def test_translate_kwargs_no_npool(self): - expected = self.expected.copy() - expected["n_pool"] = 3 - new_kwargs = self.sampler.kwargs.copy() - del new_kwargs["n_pool"] - self.sampler._npool = 3 - self.sampler.kwargs = new_kwargs - self.assertDictEqual(expected, self.sampler.kwargs) - - def test_translate_kwargs_seed(self): - assert self.expected["seed"] == 150914 - - @patch("builtins.open", mock_open(read_data='{"nlive": 4000}')) - def test_update_from_config_file(self): - expected = self.expected.copy() - expected["nlive"] = 4000 - new_kwargs = self.expected.copy() - 
new_kwargs["config_file"] = "config_file.json" - self.sampler.kwargs = new_kwargs - self.assertDictEqual(expected, self.sampler.kwargs) - - -def test_get_expected_outputs(): - label = "par0" - outdir = os.path.join("some", "bilby_pipe", "dir") - filenames, directories = bilby.core.sampler.nessai.Nessai.get_expected_outputs( - outdir=outdir, label=label - ) - assert len(filenames) == 0 - assert len(directories) == 3 - base_dir = os.path.join(outdir, f"{label}_nessai", "") - assert base_dir in directories - assert os.path.join(base_dir, "proposal", "") in directories - assert os.path.join(base_dir, "diagnostics", "") in directories - - -if __name__ == "__main__": - unittest.main() diff --git a/test/core/sampler/nestle_test.py b/test/core/sampler/nestle_test.py deleted file mode 100644 index 36ce4c81c..000000000 --- a/test/core/sampler/nestle_test.py +++ /dev/null @@ -1,73 +0,0 @@ -import unittest - -import bilby -import bilby.core.sampler.nestle - - -class TestNestle(unittest.TestCase): - def setUp(self): - self.likelihood = bilby.core.likelihood.Likelihood() - self.priors = bilby.core.prior.PriorDict( - dict(a=bilby.core.prior.Uniform(0, 1), b=bilby.core.prior.Uniform(0, 1)) - ) - self.sampler = bilby.core.sampler.nestle.Nestle( - self.likelihood, - self.priors, - outdir="outdir", - label="label", - use_ratio=False, - plot=False, - skip_import_verification=True, - verbose=False, - ) - - def tearDown(self): - del self.likelihood - del self.priors - del self.sampler - - def test_default_kwargs(self): - expected = dict( - verbose=False, - method="multi", - npoints=500, - update_interval=None, - npdim=None, - maxiter=None, - maxcall=None, - dlogz=None, - decline_factor=None, - rstate=None, - callback=None, - steps=20, - enlarge=1.2, - ) - self.assertDictEqual(expected, self.sampler.kwargs) - - def test_translate_kwargs(self): - expected = dict( - verbose=False, - method="multi", - npoints=345, - update_interval=None, - npdim=None, - maxiter=None, - maxcall=None, - 
dlogz=None, - decline_factor=None, - rstate=None, - callback=None, - steps=20, - enlarge=1.2, - ) - self.sampler.kwargs["npoints"] = 123 - for equiv in bilby.core.sampler.base_sampler.NestedSampler.npoints_equiv_kwargs: - new_kwargs = self.sampler.kwargs.copy() - del new_kwargs["npoints"] - new_kwargs[equiv] = 345 - self.sampler.kwargs = new_kwargs - self.assertDictEqual(expected, self.sampler.kwargs) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/core/sampler/ptemcee_test.py b/test/core/sampler/ptemcee_test.py deleted file mode 100644 index 4708a12b0..000000000 --- a/test/core/sampler/ptemcee_test.py +++ /dev/null @@ -1,105 +0,0 @@ -import unittest - -from bilby.core.likelihood import GaussianLikelihood -from bilby.core.prior import Uniform, PriorDict -from bilby.core.sampler.ptemcee import Ptemcee -from bilby.core.sampler.base_sampler import MCMCSampler -import numpy as np -import os - - -class TestPTEmcee(unittest.TestCase): - def setUp(self): - self.likelihood = GaussianLikelihood( - x=np.linspace(0, 1, 2), - y=np.linspace(0, 1, 2), - func=lambda x, **kwargs: x, - sigma=1, - ) - self.priors = PriorDict(dict(a=Uniform(0, 1), b=Uniform(0, 1))) - self._args = (self.likelihood, self.priors) - self._kwargs = dict( - outdir="outdir", - label="label", - use_ratio=False, - plot=False, - skip_import_verification=True, - ) - self.sampler = Ptemcee(*self._args, **self._kwargs) - self.expected = dict( - ntemps=10, - nwalkers=100, - Tmax=None, - betas=None, - a=2.0, - adaptation_lag=10000, - adaptation_time=100, - random=None, - adapt=False, - swap_ratios=False, - ) - - def tearDown(self): - del self.likelihood - del self.priors - del self.sampler - - def test_default_kwargs(self): - self.assertDictEqual(self.expected, self.sampler.kwargs) - - def test_translate_kwargs(self): - for equiv in MCMCSampler.nwalkers_equiv_kwargs: - new_kwargs = self.sampler.kwargs.copy() - del new_kwargs["nwalkers"] - new_kwargs[equiv] = 100 - self.sampler.kwargs = 
new_kwargs - self.assertDictEqual(self.expected, self.sampler.kwargs) - - def test_set_pos0_using_array(self): - """ - Verify that setting the initial points from an array matches the - default method. - """ - pos0 = self.sampler.get_pos0() - new_sampler = Ptemcee(*self._args, **self._kwargs, pos0=pos0) - self.assertTrue(np.array_equal(new_sampler.get_pos0(), pos0)) - - def test_set_pos0_using_dict(self): - """ - Verify that setting the initial points from a dictionary matches the - default method. - """ - old = np.array(self.sampler.get_pos0()) - pos0 = np.moveaxis(old, -1, 0) - pos0 = { - key: points for key, points in - zip(self.sampler.search_parameter_keys, pos0) - } - new_sampler = Ptemcee(*self._args, **self._kwargs, pos0=pos0) - new = new_sampler.get_pos0() - self.assertTrue(np.array_equal(new, old)) - - def test_set_pos0_from_minimize(self): - """ - Verify that the minimize method of setting the initial points - returns the same shape as the default. - """ - old = self.sampler.get_pos0().shape - new_sampler = Ptemcee(*self._args, **self._kwargs, pos0="minimize") - new = new_sampler.get_pos0().shape - self.assertEqual(old, new) - - -def test_get_expected_outputs(): - label = "par0" - outdir = os.path.join("some", "bilby_pipe", "dir") - filenames, directories = Ptemcee.get_expected_outputs( - outdir=outdir, label=label - ) - assert len(filenames) == 1 - assert len(directories) == 0 - assert os.path.join(outdir, f"{label}_checkpoint_resume.pickle") in filenames - - -if __name__ == "__main__": - unittest.main() diff --git a/test/core/sampler/ptmcmc_test.py b/test/core/sampler/ptmcmc_test.py deleted file mode 100644 index 9b4a926f8..000000000 --- a/test/core/sampler/ptmcmc_test.py +++ /dev/null @@ -1,5 +0,0 @@ -import unittest - - -if __name__ == "__main__": - unittest.main() diff --git a/test/core/sampler/pymc_test.py b/test/core/sampler/pymc_test.py deleted file mode 100644 index 9fc79c21c..000000000 --- a/test/core/sampler/pymc_test.py +++ /dev/null @@ -1,82 
+0,0 @@ -import unittest - -import bilby -import bilby.core.sampler.pymc - - -class TestPyMC(unittest.TestCase): - def setUp(self): - self.likelihood = bilby.core.likelihood.Likelihood() - self.priors = bilby.core.prior.PriorDict( - dict(a=bilby.core.prior.Uniform(0, 1), b=bilby.core.prior.Uniform(0, 1)) - ) - self.sampler = bilby.core.sampler.pymc.Pymc( - self.likelihood, - self.priors, - outdir="outdir", - label="label", - use_ratio=False, - plot=False, - skip_import_verification=True, - ) - - def tearDown(self): - del self.likelihood - del self.priors - del self.sampler - - def test_default_kwargs(self): - expected = dict( - draws=500, - step=None, - init="auto", - n_init=200000, - initvals=None, - trace=None, - chains=2, - cores=1, - tune=500, - progressbar=True, - model=None, - nuts_kwargs=None, - step_kwargs=None, - random_seed=None, - discard_tuned_samples=True, - compute_convergence_checks=True, - ) - expected.update(self.sampler.default_nuts_kwargs) - expected.update(self.sampler.default_step_kwargs) - self.assertDictEqual(expected, self.sampler.kwargs) - - def test_translate_kwargs(self): - expected = dict( - draws=500, - step=None, - init="auto", - n_init=200000, - initvals=None, - trace=None, - chains=2, - cores=1, - tune=500, - progressbar=True, - model=None, - nuts_kwargs=None, - step_kwargs=None, - random_seed=None, - discard_tuned_samples=True, - compute_convergence_checks=True, - ) - expected.update(self.sampler.default_nuts_kwargs) - expected.update(self.sampler.default_step_kwargs) - self.sampler.kwargs["draws"] = 123 - for equiv in bilby.core.sampler.base_sampler.NestedSampler.npoints_equiv_kwargs: - new_kwargs = self.sampler.kwargs.copy() - del new_kwargs["draws"] - new_kwargs[equiv] = 500 - self.sampler.kwargs = new_kwargs - self.assertDictEqual(expected, self.sampler.kwargs) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/core/sampler/pymultinest_test.py b/test/core/sampler/pymultinest_test.py deleted file mode 100644 
index 35f6da7fa..000000000 --- a/test/core/sampler/pymultinest_test.py +++ /dev/null @@ -1,71 +0,0 @@ -import unittest - -import bilby -import bilby.core.sampler.pymultinest - - -class TestPymultinest(unittest.TestCase): - def setUp(self): - self.likelihood = bilby.core.likelihood.Likelihood() - self.priors = bilby.core.prior.PriorDict( - dict(a=bilby.core.prior.Uniform(0, 1), b=bilby.core.prior.Uniform(0, 1)) - ) - self.priors["a"] = bilby.core.prior.Prior(boundary="periodic") - self.priors["b"] = bilby.core.prior.Prior(boundary="reflective") - self.sampler = bilby.core.sampler.pymultinest.Pymultinest( - self.likelihood, - self.priors, - outdir="outdir", - label="label", - use_ratio=False, - plot=False, - skip_import_verification=True, - ) - - def tearDown(self): - del self.likelihood - del self.priors - del self.sampler - - def test_default_kwargs(self): - expected = dict(importance_nested_sampling=False, resume=True, - verbose=True, sampling_efficiency='parameter', - n_live_points=500, n_params=2, - n_clustering_params=None, wrapped_params=None, - multimodal=True, const_efficiency_mode=False, - evidence_tolerance=0.5, - n_iter_before_update=100, null_log_evidence=-1e90, - max_modes=100, mode_tolerance=-1e90, seed=-1, - context=0, write_output=True, log_zero=-1e100, - max_iter=0, init_MPI=False, dump_callback='dumper') - self.sampler.kwargs['dump_callback'] = 'dumper' # Check like the dynesty print_func - self.assertListEqual([1, 0], self.sampler.kwargs['wrapped_params']) # Check this separately - self.sampler.kwargs['wrapped_params'] = None # The dict comparison can't handle lists - self.assertDictEqual(expected, self.sampler.kwargs) - - def test_translate_kwargs(self): - expected = dict(importance_nested_sampling=False, resume=True, - verbose=True, sampling_efficiency='parameter', - n_live_points=123, n_params=2, - n_clustering_params=None, wrapped_params=None, - multimodal=True, const_efficiency_mode=False, - evidence_tolerance=0.5, - n_iter_before_update=100, 
null_log_evidence=-1e90, - max_modes=100, mode_tolerance=-1e90, seed=-1, - context=0, write_output=True, log_zero=-1e100, - max_iter=0, init_MPI=False, dump_callback='dumper') - - for equiv in bilby.core.sampler.base_sampler.NestedSampler.npoints_equiv_kwargs: - new_kwargs = self.sampler.kwargs.copy() - del new_kwargs["n_live_points"] - new_kwargs[ - "wrapped_params" - ] = None # The dict comparison can't handle lists - new_kwargs['dump_callback'] = 'dumper' # Check this like Dynesty print_func - new_kwargs[equiv] = 123 - self.sampler.kwargs = new_kwargs - self.assertDictEqual(expected, self.sampler.kwargs) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/core/sampler/ultranest_test.py b/test/core/sampler/ultranest_test.py deleted file mode 100644 index 4776b8853..000000000 --- a/test/core/sampler/ultranest_test.py +++ /dev/null @@ -1,110 +0,0 @@ -import shutil -import unittest - -import bilby -import bilby.core.sampler.ultranest - - -class TestUltranest(unittest.TestCase): - - def setUp(self): - self.maxDiff = None - self.likelihood = bilby.core.likelihood.Likelihood() - self.priors = bilby.core.prior.PriorDict( - dict(a=bilby.core.prior.Uniform(0, 1), - b=bilby.core.prior.Uniform(0, 1))) - self.priors["a"] = bilby.core.prior.Prior(boundary="periodic") - self.priors["b"] = bilby.core.prior.Prior(boundary="reflective") - self.sampler = bilby.core.sampler.ultranest.Ultranest( - self.likelihood, - self.priors, - outdir="outdir", - label="label", - use_ratio=False, - plot=False, - skip_import_verification=True, - ) - - def tearDown(self): - del self.likelihood - del self.priors - del self.sampler - shutil.rmtree("outdir") - - def test_default_kwargs(self): - expected = dict( - resume="overwrite", - show_status=True, - num_live_points=None, - wrapped_params=None, - derived_param_names=None, - run_num=None, - vectorized=False, - num_test_samples=2, - draw_multiple=True, - num_bootstraps=30, - update_interval_iter=None, - update_interval_ncall=None, - 
log_interval=None, - dlogz=None, - max_iters=None, - update_interval_volume_fraction=0.2, - viz_callback=None, - dKL=0.5, - frac_remain=0.01, - Lepsilon=0.001, - min_ess=400, - max_ncalls=None, - max_num_improvement_loops=-1, - min_num_live_points=400, - cluster_num_live_points=40, - step_sampler=None, - ) - self.assertListEqual([1, 0], self.sampler.kwargs["wrapped_params"]) # Check this separately - self.sampler.kwargs["wrapped_params"] = None # The dict comparison can't handle lists - self.sampler.kwargs["derived_param_names"] = None - self.sampler.kwargs["viz_callback"] = None - self.assertDictEqual(expected, self.sampler.kwargs) - - def test_translate_kwargs(self): - expected = dict( - resume="overwrite", - show_status=True, - num_live_points=123, - wrapped_params=None, - derived_param_names=None, - run_num=None, - vectorized=False, - num_test_samples=2, - draw_multiple=True, - num_bootstraps=30, - update_interval_iter=None, - update_interval_ncall=None, - log_interval=None, - dlogz=None, - max_iters=None, - update_interval_volume_fraction=0.2, - viz_callback=None, - dKL=0.5, - frac_remain=0.01, - Lepsilon=0.001, - min_ess=400, - max_ncalls=None, - max_num_improvement_loops=-1, - min_num_live_points=400, - cluster_num_live_points=40, - step_sampler=None, - ) - for equiv in bilby.core.sampler.base_sampler.NestedSampler.npoints_equiv_kwargs: - new_kwargs = self.sampler.kwargs.copy() - del new_kwargs['num_live_points'] - new_kwargs[equiv] = 123 - self.sampler.kwargs = new_kwargs - self.sampler.kwargs["wrapped_params"] = None - self.sampler.kwargs["derived_param_names"] = None - self.sampler.kwargs["viz_callback"] = None - self.assertDictEqual(expected, self.sampler.kwargs) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/core/sampler/zeus_test.py b/test/core/sampler/zeus_test.py deleted file mode 100644 index b1749115e..000000000 --- a/test/core/sampler/zeus_test.py +++ /dev/null @@ -1,63 +0,0 @@ -import unittest - -import bilby -import 
bilby.core.sampler.zeus - - -class TestZeus(unittest.TestCase): - def setUp(self): - self.likelihood = bilby.core.likelihood.Likelihood() - self.priors = bilby.core.prior.PriorDict( - dict(a=bilby.core.prior.Uniform(0, 1), b=bilby.core.prior.Uniform(0, 1)) - ) - self.sampler = bilby.core.sampler.zeus.Zeus( - self.likelihood, - self.priors, - outdir="outdir", - label="label", - use_ratio=False, - plot=False, - skip_import_verification=True, - ) - - def tearDown(self): - del self.likelihood - del self.priors - del self.sampler - - def test_default_kwargs(self): - expected = dict( - nwalkers=500, - args=[], - kwargs={}, - pool=None, - log_prob0=None, - start=None, - blobs0=None, - iterations=100, - thin=1, - ) - self.assertDictEqual(expected, self.sampler.kwargs) - - def test_translate_kwargs(self): - expected = dict( - nwalkers=100, - args=[], - kwargs={}, - pool=None, - log_prob0=None, - start=None, - blobs0=None, - iterations=100, - thin=1, - ) - for equiv in bilby.core.sampler.base_sampler.MCMCSampler.nwalkers_equiv_kwargs: - new_kwargs = self.sampler.kwargs.copy() - del new_kwargs["nwalkers"] - new_kwargs[equiv] = 100 - self.sampler.kwargs = new_kwargs - self.assertDictEqual(expected, self.sampler.kwargs) - - -if __name__ == "__main__": - unittest.main() From b2327405b541fc600298c41c84373ef2014a01cc Mon Sep 17 00:00:00 2001 From: "Michael J. 
Williams" Date: Wed, 18 Feb 2026 16:22:15 +0000 Subject: [PATCH 03/15] TST: only run internal sampler tests --- test/integration/sampler_run_test.py | 29 ---------------------------- 1 file changed, 29 deletions(-) diff --git a/test/integration/sampler_run_test.py b/test/integration/sampler_run_test.py index f35cf07c6..875045c77 100644 --- a/test/integration/sampler_run_test.py +++ b/test/integration/sampler_run_test.py @@ -1,6 +1,5 @@ import multiprocessing import os -import sys import threading import time from signal import SIGINT @@ -18,7 +17,6 @@ _sampler_kwargs = dict( bilby_mcmc=dict(nsamples=200, printdt=1), - cpnest=dict(nlive=100), dynesty=dict(nlive=10, sample="acceptance-walk", nact=5, proposals=["diff"]), dynamic_dynesty=dict( nlive_init=10, @@ -29,25 +27,6 @@ sample="act-walk", ), emcee=dict(iterations=1000, nwalkers=10), - kombine=dict(iterations=200, nwalkers=10, autoburnin=False), - nessai=dict( - nlive=100, - poolsize=100, - max_iteration=500, - ), - nestle=dict(nlive=100), - ptemcee=dict( - nsamples=100, - nwalkers=50, - burn_in_act=1, - ntemps=1, - frac_threshold=0.5, - ), - PTMCMCSampler=dict(Niter=101, burn=100, covUpdate=100, isave=100), - pymc=dict(draws=50, tune=50, n_init=250), - pymultinest=dict(nlive=100), - ultranest=dict(nlive=100, temporary_directory=False), - zeus=dict(nwalkers=10, iterations=100) ) sampler_imports = dict( @@ -55,8 +34,6 @@ dynamic_dynesty="dynesty" ) -no_pool_test = ["pymultinest", "nestle", "ptmcmcsampler", "ultranest", "pymc"] - loaded_samplers = {k: v.load() for k, v in bilby.core.sampler.IMPLEMENTED_SAMPLERS.items()} @@ -119,8 +96,6 @@ def test_run_sampler_pool(self, sampler): def _run_sampler(self, sampler, pool_size, **extra_kwargs): pytest.importorskip(sampler_imports.get(sampler, sampler)) - if pool_size > 1 and sampler.lower() in no_pool_test: - pytest.skip(f"{sampler} cannot be parallelized") bilby.core.utils.check_directory_exists_and_if_not_mkdir("outdir") kwargs = _sampler_kwargs[sampler] res = 
bilby.run_sampler( @@ -147,10 +122,6 @@ def _run_with_signal_handling(self, sampler, pool_size=1): pytest.importorskip(sampler_imports.get(sampler, sampler)) if loaded_samplers[sampler.lower()].hard_exit: pytest.skip(f"{sampler} hard exits, can't test signal handling.") - if pool_size > 1 and sampler.lower() in no_pool_test: - pytest.skip(f"{sampler} cannot be parallelized") - if sys.version_info.minor == 8 and sampler.lower == "cpnest": - pytest.skip("Pool interrupting broken for cpnest with py3.8") pid = os.getpid() print(sampler) From 16af3c5c6ed957aac4ab2f30f68cb3a1ca43ddb9 Mon Sep 17 00:00:00 2001 From: "Michael J. Williams" Date: Wed, 18 Feb 2026 16:22:31 +0000 Subject: [PATCH 04/15] TST: remove mention of external samplers --- test/core/result_test.py | 12 ++++++------ test/gw/plot_test.py | 2 +- test/test_samplers_import.py | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/test/core/result_test.py b/test/core/result_test.py index dc13a20e8..96cd9b3c8 100644 --- a/test/core/result_test.py +++ b/test/core/result_test.py @@ -77,7 +77,7 @@ def setUp(self): result = bilby.core.result.Result( label="label", outdir=self.outdir, - sampler="nestle", + sampler="emcee", search_parameter_keys=["x", "y"], fixed_parameter_keys=["c", "d"], priors=priors, @@ -182,7 +182,7 @@ def test_unset_priors(self): result = bilby.core.result.Result( label="label", outdir="outdir", - sampler="nestle", + sampler="emcee", search_parameter_keys=["x", "y"], fixed_parameter_keys=["c", "d"], priors=None, @@ -202,7 +202,7 @@ def test_unknown_priors_fail(self): bilby.core.result.Result( label="label", outdir="outdir", - sampler="nestle", + sampler="emcee", search_parameter_keys=["x", "y"], fixed_parameter_keys=["c", "d"], priors=["a", "b"], @@ -648,7 +648,7 @@ def setUp(self): result = bilby.core.result.Result( label=self.label + str(i), outdir=self.outdir, - sampler="cpnest", + sampler="emcee", search_parameter_keys=["x", "y"], fixed_parameter_keys=["c", "d"], 
priors=self.priors, @@ -780,7 +780,7 @@ def test_combine_inconsistent_sampling_data(self): result = bilby.core.result.Result( label=self.label, outdir=self.outdir, - sampler="cpnest", + sampler="emcee", search_parameter_keys=["x", "y"], fixed_parameter_keys=["c", "d"], priors=self.priors, @@ -939,7 +939,7 @@ def setUp(self): result = bilby.core.result.Result( label="label", outdir=self.outdir, - sampler="nestle", + sampler="emcee", search_parameter_keys=["x", "y"], fixed_parameter_keys=["c", "d"], priors=priors, diff --git a/test/gw/plot_test.py b/test/gw/plot_test.py index bd9414212..32313c9d4 100644 --- a/test/gw/plot_test.py +++ b/test/gw/plot_test.py @@ -37,7 +37,7 @@ def setUp(self): self.result = bilby.gw.result.CBCResult( label="label", outdir="outdir", - sampler="nestle", + sampler="emcee", search_parameter_keys=list(priors.keys()), fixed_parameter_keys=list(), priors=priors, diff --git a/test/test_samplers_import.py b/test/test_samplers_import.py index 1646c38be..4e3cdd190 100644 --- a/test/test_samplers_import.py +++ b/test/test_samplers_import.py @@ -11,7 +11,7 @@ def test_sampler_import(sampler_name): Do not test :code:`FakeSampler` since it requires an additional argument. """ - if sampler_name in ["dnest4", "fake_sampler", "pypolychord"]: + if sampler_name in ["fake_sampler"]: pytest.skip(f"Skipping import test for {sampler_name}") bilby.core.utils.logger.setLevel("ERROR") likelihood = bilby.core.likelihood.Likelihood() From 030b42b8c52f8d85f9aabb1d450c8bef0022379c Mon Sep 17 00:00:00 2001 From: "Michael J. 
Williams" Date: Wed, 18 Feb 2026 16:25:53 +0000 Subject: [PATCH 05/15] EXAMP: add explicit error if sampler is missing --- .../alternative_samplers/linear_regression_pymc.py | 6 ++++++ .../linear_regression_pymc_custom_likelihood.py | 9 ++++++++- examples/core_examples/logo/sample_logo.py | 5 +++++ .../injection_examples/custom_proposal_example.py | 5 +++++ .../gw_examples/injection_examples/eccentric_inspiral.py | 6 ++++++ examples/gw_examples/injection_examples/non_tensor.py | 5 +++++ examples/gw_examples/injection_examples/plot_skymap.py | 5 +++++ .../gw_examples/injection_examples/relative_binning.py | 5 +++++ 8 files changed, 45 insertions(+), 1 deletion(-) diff --git a/examples/core_examples/alternative_samplers/linear_regression_pymc.py b/examples/core_examples/alternative_samplers/linear_regression_pymc.py index cc28e1c56..d5f6e514c 100644 --- a/examples/core_examples/alternative_samplers/linear_regression_pymc.py +++ b/examples/core_examples/alternative_samplers/linear_regression_pymc.py @@ -11,6 +11,12 @@ from bilby.core.likelihood import GaussianLikelihood from bilby.core.utils import random +if "pymc" not in bilby.core.sampler.IMPLEMENTED_SAMPLERS: + raise ImportError( + "pymc is required to run this example. 
Install with `pip install pymc-bilby`" + ) + + # Sets seed of bilby's generator "rng" to "123" to ensure reproducibility random.seed(123) diff --git a/examples/core_examples/alternative_samplers/linear_regression_pymc_custom_likelihood.py b/examples/core_examples/alternative_samplers/linear_regression_pymc_custom_likelihood.py index cb381aec7..e70d2db11 100644 --- a/examples/core_examples/alternative_samplers/linear_regression_pymc_custom_likelihood.py +++ b/examples/core_examples/alternative_samplers/linear_regression_pymc_custom_likelihood.py @@ -6,13 +6,20 @@ likelihood function to show how it should be defined, although this would give equivalent results as using the pre-defined 'Gaussian Likelihood' + """ import bilby import matplotlib.pyplot as plt import numpy as np import pymc as pm -from bilby.core.sampler.pymc import Pymc + +try: + from pymc_bilby import Pymc +except ImportError: + raise ImportError( + "pymc_bilby is required to run this example. Install with `pip install pymc-bilby`" + ) from bilby.core.utils import random # Sets seed of bilby's generator "rng" to "123" to ensure reproducibility diff --git a/examples/core_examples/logo/sample_logo.py b/examples/core_examples/logo/sample_logo.py index 61ef772df..ed817b7a9 100644 --- a/examples/core_examples/logo/sample_logo.py +++ b/examples/core_examples/logo/sample_logo.py @@ -4,6 +4,11 @@ import scipy.interpolate as si from skimage import io +if "nestle" not in bilby.core.sampler.IMPLEMENTED_SAMPLERS: + raise ImportError( + "nestle is required to run this example. 
Install with `pip install nestle-bilby`" + ) + class Likelihood(bilby.core.likelihood.Likelihood): def __init__(self, interp): diff --git a/examples/gw_examples/injection_examples/custom_proposal_example.py b/examples/gw_examples/injection_examples/custom_proposal_example.py index 07fdaf527..49bdcceed 100644 --- a/examples/gw_examples/injection_examples/custom_proposal_example.py +++ b/examples/gw_examples/injection_examples/custom_proposal_example.py @@ -15,6 +15,11 @@ from bilby.core.sampler import proposal from bilby.core.utils.random import seed +if "cpnest" not in bilby.core.sampler.IMPLEMENTED_SAMPLERS: + raise ImportError( + "cpnest is required to run this example. Install with `pip install cpnest-bilby`" + ) + # Sets seed of bilby's generator "rng" to "123" to ensure reproducibility seed(123) diff --git a/examples/gw_examples/injection_examples/eccentric_inspiral.py b/examples/gw_examples/injection_examples/eccentric_inspiral.py index fdb9dd043..12faa977c 100644 --- a/examples/gw_examples/injection_examples/eccentric_inspiral.py +++ b/examples/gw_examples/injection_examples/eccentric_inspiral.py @@ -12,6 +12,12 @@ import numpy as np from bilby.core.utils.random import seed +if "pymultinest" not in bilby.core.sampler.IMPLEMENTED_SAMPLERS: + raise ImportError( + "pymultinest is required to run this example. 
See pymultinest-bilby for installation instructions:" + "https://github.com/bilby-dev/pymultinest-bilby" + ) + # Sets bilby's random number generator seed to ensure reproducibility seed(123) diff --git a/examples/gw_examples/injection_examples/non_tensor.py b/examples/gw_examples/injection_examples/non_tensor.py index 9721919f3..8425a8ebb 100644 --- a/examples/gw_examples/injection_examples/non_tensor.py +++ b/examples/gw_examples/injection_examples/non_tensor.py @@ -10,6 +10,11 @@ import numpy as np from bilby.core.utils.random import seed +if "nestle" not in bilby.core.sampler.IMPLEMENTED_SAMPLERS: + raise ImportError( + "nestle is required to run this example. Install with `pip install nestle-bilby`" + ) + # Sets seed of bilby's generator "rng" to "123" to ensure reproducibility seed(123) diff --git a/examples/gw_examples/injection_examples/plot_skymap.py b/examples/gw_examples/injection_examples/plot_skymap.py index d98d0dc1e..2a9d3c516 100644 --- a/examples/gw_examples/injection_examples/plot_skymap.py +++ b/examples/gw_examples/injection_examples/plot_skymap.py @@ -6,6 +6,11 @@ import bilby from bilby.core.utils.random import seed +if "nestle" not in bilby.core.sampler.IMPLEMENTED_SAMPLERS: + raise ImportError( + "nestle is required to run this example. Install with `pip install nestle-bilby`" + ) + # Sets seed of bilby's generator "rng" to "123" to ensure reproducibility seed(123) diff --git a/examples/gw_examples/injection_examples/relative_binning.py b/examples/gw_examples/injection_examples/relative_binning.py index c64c62051..12f5c7ca1 100644 --- a/examples/gw_examples/injection_examples/relative_binning.py +++ b/examples/gw_examples/injection_examples/relative_binning.py @@ -14,6 +14,11 @@ from bilby.core.utils import random from tqdm.auto import tqdm +if "nestle" not in bilby.core.sampler.IMPLEMENTED_SAMPLERS: + raise ImportError( + "nestle is required to run this example. 
Install with `pip install nestle-bilby`" + ) + # Sets seed of bilby's generator "rng" to "123" to ensure reproducibility random.seed(123) From 3783cb7f5f44297e30da507d714a7740a63def76 Mon Sep 17 00:00:00 2001 From: "Michael J. Williams" Date: Wed, 18 Feb 2026 16:26:04 +0000 Subject: [PATCH 06/15] TST: change nestle to emcee --- test/gw/result_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/gw/result_test.py b/test/gw/result_test.py index d3b0977f2..27233445a 100644 --- a/test/gw/result_test.py +++ b/test/gw/result_test.py @@ -44,7 +44,7 @@ def setUp(self): self.result = bilby.gw.result.CBCResult( label="label", outdir=self.outdir, - sampler="nestle", + sampler="emcee", search_parameter_keys=list(priors.keys()), fixed_parameter_keys=list(), priors=priors, From 8cf029b4564b27729f7806eab60d074c47ed3e71 Mon Sep 17 00:00:00 2001 From: "Michael J. Williams" Date: Wed, 18 Feb 2026 16:26:17 +0000 Subject: [PATCH 07/15] BLD: remove sampler requirements --- pyproject.toml | 1 - sampler_requirements.txt | 13 ------------- 2 files changed, 14 deletions(-) delete mode 100644 sampler_requirements.txt diff --git a/pyproject.toml b/pyproject.toml index 145d905d1..e8e651a63 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -134,7 +134,6 @@ dependencies = {file = ["requirements.txt"]} all = {file = [ "gw_requirements.txt", "mcmc_requirements.txt", - "sampler_requirements.txt", "optional_requirements.txt" ]} gw = {file = ["gw_requirements.txt"]} diff --git a/sampler_requirements.txt b/sampler_requirements.txt deleted file mode 100644 index c066c6a6b..000000000 --- a/sampler_requirements.txt +++ /dev/null @@ -1,13 +0,0 @@ -cpnest>=0.9.4 -dynesty -emcee -nestle -ptmcmcsampler!=2.1.0,!=0.0.0 -ptemcee -pymc>=4.0.0 -pymultinest -kombine -ultranest>=3.0.0 -nessai>=0.7.0 -schwimmbad -zeus-mcmc>=2.3.0 From 48a40a677e450d68363198c91442a3845eb68a8b Mon Sep 17 00:00:00 2001 From: "Michael J. 
Williams" Date: Wed, 18 Feb 2026 16:27:19 +0000 Subject: [PATCH 08/15] CI: remove samplers from containers --- containers/environment.yml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/containers/environment.yml b/containers/environment.yml index 969155b75..41f5eeea4 100644 --- a/containers/environment.yml +++ b/containers/environment.yml @@ -44,17 +44,7 @@ dependencies: - sphinx-design - dynesty - emcee - - nestle - - ptemcee - - pymultinest - - ultranest - - cpnest - - kombine - - zeus-mcmc - pytorch - - pymc>=5.9 - - nessai - - ptmcmcsampler - jaxlib>=0.4 - jax>=0.4 - numba>0.53.1 From fe8f85a811624b9ad1ba8de2cd2f374302f05650 Mon Sep 17 00:00:00 2001 From: "Michael J. Williams" Date: Wed, 18 Feb 2026 16:33:02 +0000 Subject: [PATCH 09/15] REV: partially revert ee438118eec4e3ae8b9cfe2316ba618cc7aeb4fc Re-add proposal.py --- bilby/core/sampler/proposal.py | 358 +++++++++++++++++++++++++++++++++ 1 file changed, 358 insertions(+) create mode 100644 bilby/core/sampler/proposal.py diff --git a/bilby/core/sampler/proposal.py b/bilby/core/sampler/proposal.py new file mode 100644 index 000000000..d23d19b4c --- /dev/null +++ b/bilby/core/sampler/proposal.py @@ -0,0 +1,358 @@ +from inspect import isclass + +import numpy as np + +from ..prior import Uniform +from ..utils import random + + +class Sample(dict): + def __init__(self, dictionary=None): + if dictionary is None: + dictionary = dict() + super(Sample, self).__init__(dictionary) + + def __add__(self, other): + return Sample({key: self[key] + other[key] for key in self.keys()}) + + def __sub__(self, other): + return Sample({key: self[key] - other[key] for key in self.keys()}) + + def __mul__(self, other): + return Sample({key: self[key] * other for key in self.keys()}) + + @classmethod + def from_cpnest_live_point(cls, cpnest_live_point): + res = cls(dict()) + for i, key in enumerate(cpnest_live_point.names): + res[key] = cpnest_live_point.values[i] + return res + + @classmethod + def 
from_external_type(cls, external_sample, sampler_name): + if sampler_name == "cpnest": + return cls.from_cpnest_live_point(external_sample) + return external_sample + + +class JumpProposal(object): + def __init__(self, priors=None): + """A generic class for jump proposals + + Parameters + ========== + priors: bilby.core.prior.PriorDict + Dictionary of priors used in this sampling run + + Attributes + ========== + log_j: float + Log Jacobian of the proposal. Characterises whether or not detailed balance + is preserved. If not, log_j needs to be adjusted accordingly. + """ + self.priors = priors + self.log_j = 0.0 + + def __call__(self, sample, **kwargs): + """A generic wrapper for the jump proposal function + + Parameters + ========== + args: Arguments that are going to be passed into the proposal function + kwargs: Keyword arguments that are going to be passed into the proposal function + + Returns + ======= + dict: A dictionary with the new samples. Boundary conditions are applied. + + """ + return self._apply_boundaries(sample) + + def _move_reflecting_keys(self, sample): + keys = [ + key for key in sample.keys() if self.priors[key].boundary == "reflective" + ] + for key in keys: + if ( + sample[key] > self.priors[key].maximum + or sample[key] < self.priors[key].minimum + ): + r = self.priors[key].maximum - self.priors[key].minimum + delta = (sample[key] - self.priors[key].minimum) % (2 * r) + if delta > r: + sample[key] = ( + 2 * self.priors[key].maximum - self.priors[key].minimum - delta + ) + elif delta < r: + sample[key] = self.priors[key].minimum + delta + return sample + + def _move_periodic_keys(self, sample): + keys = [key for key in sample.keys() if self.priors[key].boundary == "periodic"] + for key in keys: + if ( + sample[key] > self.priors[key].maximum + or sample[key] < self.priors[key].minimum + ): + sample[key] = self.priors[key].minimum + ( + (sample[key] - self.priors[key].minimum) + % (self.priors[key].maximum - self.priors[key].minimum) + ) + 
return sample + + def _apply_boundaries(self, sample): + sample = self._move_periodic_keys(sample) + sample = self._move_reflecting_keys(sample) + return sample + + +class JumpProposalCycle(object): + def __init__(self, proposal_functions, weights, cycle_length=100): + """A generic wrapper class for proposal cycles + + Parameters + ========== + proposal_functions: list + A list of callable proposal functions/objects + weights: list + A list of integer weights for the respective proposal functions + cycle_length: int, optional + Length of the proposal cycle + """ + self.proposal_functions = proposal_functions + self.weights = weights + self.cycle_length = cycle_length + self._index = 0 + self._cycle = np.array([]) + self.update_cycle() + + def __call__(self, **kwargs): + proposal = self._cycle[self.index] + self._index = (self.index + 1) % self.cycle_length + return proposal(**kwargs) + + def __len__(self): + return len(self.proposal_functions) + + def update_cycle(self): + self._cycle = random.rng.choice( + self.proposal_functions, + size=self.cycle_length, + p=self.weights, + replace=True, + ) + + @property + def proposal_functions(self): + return self._proposal_functions + + @proposal_functions.setter + def proposal_functions(self, proposal_functions): + for i, proposal in enumerate(proposal_functions): + if isclass(proposal): + proposal_functions[i] = proposal() + self._proposal_functions = proposal_functions + + @property + def index(self): + return self._index + + @property + def weights(self): + """ + + Returns + ======= + Normalised proposal weights + + """ + return np.array(self._weights) / np.sum(np.array(self._weights)) + + @weights.setter + def weights(self, weights): + assert len(weights) == len(self.proposal_functions) + self._weights = weights + + @property + def unnormalised_weights(self): + return self._weights + + +class NormJump(JumpProposal): + def __init__(self, step_size, priors=None): + """ + A normal distributed step centered around the old 
sample + + Parameters + ========== + step_size: float + The scalable step size + priors: + See superclass + """ + super(NormJump, self).__init__(priors) + self.step_size = step_size + + def __call__(self, sample, **kwargs): + for key in sample.keys(): + sample[key] = random.rng.normal(sample[key], self.step_size) + return super(NormJump, self).__call__(sample) + + +class EnsembleWalk(JumpProposal): + def __init__( + self, + random_number_generator=random.rng.uniform, + n_points=3, + priors=None, + **random_number_generator_args + ): + """ + An ensemble walk + + Parameters + ========== + random_number_generator: func, optional + A random number generator. Default is random.random + n_points: int, optional + Number of points in the ensemble to average over. Default is 3. + priors: + See superclass + random_number_generator_args: + Additional keyword arguments for the random number generator + """ + super(EnsembleWalk, self).__init__(priors) + self.random_number_generator = random_number_generator + self.n_points = n_points + self.random_number_generator_args = random_number_generator_args + + def __call__(self, sample, **kwargs): + subset = random.rng.choice(kwargs["coordinates"], self.n_points, replace=False) + for i in range(len(subset)): + subset[i] = Sample.from_external_type( + subset[i], kwargs.get("sampler_name", None) + ) + center_of_mass = self.get_center_of_mass(subset) + for x in subset: + sample += (x - center_of_mass) * self.random_number_generator( + **self.random_number_generator_args + ) + return super(EnsembleWalk, self).__call__(sample) + + @staticmethod + def get_center_of_mass(subset): + return {key: np.mean([c[key] for c in subset]) for key in subset[0].keys()} + + +class EnsembleStretch(JumpProposal): + def __init__(self, scale=2.0, priors=None): + """ + Stretch move. Calculates the log Jacobian which can be used in cpnest to bias future moves. + + Parameters + ========== + scale: float, optional + Stretching scale. Default is 2.0. 
+ """ + super(EnsembleStretch, self).__init__(priors) + self.scale = scale + + def __call__(self, sample, **kwargs): + second_sample = random.rng.choice(kwargs["coordinates"]) + second_sample = Sample.from_external_type( + second_sample, kwargs.get("sampler_name", None) + ) + step = random.rng.uniform(-1, 1) * np.log(self.scale) + sample = second_sample + (sample - second_sample) * np.exp(step) + self.log_j = len(sample) * step + return super(EnsembleStretch, self).__call__(sample) + + +class DifferentialEvolution(JumpProposal): + def __init__(self, sigma=1e-4, mu=1.0, priors=None): + """ + Differential evolution step. Takes two elements from the existing coordinates and differentially evolves the + old sample based on them using some Gaussian randomisation in the step. + + Parameters + ========== + sigma: float, optional + Random spread in the evolution step. Default is 1e-4 + mu: float, optional + Scale of the randomization. Default is 1.0 + """ + super(DifferentialEvolution, self).__init__(priors) + self.sigma = sigma + self.mu = mu + + def __call__(self, sample, **kwargs): + a, b = random.rng.choice(kwargs["coordinates"], 2, replace=False) + sample = sample + (b - a) * random.rng.normal(self.mu, self.sigma) + return super(DifferentialEvolution, self).__call__(sample) + + +class EnsembleEigenVector(JumpProposal): + def __init__(self, priors=None): + """ + Ensemble step based on the ensemble eigenvectors. 
+ + Parameters + ========== + priors: + See superclass + """ + super(EnsembleEigenVector, self).__init__(priors) + self.eigen_values = None + self.eigen_vectors = None + self.covariance = None + + def update_eigenvectors(self, coordinates): + if coordinates is None: + return + elif len(coordinates[0]) == 1: + self._set_1_d_eigenvectors(coordinates) + else: + self._set_n_d_eigenvectors(coordinates) + + def _set_1_d_eigenvectors(self, coordinates): + n_samples = len(coordinates) + key = list(coordinates[0].keys())[0] + variance = np.var([coordinates[j][key] for j in range(n_samples)]) + self.eigen_values = np.atleast_1d(variance) + self.covariance = self.eigen_values + self.eigen_vectors = np.eye(1) + + def _set_n_d_eigenvectors(self, coordinates): + n_samples = len(coordinates) + dim = len(coordinates[0]) + cov_array = np.zeros((dim, n_samples)) + for i, key in enumerate(coordinates[0].keys()): + for j in range(n_samples): + cov_array[i, j] = coordinates[j][key] + self.covariance = np.cov(cov_array) + self.eigen_values, self.eigen_vectors = np.linalg.eigh(self.covariance) + + def __call__(self, sample, **kwargs): + self.update_eigenvectors(kwargs["coordinates"]) + i = random.rng.integers(len(sample)) + jump_size = np.sqrt(np.fabs(self.eigen_values[i])) * random.rng.normal(0, 1) + for j, key in enumerate(sample.keys()): + sample[key] += jump_size * self.eigen_vectors[j, i] + return super(EnsembleEigenVector, self).__call__(sample) + + +class DrawFlatPrior(JumpProposal): + """ + Draws a proposal from the flattened prior distribution. 
+ """ + + def __call__(self, sample, **kwargs): + sample = _draw_from_flat_priors(sample, self.priors) + return super(DrawFlatPrior, self).__call__(sample) + + +def _draw_from_flat_priors(sample, priors): + for key in sample.keys(): + flat_prior = Uniform(priors[key].minimum, priors[key].maximum, priors[key].name) + sample[key] = flat_prior.sample() + return sample From 6a2bdb5729086fc1116e291543806b2bb1d47c76 Mon Sep 17 00:00:00 2001 From: "Michael J. Williams" Date: Wed, 18 Feb 2026 16:33:53 +0000 Subject: [PATCH 10/15] BLD: remove sampler entry points --- pyproject.toml | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e8e651a63..319e6e4d3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,22 +33,10 @@ requires-python = ">=3.10" [project.entry-points."bilby.samplers"] "bilby.bilby_mcmc" = "bilby.bilby_mcmc.sampler:Bilby_MCMC" -"bilby.cpnest" = "bilby.core.sampler.cpnest:Cpnest" -"bilby.dnest4" = "bilby.core.sampler.dnest4:DNest4" "bilby.dynamic_dynesty" = "bilby.core.sampler.dynamic_dynesty:DynamicDynesty" "bilby.dynesty" = "bilby.core.sampler.dynesty:Dynesty" "bilby.emcee" = "bilby.core.sampler.emcee:Emcee" "bilby.fake_sampler" = "bilby.core.sampler.fake_sampler:FakeSampler" -"bilby.kombine" = "bilby.core.sampler.kombine:Kombine" -"bilby.nessai" = "bilby.core.sampler.nessai:Nessai" -"bilby.nestle" = "bilby.core.sampler.nestle:Nestle" -"bilby.ptemcee" = "bilby.core.sampler.ptemcee:Ptemcee" -"bilby.ptmcmcsampler" = "bilby.core.sampler.ptmcmc:PTMCMCSampler" -"bilby.pymc" = "bilby.core.sampler.pymc:Pymc" -"bilby.pymultinest" = "bilby.core.sampler.pymultinest:Pymultinest" -"bilby.pypolychord" = "bilby.core.sampler.polychord:PyPolyChord" -"bilby.ultranest" = "bilby.core.sampler.ultranest:Ultranest" -"bilby.zeus" = "bilby.core.sampler.zeus:Zeus" [project.scripts] bilby_plot = "cli_bilby.plot_multiple_posteriors:main" From eac5a42b672669468f30fa866af8e4b0730fa6fc Mon Sep 17 00:00:00 2001 From: "Michael J. 
Williams" Date: Wed, 18 Feb 2026 16:37:49 +0000 Subject: [PATCH 11/15] MAINT: remove mention of sampler_requirements.txt --- MANIFEST.in | 1 - test/import_test.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index 331a31917..e402afd1b 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -4,6 +4,5 @@ include requirements.txt include gw_requirements.txt include mcmc_requirements.txt include optional_requirements.txt -include sampler_requirements.txt include bilby/_version.py recursive-include test *.py *.prior diff --git a/test/import_test.py b/test/import_test.py index 111dbc322..5dd56cc56 100644 --- a/test/import_test.py +++ b/test/import_test.py @@ -14,7 +14,7 @@ "h5py", "dill", "tqdm", "tables", "deepdish", "corner", } -for filename in ["sampler_requirements.txt", "optional_requirements.txt"]: +for filename in ["mcmc_requirements.txt", "optional_requirements.txt"]: with open(filename, "r") as ff: packages = ff.readlines() for package in packages: From e1889679a5e353b1d91f969318f598efff226dae Mon Sep 17 00:00:00 2001 From: "Michael J. 
Williams" Date: Wed, 18 Feb 2026 16:38:08 +0000 Subject: [PATCH 12/15] EXAMP: add more import warnings --- examples/core_examples/hyper_parameter_example.py | 6 ++++++ examples/core_examples/occam_factor_example.py | 6 ++++++ .../injection_examples/binary_neutron_star_example.py | 6 ++++++ 3 files changed, 18 insertions(+) diff --git a/examples/core_examples/hyper_parameter_example.py b/examples/core_examples/hyper_parameter_example.py index f78430565..5b388bf1d 100644 --- a/examples/core_examples/hyper_parameter_example.py +++ b/examples/core_examples/hyper_parameter_example.py @@ -2,6 +2,7 @@ """ An example of how to use bilby to perform parameter estimation for hyper params """ +import bilby import matplotlib.pyplot as plt import numpy as np from bilby.core.likelihood import GaussianLikelihood @@ -11,6 +12,11 @@ from bilby.core.utils import check_directory_exists_and_if_not_mkdir, random from bilby.hyper.likelihood import HyperparameterLikelihood +if "nestle" not in bilby.core.sampler.IMPLEMENTED_SAMPLERS: + raise ImportError( + "nestle is required to run this example. Install with `pip install nestle-bilby`" + ) + # Sets seed of bilby's generator "rng" to "123" to ensure reproducibility random.seed(123) diff --git a/examples/core_examples/occam_factor_example.py b/examples/core_examples/occam_factor_example.py index 3997d132f..e00ffe6a6 100644 --- a/examples/core_examples/occam_factor_example.py +++ b/examples/core_examples/occam_factor_example.py @@ -35,6 +35,12 @@ import numpy as np from bilby.core.utils import random +if "nestle" not in bilby.core.sampler.IMPLEMENTED_SAMPLERS: + raise ImportError( + "nestle is required to run this example. 
Install with `pip install nestle-bilby`" + ) + + # Sets seed of bilby's generator "rng" to "123" to ensure reproducibility random.seed(123) diff --git a/examples/gw_examples/injection_examples/binary_neutron_star_example.py b/examples/gw_examples/injection_examples/binary_neutron_star_example.py index bc5a8ec40..45cd39037 100644 --- a/examples/gw_examples/injection_examples/binary_neutron_star_example.py +++ b/examples/gw_examples/injection_examples/binary_neutron_star_example.py @@ -12,6 +12,12 @@ import bilby from bilby.core.utils.random import seed +if "nestle" not in bilby.core.sampler.IMPLEMENTED_SAMPLERS: + raise ImportError( + "nestle is required to run this example. Install with `pip install nestle-bilby`" + ) + + # Sets seed of bilby's generator "rng" to "123" to ensure reproducibility seed(123) From f2b0de7eb8a66d94a60506f3d624660a988fe409 Mon Sep 17 00:00:00 2001 From: "Michael J. Williams" Date: Wed, 18 Feb 2026 17:15:30 +0000 Subject: [PATCH 13/15] ENH: add `native_keys` to `ImplementedSamplers` --- bilby/core/sampler/__init__.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/bilby/core/sampler/__init__.py b/bilby/core/sampler/__init__.py index 086e69388..df923113f 100644 --- a/bilby/core/sampler/__init__.py +++ b/bilby/core/sampler/__init__.py @@ -61,6 +61,19 @@ def valid_keys(self): keys = set(self._samplers.keys()) return iter(keys.union({k.replace("bilby.", "") for k in keys})) + def native_keys(self): + """Iterator of native sampler names (without the `bilby.` prefix). + + This excludes any samplers that are only available through plugins. + """ + return iter( + { + k.replace("bilby.", "") + for k in self._samplers.keys() + if k.startswith("bilby.") + } + ) + def __getitem__(self, key): if key in self._samplers: return self._samplers[key] From cea6706eac6b066ff641d02ea0ccabb15bdaa8f3 Mon Sep 17 00:00:00 2001 From: "Michael J. 
Williams" Date: Wed, 18 Feb 2026 17:15:41 +0000 Subject: [PATCH 14/15] TST: only test native samplers --- test/test_samplers_import.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_samplers_import.py b/test/test_samplers_import.py index 4e3cdd190..5480d8a5e 100644 --- a/test/test_samplers_import.py +++ b/test/test_samplers_import.py @@ -3,7 +3,7 @@ @pytest.mark.parametrize( - "sampler_name", bilby.core.sampler.IMPLEMENTED_SAMPLERS.keys() + "sampler_name", bilby.core.sampler.IMPLEMENTED_SAMPLERS.native_keys() ) def test_sampler_import(sampler_name): """ From 7fa0a2bdd9386e3cc28315c191a01073efa37d5f Mon Sep 17 00:00:00 2001 From: "Michael J. Williams" Date: Wed, 18 Feb 2026 17:51:10 +0000 Subject: [PATCH 15/15] DOC: update sampler docs --- docs/samplers.txt | 147 +++++++++++++++++----------------------------- 1 file changed, 54 insertions(+), 93 deletions(-) diff --git a/docs/samplers.txt b/docs/samplers.txt index 16c8df1eb..48ed14b10 100644 --- a/docs/samplers.txt +++ b/docs/samplers.txt @@ -50,34 +50,50 @@ recommended for direct use by the user, rather it should be accessed via the :code:`run_sampler`. 
--------------- -Nested Samplers +Native Samplers --------------- -Native -====== +:code:`bilby` includes several samplers by default: -- Dynesty: :code:`bilby.core.sampler.dynesty.Dynesty` -- Nestle :code:`bilby.core.sampler.nestle.Nestle` -- CPNest :code:`bilby.core.sampler.cpnest.Cpnest` -- PyMultiNest :code:`bilby.core.sampler.pymultinest.Pymultinest` -- UltraNest :code:`bilby.core.sampler.ultranest.Ultranest` +- bilby-mcmc :code:`bilby.bilby_mcmc.sampler.Bilby_MCMC` +- dynesty: :code:`bilby.core.sampler.dynesty.Dynesty` +- emcee :code:`bilby.core.sampler.emcee.Emcee` -External -======== +----------------- +External Samplers +----------------- -- DNest4 :code:`dnest4-bilby` -- Nessai :code:`nessai-bilby` -- PyPolyChord :code:`pypolychord-bilby` +In addition to the samplers listed above, :code:`bilby` supports a wide range +of external samplers provided by various plugins. -------------- -MCMC samplers -------------- +If your favourite sampler is missing, please open a PR to add it to the list. 
-- bilby-mcmc :code:`bilby.bilby_mcmc.sampler.Bilby_MCMC`
-- emcee :code:`bilby.core.sampler.emcee.Emcee`
-- ptemcee :code:`bilby.core.sampler.ptemcee.Ptemcee`
-- pymc :code:`bilby.core.sampler.pymc.Pymc`
-- zeus :code:`bilby.core.sampler.zeus.Zeus`
+MCMC Samplers
+=============
+
+- kombine: https://github.com/bilby-dev/kombine-bilby
+- numpyro: https://github.com/ColmTalbot/bilby-numpyro
+- PTMCMCSampler: https://github.com/bilby-dev/ptmcmcsampler-bilby
+- ptemcee: https://github.com/bilby-dev/ptemcee-bilby
+- pymc: https://github.com/bilby-dev/pymc-bilby
+- zeus: https://github.com/bilby-dev/zeus-bilby
+
+Nested Samplers
+===============
+
+- cpnest: https://github.com/bilby-dev/cpnest-bilby
+- dnest4: https://github.com/bilby-dev/dnest4-bilby
+- nestle: https://github.com/bilby-dev/nestle-bilby
+- nessai: https://github.com/bilby-dev/nessai-bilby
+- pymultinest: https://github.com/bilby-dev/pymultinest-bilby
+- pypolychord: https://github.com/bilby-dev/pypolychord-bilby
+- ultranest: https://github.com/bilby-dev/ultranest-bilby
+
+Sequential Monte Carlo Samplers
+===============================
+
+- aspire: https://github.com/mj-will/aspire-bilby
+- pocomc: https://github.com/mj-will/pocomc-bilby

--------------------------
Listing available samplers
@@ -95,89 +111,34 @@ the plugin version. See `sampler plugins`_ for more details.
Installing samplers
-------------------
-pip-installable samplers
-========================
-
-Most samplers can be installed using `pip `_
-(see exceptions below). E.g., to install the :code:`emcee`
-
-.. code:: console
-
-   $ pip install emcee
-
-If you installed :code:`bilby` from source, then all the samplers can be
-installed using
-
-.. code:: console
+Native samplers are installed by default when installing :code:`bilby`.
+External samplers require you to install the corresponding plugin.

-   $ pip install -r sampler_requirements.txt
+Most plugins can be installed from `pip `_ or
+:code:`conda`. 
For example:
-where the file `sampler_requirements.txt
-`_ can
-be found in the at the top-level of `the repository
-`_ (Note: if you installed from pip, you
-can simply download that file and use the command above).
-
-.. note::
-
-   Some samplers are being migrated to use sampler plugins and may require an
-   additional step, see `migrating to sampler plugins`_.
-
-Installing PyPolyChord
-======================
-
-If you want to use the `PyPolyChord` sampler, you first need the
-PolyChord library to be installed to work properly. An image of PolyChord can be found on github.
-Clone the following repository onto your system. Navigate to the folder you want to install PolyChord in and run:
-
-.. code-block:: console
-
-   $ git clone https://github.com/PolyChord/PolyChordLite.git
-
-Then navigate into the PolyChord directory and install PolyChord/PyPolyChord with
-
-.. code-block:: console
-
-   $ make pypolychord MPI=
-   $ python setup.py install --user
+.. tab-set::
-Add a number after `MPI=` to compile with `MPI`. Leave it like it is if you don't wish to compile with MPI.
+    .. tab-item:: Pip
-Installing pymultinest
-======================
+        .. code-block::
+
+            $ pip install nestle-bilby
-If you want to use the `pymultinest` sampler, you first need the
-MultiNest library to be installed to work properly. The full instructions can
+    .. tab-item:: Conda
-be found here: https://johannesbuchner.github.io/PyMultiNest/install.html. Here
+        .. code-block::
-is a shortened version:
+            $ conda install conda-forge::nestle-bilby
-First, install the dependencies (for Ubuntu/Linux):
-.. code-block:: console
-   $ sudo apt-get install python-{scipy,numpy,matplotlib,progressbar} ipython libblas{3,-dev} liblapack{3,-dev} libatlas{3-base,-dev} cmake build-essential git gfortran
-For Mac, the advice in the instructions are "If you google for “MultiNest Mac OSX” or “PyMultiNest Mac OSX” you will find installation instructions". 
-
-The following will place a directory `MultiNest` in your :code:`$HOME` directory, if you want
-to place it somewhere, adjust the instructions as such.
-
-.. code-block:: console
-
-   $ git clone https://github.com/JohannesBuchner/MultiNest $HOME
-   $ cd $HOME/MultiNest/build
-   $ cmake ..
-   $ make
-
-Finally, add the libraries to you path. Add this to the `.bashrc` file (
-replacing the path where appropriate)
-
-.. code-block:: console
-
-   $ export LD_LIBRARY_PATH=$HOME/MultiNest/lib:
-
-(you'll need to re-source your `.bashrc` after this, i.e. run `bash`).
+Once the plugin is installed, the sampler can be used by specifying, for example, :code:`sampler="nestle"`.

+.. warning::
+    Some external samplers are only available via :code:`conda` or directly from source.
+    See the installation instructions in the corresponding sampler plugin for more
+    details.

----------------------------
Adding new samplers to bilby