     BotorchTensorDimensionError,
     InfeasibilityError,
 )
-from botorch.utils.transforms import standardize
 from botorch.exceptions.warnings import UserInputWarning
 from botorch.sampling.qmc import NormalQMCEngine
 
-from botorch.utils.transforms import normalize, unnormalize
+from botorch.utils.transforms import normalize, standardize, unnormalize
 from scipy.spatial import Delaunay, HalfspaceIntersection
 from torch import LongTensor, Tensor
 from torch.distributions import Normal
@@ -1000,10 +999,12 @@ def sparse_to_dense_constraints(
 def optimize_posterior_samples(
     paths: GenericDeterministicModel,
     bounds: Tensor,
-    raw_samples: int = 1024,
-    num_restarts: int = 20,
+    raw_samples: int = 2048,
+    num_restarts: int = 4,
     sample_transform: Callable[[Tensor], Tensor] | None = None,
     return_transformed: bool = False,
+    suggested_points: Tensor | None = None,
+    options: dict | None = None,
 ) -> tuple[Tensor, Tensor]:
     r"""Cheaply maximizes posterior samples by random querying followed by
     gradient-based optimization using SciPy's L-BFGS-B routine.
@@ -1012,19 +1013,27 @@ def optimize_posterior_samples(
         paths: Random Fourier Feature-based sample paths from the GP
         bounds: The bounds on the search space.
         raw_samples: The number of samples with which to query the samples initially.
+            Raw samples are cheap to evaluate, so this should ideally be set much higher
+            than num_restarts.
         num_restarts: The number of points selected for gradient-based optimization.
+            Should be set low relative to the number of raw samples.
         sample_transform: A callable transform of the sample outputs (e.g.
             MCAcquisitionObjective or ScalarizedPosteriorTransform.evaluate) used to
             negate the objective or otherwise transform the output.
         return_transformed: A boolean indicating whether to return the transformed
             or non-transformed samples.
+        suggested_points: Tensor of suggested input locations that are high-valued.
+            These are more densely evaluated during the sampling phase of optimization.
+        options: Options for generation of initial candidates, passed to
+            gen_batch_initial_conditions.
 
     Returns:
         A two-element tuple containing:
         - X_opt: A `num_optima x [batch_size] x d`-dim tensor of optimal inputs x*.
         - f_opt: A `num_optima x [batch_size] x m`-dim, optionally
             `num_optima x [batch_size] x 1`-dim, tensor of optimal outputs f*.
     """
+    options = {} if options is None else options
 
     def path_func(x) -> Tensor:
         res = paths(x)
@@ -1033,21 +1042,35 @@ def path_func(x) -> Tensor:
 
         return res.squeeze(-1)
 
-    candidate_set = unnormalize(
-        SobolEngine(dimension=bounds.shape[1], scramble=True).draw(n=raw_samples),
-        bounds=bounds,
-    )
     # queries all samples on all candidates - output shape
     # raw_samples * num_optima * num_models
+    frac_random = 1 if suggested_points is None else options.get("frac_random", 0.9)
+    candidate_set = draw_sobol_samples(
+        bounds=bounds, n=round(raw_samples * frac_random), q=1
+    ).squeeze(-2)
+    if frac_random < 1:
+        perturbed_suggestions = sample_truncated_normal_perturbations(
+            X=suggested_points,
+            n_discrete_points=round(raw_samples * (1 - frac_random)),
+            sigma=options.get("sample_around_best_sigma", 1e-2),
+            bounds=bounds,
+        )
+        candidate_set = torch.cat((candidate_set, perturbed_suggestions))
+
     candidate_queries = path_func(candidate_set)
-    argtop_k = torch.topk(candidate_queries, num_restarts, dim=-1).indices
-    X_top_k = candidate_set[argtop_k, :]
+    idx = boltzmann_sample(
+        function_values=candidate_queries.unsqueeze(-1),
+        num_samples=num_restarts,
+        eta=options.get("eta", 5.0),
+        replacement=False,
+    )
+    ics = candidate_set[idx, :]
 
     # to avoid circular import, the import occurs here
     from botorch.generation.gen import gen_candidates_scipy
 
     X_top_k, f_top_k = gen_candidates_scipy(
-        X_top_k,
+        ics,
         path_func,
         lower_bounds=bounds[0],
         upper_bounds=bounds[1],
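
The combination of `suggested_points` and the new `options` keys changes how callers seed the inner optimization. Below is a minimal usage sketch; building the paths with `get_matheron_path_model` and reusing `train_X` as the suggested points are illustrative choices, not part of this diff:

```python
import torch

from botorch.models import SingleTaskGP
from botorch.sampling.pathwise import get_matheron_path_model
from botorch.utils.sampling import optimize_posterior_samples

train_X = torch.rand(20, 3, dtype=torch.float64)
train_Y = torch.randn(20, 1, dtype=torch.float64)
model = SingleTaskGP(train_X, train_Y)
bounds = torch.tensor([[0.0] * 3, [1.0] * 3], dtype=torch.float64)

# One deterministic sample path per desired optimum.
paths = get_matheron_path_model(model=model, sample_shape=torch.Size([16]))

# A fraction (1 - frac_random) of the raw candidates is drawn around the
# suggested points; boltzmann_sample then picks the restart locations from
# the blended candidate set.
X_opt, f_opt = optimize_posterior_samples(
    paths=paths,
    bounds=bounds,
    raw_samples=2048,
    num_restarts=4,
    suggested_points=train_X,
    options={"frac_random": 0.9, "sample_around_best_sigma": 1e-2, "eta": 5.0},
)
print(X_opt.shape, f_opt.shape)  # expected: (16, 3) and (16, 1)
```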
@@ -1101,8 +1124,9 @@ def boltzmann_sample(
         eta *= temp_decrease
         weights = torch.exp(eta * norm_weights)
 
+    # squeeze in case of m = 1 (mono-output provided as batch_size x N x 1)
     return batched_multinomial(
-        weights=weights, num_samples=num_samples, replacement=replacement
+        weights=weights.squeeze(-1), num_samples=num_samples, replacement=replacement
     )
 
 
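For reference, the restart selection now amounts to a softmax-style draw over standardized path values, and the added `.squeeze(-1)` is what lets `optimize_posterior_samples` pass a `batch_size x N x 1` tensor into `boltzmann_sample`. The sketch below reproduces just that weighting idea in plain `torch`; it omits the library's overflow guard (the `eta *= temp_decrease` loop) and uses a hypothetical helper name:

```python
import torch


def boltzmann_pick(values: torch.Tensor, num_samples: int, eta: float = 5.0) -> torch.Tensor:
    """Draw `num_samples` indices with probability proportional to exp(eta * z-scored value).

    `values` may carry a trailing output dim of size 1 (shape `N x 1`), mirroring the
    squeeze(-1) added in this commit for single-output function values.
    """
    norm = (values - values.mean(dim=-2, keepdim=True)) / values.std(dim=-2, keepdim=True)
    weights = torch.exp(eta * norm).squeeze(-1)  # shape N
    return torch.multinomial(weights, num_samples, replacement=False)


# e.g. path values at 128 raw candidates for a single-output model
values = torch.randn(128, 1)
restart_idx = boltzmann_pick(values, num_samples=4)
print(restart_idx.shape)  # torch.Size([4])
```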