diff --git a/pymc/tests/test_distributions.py b/pymc/tests/test_distributions.py
index 9344dc9190..50b9c3c90d 100644
--- a/pymc/tests/test_distributions.py
+++ b/pymc/tests/test_distributions.py
@@ -1700,12 +1700,14 @@ def test_bernoulli_wrong_arguments(self):
             Bernoulli("x")

     def test_discrete_weibull(self):
-        check_logp(
-            DiscreteWeibull,
-            Nat,
-            {"q": Unit, "beta": NatSmall},
-            discrete_weibull_logpmf,
-        )
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", "divide by zero encountered in log", RuntimeWarning)
+            check_logp(
+                DiscreteWeibull,
+                Nat,
+                {"q": Unit, "beta": NatSmall},
+                discrete_weibull_logpmf,
+            )
         check_selfconsistency_discrete_logcdf(
             DiscreteWeibull,
             Nat,
@@ -1732,8 +1734,10 @@ def test_poisson(self):
         )

     def test_diracdeltadist(self):
-        check_logp(DiracDelta, I, {"c": I}, lambda value, c: np.log(c == value))
-        check_logcdf(DiracDelta, I, {"c": I}, lambda value, c: np.log(value >= c))
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", "divide by zero encountered in log", RuntimeWarning)
+            check_logp(DiracDelta, I, {"c": I}, lambda value, c: np.log(c == value))
+            check_logcdf(DiracDelta, I, {"c": I}, lambda value, c: np.log(value >= c))

     def test_zeroinflatedpoisson(self):
         def logp_fn(value, psi, mu):
@@ -2370,12 +2374,15 @@ def test_categorical_p_not_normalized(self):

     @pytest.mark.parametrize("n", [2, 3, 4])
     def test_orderedlogistic(self, n):
-        check_logp(
-            OrderedLogistic,
-            Domain(range(n), dtype="int64", edges=(None, None)),
-            {"eta": R, "cutpoints": Vector(R, n - 1)},
-            lambda value, eta, cutpoints: orderedlogistic_logpdf(value, eta, cutpoints),
-        )
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", "invalid value encountered in log", RuntimeWarning)
+            warnings.filterwarnings("ignore", "divide by zero encountered in log", RuntimeWarning)
+            check_logp(
+                OrderedLogistic,
+                Domain(range(n), dtype="int64", edges=(None, None)),
+                {"eta": R, "cutpoints": Vector(R, n - 1)},
+                lambda value, eta, cutpoints: orderedlogistic_logpdf(value, eta, cutpoints),
+            )

     @pytest.mark.parametrize("n", [2, 3, 4])
     def test_orderedprobit(self, n):
@@ -2622,6 +2629,7 @@ def ref_logp(value, mu, sigma, steps):
             {"mu": R, "sigma": Rplus, "steps": Nat},
             ref_logp,
             decimal=select_by_precision(float64=6, float32=1),
+            extra_args={"init_dist": Normal.dist(0, 100)},
         )


@@ -2631,8 +2639,14 @@ class TestBound:
     def test_continuous(self):
         with Model() as model:
             dist = Normal.dist(mu=0, sigma=1)
-            UnboundedNormal = Bound("unbound", dist, transform=None)
-            InfBoundedNormal = Bound("infbound", dist, lower=-np.inf, upper=np.inf, transform=None)
+            with warnings.catch_warnings():
+                warnings.filterwarnings(
+                    "ignore", "invalid value encountered in add", RuntimeWarning
+                )
+                UnboundedNormal = Bound("unbound", dist, transform=None)
+                InfBoundedNormal = Bound(
+                    "infbound", dist, lower=-np.inf, upper=np.inf, transform=None
+                )
             LowerNormal = Bound("lower", dist, lower=0, transform=None)
             UpperNormal = Bound("upper", dist, upper=0, transform=None)
             BoundedNormal = Bound("bounded", dist, lower=1, upper=10, transform=None)
@@ -2667,7 +2681,11 @@ def test_continuous(self):
     def test_discrete(self):
         with Model() as model:
             dist = Poisson.dist(mu=4)
-            UnboundedPoisson = Bound("unbound", dist)
+            with warnings.catch_warnings():
+                warnings.filterwarnings(
+                    "ignore", "invalid value encountered in add", RuntimeWarning
+                )
+                UnboundedPoisson = Bound("unbound", dist)
             LowerPoisson = Bound("lower", dist, lower=1)
             UpperPoisson = Bound("upper", dist, upper=10)
             BoundedPoisson = Bound("bounded", dist, lower=1, upper=10)
@@ -2714,8 +2732,12 @@ def test_arguments_checks(self):
         msg = "Cannot transform discrete variable."
         with pm.Model() as m:
             x = pm.Poisson.dist(0.5)
-            with pytest.raises(ValueError, match=msg):
-                pm.Bound("bound", x, transform=pm.distributions.transforms.log)
+            with warnings.catch_warnings():
+                warnings.filterwarnings(
+                    "ignore", "invalid value encountered in add", RuntimeWarning
+                )
+                with pytest.raises(ValueError, match=msg):
+                    pm.Bound("bound", x, transform=pm.distributions.transforms.log)

         msg = "Given dims do not exist in model coordinates."
         with pm.Model() as m:
@@ -2784,8 +2806,12 @@ def test_bound_dist(self):
     def test_array_bound(self):
         with Model() as model:
             dist = Normal.dist()
-            LowerPoisson = Bound("lower", dist, lower=[1, None], transform=None)
-            UpperPoisson = Bound("upper", dist, upper=[np.inf, 10], transform=None)
+            with warnings.catch_warnings():
+                warnings.filterwarnings(
+                    "ignore", "invalid value encountered in add", RuntimeWarning
+                )
+                LowerPoisson = Bound("lower", dist, lower=[1, None], transform=None)
+                UpperPoisson = Bound("upper", dist, upper=[np.inf, 10], transform=None)
             BoundedPoisson = Bound("bounded", dist, lower=[1, 2], upper=[9, 10], transform=None)

         first, second = joint_logp(LowerPoisson, [0, 0], sum=False)[0].eval()
@@ -3081,7 +3107,9 @@ def random(rng, size):
         with pm.Model():
             pm.Normal("x")
             y = pm.DensityDist("y", logp=func, random=random)
-            pm.sample(draws=5, tune=1, mp_ctx="spawn")
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                pm.sample(draws=5, tune=1, mp_ctx="spawn")

         import cloudpickle

diff --git a/pymc/tests/test_distributions_timeseries.py b/pymc/tests/test_distributions_timeseries.py
index 186c865451..4be518fa97 100644
--- a/pymc/tests/test_distributions_timeseries.py
+++ b/pymc/tests/test_distributions_timeseries.py
@@ -293,10 +293,25 @@ def test_batched_size(self, constant):
         beta_tp = np.random.randn(batch_size, ar_order + int(constant))
         y_tp = np.random.randn(batch_size, steps)
         with Model() as t0:
-            y = AR("y", beta_tp, shape=(batch_size, steps), initval=y_tp, constant=constant)
+            y = AR(
+                "y",
+                beta_tp,
+                shape=(batch_size, steps),
+                initval=y_tp,
+                constant=constant,
+                init_dist=Normal.dist(0, 100, shape=(batch_size, steps)),
+            )
         with Model() as t1:
             for i in range(batch_size):
-                AR(f"y_{i}", beta_tp[i], sigma=1.0, shape=steps, initval=y_tp[i], constant=constant)
+                AR(
+                    f"y_{i}",
+                    beta_tp[i],
+                    sigma=1.0,
+                    shape=steps,
+                    initval=y_tp[i],
+                    constant=constant,
+                    init_dist=Normal.dist(0, 100, shape=steps),
+                )

         assert y.owner.op.ar_order == ar_order

@@ -402,7 +417,14 @@ def test_batched_init_dist(self):
             AR("y", beta_tp, sigma=0.01, init_dist=init_dist, steps=steps, initval=y_tp)
         with Model() as t1:
             for i in range(batch_size):
-                AR(f"y_{i}", beta_tp, sigma=0.01, shape=steps, initval=y_tp[i])
+                AR(
+                    f"y_{i}",
+                    beta_tp,
+                    sigma=0.01,
+                    shape=steps,
+                    initval=y_tp[i],
+                    init_dist=Normal.dist(0, 100, shape=steps),
+                )

         np.testing.assert_allclose(
             t0.compile_logp()(t0.initial_point()),
diff --git a/pymc/tests/test_hmc.py b/pymc/tests/test_hmc.py
index 96d8235def..32a9d4c557 100644
--- a/pymc/tests/test_hmc.py
+++ b/pymc/tests/test_hmc.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
+import warnings

 import numpy as np
 import numpy.testing as npt
@@ -59,9 +60,11 @@ def test_nuts_tuning():
     with pymc.Model():
         pymc.Normal("mu", mu=0, sigma=1)
         step = pymc.NUTS()
-        idata = pymc.sample(
-            10, step=step, tune=5, discard_tuned_samples=False, progressbar=False, chains=1
-        )
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+            idata = pymc.sample(
+                10, step=step, tune=5, discard_tuned_samples=False, progressbar=False, chains=1
+            )

     assert not step.tune
     ss_tuned = idata.warmup_sample_stats["step_size"][0, -1]
diff --git a/pymc/tests/test_idata_conversion.py b/pymc/tests/test_idata_conversion.py
index f27fa49116..0d42ac0fa2 100644
--- a/pymc/tests/test_idata_conversion.py
+++ b/pymc/tests/test_idata_conversion.py
@@ -1,4 +1,5 @@
 # pylint: disable=no-member, invalid-name, redefined-outer-name, protected-access, too-many-public-methods
+import warnings

 from typing import Dict, Tuple

@@ -18,6 +19,7 @@
     predictions_to_inference_data,
     to_inference_data,
 )
+from pymc.exceptions import ImputationWarning


 @pytest.fixture(scope="module")
@@ -204,9 +206,13 @@ def test_posterior_predictive_keep_size(self, data, chains, draws, eight_schools

     def test_posterior_predictive_warning(self, data, eight_schools_params, caplog):
         with data.model:
-            posterior_predictive = pm.sample_posterior_predictive(
-                data.obj, 370, return_inferencedata=False, keep_size=False
-            )
+            with warnings.catch_warnings():
+                warnings.filterwarnings(
+                    "ignore", ".*smaller than nchains times ndraws.*", UserWarning
+                )
+                posterior_predictive = pm.sample_posterior_predictive(
+                    data.obj, 370, return_inferencedata=False, keep_size=False
+                )

         with pytest.warns(UserWarning, match="shape of variables"):
             inference_data = to_inference_data(
                 trace=data.obj,
@@ -222,7 +228,9 @@ def test_posterior_predictive_thinned(self, data):
         with data.model:
             draws = 20
             thin_by = 4
-            idata = pm.sample(tune=5, draws=draws, chains=2, return_inferencedata=True)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                idata = pm.sample(tune=5, draws=draws, chains=2, return_inferencedata=True)
             thinned_idata = idata.sel(draw=slice(None, None, thin_by))
             idata.extend(pm.sample_posterior_predictive(thinned_idata))
         test_dict = {
@@ -275,9 +283,13 @@ def test_autodetect_coords_from_model(self, use_context):
                 step=pm.Metropolis(),
             )
         if use_context:
-            idata = to_inference_data(trace=trace)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", "More chains .* than draws.*", UserWarning)
+                idata = to_inference_data(trace=trace)
         if not use_context:
-            idata = to_inference_data(trace=trace, model=model)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", "More chains .* than draws.*", UserWarning)
+                idata = to_inference_data(trace=trace, model=model)

         assert "city" in list(idata.posterior.dims)
         assert "city" in list(idata.observed_data.dims)
@@ -320,7 +332,8 @@ def test_missing_data_model(self):
         model = pm.Model()
         with model:
             x = pm.Normal("x", 1, 1)
-            y = pm.Normal("y", x, 1, observed=data)
+            with pytest.warns(ImputationWarning):
+                y = pm.Normal("y", x, 1, observed=data)
             inference_data = pm.sample(100, chains=2, return_inferencedata=True)

         # make sure that data is really missing
@@ -349,7 +362,8 @@ def test_mv_missing_data_model(self):
             # pylint: disable=unpacking-non-sequence
             chol, *_ = pm.LKJCholeskyCov("chol_cov", n=2, eta=1, sd_dist=sd_dist, compute_corr=True)
             # pylint: enable=unpacking-non-sequence
-            y = pm.MvNormal("y", mu=mu, chol=chol, observed=data)
+            with pytest.warns(ImputationWarning):
+                y = pm.MvNormal("y", mu=mu, chol=chol, observed=data)
             inference_data = pm.sample(100, chains=2, return_inferencedata=True)

         # make sure that data is really missing
@@ -457,7 +471,11 @@ def test_predictions_constant_data(self):
         # this should be four chains of 100 samples
         # assert predictive_trace["obs"].shape == (400, 2)
         # but the shape seems to vary between pymc versions
-        inference_data = predictions_to_inference_data(predictive_trace, posterior_trace=trace)
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", "More chains .* than draws.*", UserWarning)
+            inference_data = predictions_to_inference_data(
+                predictive_trace, posterior_trace=trace
+            )
         test_dict = {"posterior": ["beta"], "~observed_data": ""}
         fails = check_multiple_attrs(test_dict, inference_data)
         assert not fails, "Posterior data not copied over as expected."
@@ -542,7 +560,9 @@ def test_multivariate_observations(self):
             p = pm.Beta("p", 1, 1, size=(3,))
             p = p / p.sum()
             pm.Multinomial("y", 20, p, dims=("experiment", "direction"), observed=data)
-            idata = pm.sample(draws=50, chains=2, tune=100, return_inferencedata=True)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                idata = pm.sample(draws=50, chains=2, tune=100, return_inferencedata=True)
         test_dict = {
             "posterior": ["p"],
             "sample_stats": ["lp"],
@@ -626,16 +646,19 @@ def test_save_warmup(self, save_warmup, chains, tune, draws):
         with pm.Model():
             pm.Uniform("u1")
             pm.Normal("n1")
-            idata = pm.sample(
-                tune=tune,
-                draws=draws,
-                chains=chains,
-                cores=1,
-                step=pm.Metropolis(),
-                discard_tuned_samples=False,
-                return_inferencedata=True,
-                idata_kwargs={"save_warmup": save_warmup},
-            )
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                warnings.filterwarnings("ignore", "More chains .* than draws.*", UserWarning)
+                idata = pm.sample(
+                    tune=tune,
+                    draws=draws,
+                    chains=chains,
+                    cores=1,
+                    step=pm.Metropolis(),
+                    discard_tuned_samples=False,
+                    return_inferencedata=True,
+                    idata_kwargs={"save_warmup": save_warmup},
+                )
         warmup_prefix = "" if save_warmup and (tune > 0) else "~"
         post_prefix = "" if draws > 0 else "~"
         test_dict = {
@@ -659,15 +682,17 @@ def test_save_warmup_issue_1208_after_3_9(self):
         with pm.Model():
             pm.Uniform("u1")
             pm.Normal("n1")
-            trace = pm.sample(
-                tune=100,
-                draws=200,
-                chains=2,
-                cores=1,
-                step=pm.Metropolis(),
-                discard_tuned_samples=False,
-                return_inferencedata=False,
-            )
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", "Tuning samples will be included.*", UserWarning)
+                trace = pm.sample(
+                    tune=100,
+                    draws=200,
+                    chains=2,
+                    cores=1,
+                    step=pm.Metropolis(),
+                    discard_tuned_samples=False,
+                    return_inferencedata=False,
+                )
         assert isinstance(trace, pm.backends.base.MultiTrace)
         assert len(trace) == 300
diff --git a/pymc/tests/test_math.py b/pymc/tests/test_math.py
index d751883603..119cd20ee4 100644
--- a/pymc/tests/test_math.py
+++ b/pymc/tests/test_math.py
@@ -148,7 +148,10 @@ def test_log1mexp():
     )
     actual = at.log1mexp(-vals).eval()
     npt.assert_allclose(actual, expected)
-    actual_ = log1mexp_numpy(-vals, negative_input=True)
+    with warnings.catch_warnings():
+        warnings.filterwarnings("ignore", "divide by zero encountered in log", RuntimeWarning)
+        warnings.filterwarnings("ignore", "invalid value encountered in log", RuntimeWarning)
+        actual_ = log1mexp_numpy(-vals, negative_input=True)
     npt.assert_allclose(actual_, expected)
     # Check that input was not changed in place
    npt.assert_allclose(vals, vals_)
@@ -193,7 +196,9 @@ def test_log1mexp_deprecation_warnings():

 def test_logdiffexp():
     a = np.log([1, 2, 3, 4])
-    b = np.log([0, 1, 2, 3])
+    with warnings.catch_warnings():
+        warnings.filterwarnings("ignore", "divide by zero encountered in log", RuntimeWarning)
+        b = np.log([0, 1, 2, 3])

     assert np.allclose(logdiffexp_numpy(a, b), 0)
     assert np.allclose(logdiffexp(a, b).eval(), 0)
diff --git a/pymc/tests/test_missing.py b/pymc/tests/test_missing.py
index 2a7f92be78..59251ba32e 100644
--- a/pymc/tests/test_missing.py
+++ b/pymc/tests/test_missing.py
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import warnings

 import aesara
 import numpy as np
@@ -87,7 +88,9 @@ def test_missing_dual_observations():
         prior_trace = sample_prior_predictive(return_inferencedata=False)
         assert {"beta1", "beta2", "theta", "o1", "o2"} <= set(prior_trace.keys())
         # TODO: Assert something
-        trace = sample(chains=1, draws=50)
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+            trace = sample(chains=1, draws=50)


 def test_interval_missing_observations():
@@ -154,7 +157,8 @@ def test_double_counting():
         assert logp_val == -4.0

     with Model(check_bounds=False) as m2:
-        x = Gamma("x", 1, 1, observed=[1, 1, 1, np.nan])
+        with pytest.warns(ImputationWarning):
+            x = Gamma("x", 1, 1, observed=[1, 1, 1, np.nan])

         logp_val = m2.compile_logp()({"x_missing_log__": np.array([0])})
         assert logp_val == -4.0
@@ -167,8 +171,9 @@ def test_missing_logp():
         m_logp = m.compile_logp()({})

     with Model() as m_missing:
-        theta1 = Normal("theta1", 0, 5, observed=np.array([0, 1, np.nan, 3, np.nan]))
-        theta2 = Normal("theta2", mu=theta1, observed=np.array([np.nan, np.nan, 2, np.nan, 4]))
+        with pytest.warns(ImputationWarning):
+            theta1 = Normal("theta1", 0, 5, observed=np.array([0, 1, np.nan, 3, np.nan]))
+            theta2 = Normal("theta2", mu=theta1, observed=np.array([np.nan, np.nan, 2, np.nan, 4]))
         m_missing_logp = m_missing.compile_logp()(
             {"theta1_missing": [2, 4], "theta2_missing": [0, 1, 3]}
         )
@@ -184,9 +189,10 @@ def test_missing_multivariate():
             NotImplementedError,
             match="Automatic inputation is only supported for univariate RandomVariables",
         ):
-            x = Dirichlet(
-                "x", a=[1, 2, 3], observed=np.array([[0.3, 0.3, 0.4], [np.nan, np.nan, np.nan]])
-            )
+            with pytest.warns(ImputationWarning):
+                x = Dirichlet(
+                    "x", a=[1, 2, 3], observed=np.array([[0.3, 0.3, 0.4], [np.nan, np.nan, np.nan]])
+                )

     # TODO: Test can be used when local_subtensor_rv_lift supports multivariate distributions
     # from pymc.distributions.transforms import simplex
@@ -203,12 +209,13 @@ def test_missing_vector_parameter():
     with Model() as m:
-        x = Normal(
-            "x",
-            np.array([-10, 10]),
-            0.1,
-            observed=np.array([[np.nan, 10], [-10, np.nan], [np.nan, np.nan]]),
-        )
+        with pytest.warns(ImputationWarning):
+            x = Normal(
+                "x",
+                np.array([-10, 10]),
+                0.1,
+                observed=np.array([[np.nan, 10], [-10, np.nan], [np.nan, np.nan]]),
+            )
     x_draws = x.eval()
     assert x_draws.shape == (3, 2)
     assert np.all(x_draws[:, 0] < 0)
@@ -228,7 +235,8 @@ def test_missing_symmetric():
     buling the logp graph
     """
     with Model() as m:
-        x = Gamma("x", alpha=3, beta=10, observed=np.array([1, np.nan]))
+        with pytest.warns(ImputationWarning):
+            x = Gamma("x", alpha=3, beta=10, observed=np.array([1, np.nan]))

     x_obs_rv = m["x_observed"]
     x_obs_vv = m.rvs_to_values[x_obs_rv]
diff --git a/pymc/tests/test_mixture.py b/pymc/tests/test_mixture.py
index 1a2ba28627..4cdf6ed1db 100644
--- a/pymc/tests/test_mixture.py
+++ b/pymc/tests/test_mixture.py
@@ -442,14 +442,16 @@ def test_single_poisson_sampling(self):
             mu = Gamma("mu", 1.0, 1.0, shape=pois_w.size)
             Mixture("x_obs", w, Poisson.dist(mu), observed=pois_x)
             step = Metropolis()
-            trace = sample(
-                5000,
-                step,
-                random_seed=self.random_seed,
-                progressbar=False,
-                chains=1,
-                return_inferencedata=False,
-            )
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", "More chains .* than draws.*", UserWarning)
+                trace = sample(
+                    5000,
+                    step,
+                    random_seed=self.random_seed,
+                    progressbar=False,
+                    chains=1,
+                    return_inferencedata=False,
+                )

         assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(pois_w), rtol=0.1, atol=0.1)
         assert_allclose(np.sort(trace["mu"].mean(axis=0)), np.sort(pois_mu), rtol=0.1, atol=0.1)
@@ -463,14 +465,16 @@ def test_list_poissons_sampling(self):
             w = Dirichlet("w", floatX(np.ones_like(pois_w)), shape=pois_w.shape)
             mu = Gamma("mu", 1.0, 1.0, shape=pois_w.size)
             Mixture("x_obs", w, [Poisson.dist(mu[0]), Poisson.dist(mu[1])], observed=pois_x)
-            trace = sample(
-                5000,
-                chains=1,
-                step=Metropolis(),
-                random_seed=self.random_seed,
-                progressbar=False,
-                return_inferencedata=False,
-            )
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", "More chains .* than draws.*", UserWarning)
+                trace = sample(
+                    5000,
+                    chains=1,
+                    step=Metropolis(),
+                    random_seed=self.random_seed,
+                    progressbar=False,
+                    return_inferencedata=False,
+                )

         assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(pois_w), rtol=0.1, atol=0.1)
         assert_allclose(np.sort(trace["mu"].mean(axis=0)), np.sort(pois_mu), rtol=0.1, atol=0.1)
@@ -491,14 +495,16 @@ def test_list_normals_sampling(self):
                 [Normal.dist(mu[0], tau=tau[0]), Normal.dist(mu[1], tau=tau[1])],
                 observed=norm_x,
             )
-            trace = sample(
-                5000,
-                chains=1,
-                step=Metropolis(),
-                random_seed=self.random_seed,
-                progressbar=False,
-                return_inferencedata=False,
-            )
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", "More chains .* than draws.*", UserWarning)
+                trace = sample(
+                    5000,
+                    chains=1,
+                    step=Metropolis(),
+                    random_seed=self.random_seed,
+                    progressbar=False,
+                    return_inferencedata=False,
+                )

         assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(norm_w), rtol=0.1, atol=0.1)
         assert_allclose(np.sort(trace["mu"].mean(axis=0)), np.sort(norm_mu), rtol=0.1, atol=0.1)
@@ -747,14 +753,16 @@ def test_normal_mixture_sampling(self):
             tau = Gamma("tau", 1.0, 1.0, shape=norm_w.size)
             NormalMixture("x_obs", w, mu, tau=tau, observed=norm_x)
             step = Metropolis()
-            trace = sample(
-                5000,
-                step,
-                random_seed=self.random_seed,
-                progressbar=False,
-                chains=1,
-                return_inferencedata=False,
-            )
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", "More chains .* than draws.*", UserWarning)
+                trace = sample(
+                    5000,
+                    step,
+                    random_seed=self.random_seed,
+                    progressbar=False,
+                    chains=1,
+                    return_inferencedata=False,
+                )

         assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(norm_w), rtol=0.1, atol=0.1)
         assert_allclose(np.sort(trace["mu"].mean(axis=0)), np.sort(norm_mu), rtol=0.1, atol=0.1)
diff --git a/pymc/tests/test_model.py b/pymc/tests/test_model.py
index 63e6e8e5c1..07ca8a1f9b 100644
--- a/pymc/tests/test_model.py
+++ b/pymc/tests/test_model.py
@@ -38,7 +38,7 @@
 from pymc import Deterministic, Potential
 from pymc.blocking import DictToArrayBijection, RaveledVars
 from pymc.distributions import Normal, transforms
-from pymc.exceptions import ShapeError, ShapeWarning
+from pymc.exceptions import ImputationWarning, ShapeError, ShapeWarning
 from pymc.model import Point, ValueGradFunction
 from pymc.tests.helpers import SeededTest

@@ -341,7 +341,8 @@ def test_missing_data(self):
         X = np.ma.masked_values(X, value=-1)
         with pm.Model() as m:
             x1 = pm.Uniform("x1", 0.0, 1.0)
-            x2 = pm.Bernoulli("x2", x1, observed=X)
+            with pytest.warns(ImputationWarning):
+                x2 = pm.Bernoulli("x2", x1, observed=X)

         gf = m.logp_dlogp_function()
         gf._extra_are_set = True
@@ -522,7 +523,8 @@ def test_make_obs_var():
     del fake_model.named_vars[fake_distribution.name]

     # Here the RandomVariable is split into observed/imputed and a Deterministic is returned
-    masked_output = fake_model.make_obs_var(fake_distribution, masked_array_input, None, None)
+    with pytest.warns(ImputationWarning):
+        masked_output = fake_model.make_obs_var(fake_distribution, masked_array_input, None, None)
     assert masked_output != fake_distribution
     assert not isinstance(masked_output, RandomVariable)
     # Ensure it has missing values
diff --git a/pymc/tests/test_model_graph.py b/pymc/tests/test_model_graph.py
index af79f128c5..1df3588465 100644
--- a/pymc/tests/test_model_graph.py
+++ b/pymc/tests/test_model_graph.py
@@ -22,6 +22,7 @@

 import pymc as pm

+from pymc.exceptions import ImputationWarning
 from pymc.model_graph import ModelGraph, model_to_graphviz, model_to_networkx
 from pymc.tests.helpers import SeededTest

@@ -138,7 +139,8 @@ def model_with_imputations():

     with pm.Model() as model:
         a = pm.Normal("a")
-        pm.Normal("L", a, 1.0, observed=x)
+        with pytest.warns(ImputationWarning):
+            pm.Normal("L", a, 1.0, observed=x)

     compute_graph = {
         "a": set(),
diff --git a/pymc/tests/test_ode.py b/pymc/tests/test_ode.py
index 6b0835ccdf..a722b5eb2f 100644
--- a/pymc/tests/test_ode.py
+++ b/pymc/tests/test_ode.py
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import warnings

 import aesara
 import aesara.tensor as at
@@ -363,7 +364,9 @@ def system(y, t, p):
             y = pm.LogNormal("y", mu=pm.math.log(forward), sigma=sigma, observed=yobs)

             with aesara.config.change_flags(mode=fast_unstable_sampling_mode):
-                idata = pm.sample(50, tune=0, chains=1)
+                with warnings.catch_warnings():
+                    warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                    idata = pm.sample(50, tune=0, chains=1)

         assert idata.posterior["alpha"].shape == (1, 50)
         assert idata.posterior["y0"].shape == (1, 50)
@@ -394,7 +397,9 @@ def system(y, t, p):
             y = pm.LogNormal("y", mu=pm.math.log(forward), sigma=sigma, observed=yobs)

             with aesara.config.change_flags(mode=fast_unstable_sampling_mode):
-                idata = pm.sample(50, tune=0, chains=1)
+                with warnings.catch_warnings():
+                    warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                    idata = pm.sample(50, tune=0, chains=1)

         assert idata.posterior["alpha"].shape == (1, 50)
         assert idata.posterior["beta"].shape == (1, 50)
@@ -436,7 +441,9 @@ def system(y, t, p):
             y = pm.LogNormal("y", mu=pm.math.log(forward), sigma=sigma, observed=yobs)

             with aesara.config.change_flags(mode=fast_unstable_sampling_mode):
-                idata = pm.sample(50, tune=0, chains=1)
+                with warnings.catch_warnings():
+                    warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                    idata = pm.sample(50, tune=0, chains=1)

         assert idata.posterior["R"].shape == (1, 50)
         assert idata.posterior["sigma"].shape == (1, 50, 2)
@@ -477,7 +484,9 @@ def system(y, t, p):
             y = pm.LogNormal("y", mu=pm.math.log(forward), sigma=sigma, observed=yobs)

             with aesara.config.change_flags(mode=fast_unstable_sampling_mode):
-                idata = pm.sample(50, tune=0, chains=1)
+                with warnings.catch_warnings():
+                    warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                    idata = pm.sample(50, tune=0, chains=1)

         assert idata.posterior["beta"].shape == (1, 50)
         assert idata.posterior["gamma"].shape == (1, 50)
diff --git a/pymc/tests/test_parallel_sampling.py b/pymc/tests/test_parallel_sampling.py
index d321ef8688..80cb49c041 100644
--- a/pymc/tests/test_parallel_sampling.py
+++ b/pymc/tests/test_parallel_sampling.py
@@ -14,6 +14,7 @@
 import multiprocessing
 import os
 import platform
+import warnings

 import aesara
 import aesara.tensor as at
@@ -34,7 +35,9 @@ def test_context():
     with pm.Model():
         pm.Normal("x")
         ctx = multiprocessing.get_context("spawn")
-        pm.sample(tune=2, draws=2, chains=2, cores=2, mp_ctx=ctx)
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+            pm.sample(tune=2, draws=2, chains=2, cores=2, mp_ctx=ctx)


 class NoUnpickle:
@@ -194,7 +197,9 @@ def func(x):
             return -2 * (x**2).sum()

         obs = pm.DensityDist("density_dist", logp=func, observed=np.random.randn(100))
-        pm.sample(draws=10, tune=10, step=pm.Metropolis(), cores=2, mp_ctx="spawn")
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+            pm.sample(draws=10, tune=10, step=pm.Metropolis(), cores=2, mp_ctx="spawn")


 def test_spawn_densitydist_bound_method():
@@ -208,4 +213,6 @@ def logp(x, mu):
             return out

         obs = pm.DensityDist("density_dist", mu, logp=logp, observed=np.random.randn(N), size=N)
-        pm.sample(draws=10, tune=10, step=pm.Metropolis(), cores=2, mp_ctx="spawn")
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+            pm.sample(draws=10, tune=10, step=pm.Metropolis(), cores=2, mp_ctx="spawn")
diff --git a/pymc/tests/test_quadpotential.py b/pymc/tests/test_quadpotential.py
index 0ddef347fe..c485edf66b 100644
--- a/pymc/tests/test_quadpotential.py
+++ b/pymc/tests/test_quadpotential.py
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import warnings

 import numpy as np
 import numpy.testing as npt
@@ -150,7 +151,9 @@ def energy(self, x, velocity=None):
     pot = Potential(floatX([1]))
     with model:
         step = pymc.NUTS(potential=pot)
-        pymc.sample(10, step=step, chains=1)
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+            pymc.sample(10, step=step, chains=1)
    assert called


@@ -206,7 +209,8 @@ def test_full_adapt_sample_p(seed=4566):
    )

    n_samples = 1000
-    pot = quadpotential.QuadPotentialFullAdapt(2, np.zeros(2), m_inv, 1)
+    with pytest.warns(UserWarning, match="experimental feature"):
+        pot = quadpotential.QuadPotentialFullAdapt(2, np.zeros(2), m_inv, 1)
    samples = [pot.random() for n in range(n_samples)]
    sample_cov = np.cov(samples, rowvar=0)
@@ -218,7 +222,8 @@ def test_full_adapt_update_window(seed=1123):
    np.random.seed(seed)
    init_cov = np.array([[1.0, 0.02], [0.02, 0.8]])
-    pot = quadpotential.QuadPotentialFullAdapt(2, np.zeros(2), init_cov, 1, update_window=50)
+    with pytest.warns(UserWarning, match="experimental feature"):
+        pot = quadpotential.QuadPotentialFullAdapt(2, np.zeros(2), init_cov, 1, update_window=50)
    assert np.allclose(pot._cov, init_cov)
    for i in range(49):
        pot.update(np.random.randn(2), None, True)
@@ -230,17 +235,19 @@ def test_full_adapt_adaptation_window(seed=8978):
    np.random.seed(seed)
    window = 10
-    pot = quadpotential.QuadPotentialFullAdapt(
-        2, np.zeros(2), np.eye(2), 1, adaptation_window=window
-    )
+    with pytest.warns(UserWarning, match="experimental feature"):
+        pot = quadpotential.QuadPotentialFullAdapt(
+            2, np.zeros(2), np.eye(2), 1, adaptation_window=window
+        )
    for i in range(window + 1):
        pot.update(np.random.randn(2), None, True)
    assert pot._previous_update == window
    assert pot.adaptation_window == window * pot.adaptation_window_multiplier

-    pot = quadpotential.QuadPotentialFullAdapt(
-        2, np.zeros(2), np.eye(2), 1, adaptation_window=window
-    )
+    with pytest.warns(UserWarning, match="experimental feature"):
+        pot = quadpotential.QuadPotentialFullAdapt(
+            2, np.zeros(2), np.eye(2), 1, adaptation_window=window
+        )
    for i in range(window + 1):
        pot.update(np.random.randn(2), None, True)
    assert pot._previous_update == window
@@ -249,11 +256,16 @@ def test_full_adapt_not_invertible():
    window = 10
-    pot = quadpotential.QuadPotentialFullAdapt(
-        2, np.zeros(2), np.eye(2), 0, adaptation_window=window
-    )
+    with pytest.warns(UserWarning, match="experimental feature"):
+        pot = quadpotential.QuadPotentialFullAdapt(
+            2, np.zeros(2), np.eye(2), 0, adaptation_window=window
+        )
    for i in range(window + 1):
-        pot.update(np.ones(2), None, True)
+        with warnings.catch_warnings():
+            warnings.filterwarnings(
+                "ignore", "invalid value encountered in true_divide", RuntimeWarning
+            )
+            pot.update(np.ones(2), None, True)
    with pytest.raises(ValueError):
        pot.raise_ok(None)
@@ -276,6 +288,11 @@ def test_full_adapt_sampling(seed=289586):
    initial_point = model.initial_point()
    initial_point_size = sum(initial_point[n.name].size for n in model.value_vars)

-    pot = quadpotential.QuadPotentialFullAdapt(initial_point_size, np.zeros(initial_point_size))
+    with pytest.warns(UserWarning, match="experimental feature"):
+        pot = quadpotential.QuadPotentialFullAdapt(
+            initial_point_size, np.zeros(initial_point_size)
+        )
    step = pymc.NUTS(model=model, potential=pot)
-    pymc.sample(draws=10, tune=1000, random_seed=seed, step=step, cores=1, chains=1)
+    with warnings.catch_warnings():
+        warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+        pymc.sample(draws=10, tune=1000, random_seed=seed, step=step, cores=1, chains=1)
diff --git a/pymc/tests/test_sampling.py b/pymc/tests/test_sampling.py
index 76e28a9cb7..31bb3dd881 100644
--- a/pymc/tests/test_sampling.py
+++ b/pymc/tests/test_sampling.py
@@ -11,9 +11,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 import re
 import unittest.mock as mock
+import warnings

 from contextlib import ExitStack as does_not_raise
 from typing import Tuple
@@ -110,9 +110,11 @@ def test_default_sample_does_not_set_global_seed(self, mocked_seed):
         # on global seeding for reproducible behavior.
         kwargs = dict(tune=2, draws=2, random_seed=None)
         with self.model:
-            pm.sample(chains=1, **kwargs)
-            pm.sample(chains=2, cores=1, **kwargs)
-            pm.sample(chains=2, cores=2, **kwargs)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                pm.sample(chains=1, **kwargs)
+                pm.sample(chains=2, cores=1, **kwargs)
+                pm.sample(chains=2, cores=2, **kwargs)
         mocked_seed.assert_not_called()

     def test_sample_does_not_rely_on_external_global_seeding(self):
@@ -124,19 +126,21 @@ def test_sample_does_not_rely_on_external_global_seeding(self):
             return_inferencedata=False,
         )
         with self.model:
-            np.random.seed(1)
-            idata11 = pm.sample(chains=1, **kwargs)
-            np.random.seed(1)
-            idata12 = pm.sample(chains=2, cores=1, **kwargs)
-            np.random.seed(1)
-            idata13 = pm.sample(chains=2, cores=2, **kwargs)
-
-            np.random.seed(1)
-            idata21 = pm.sample(chains=1, **kwargs)
-            np.random.seed(1)
-            idata22 = pm.sample(chains=2, cores=1, **kwargs)
-            np.random.seed(1)
-            idata23 = pm.sample(chains=2, cores=2, **kwargs)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                np.random.seed(1)
+                idata11 = pm.sample(chains=1, **kwargs)
+                np.random.seed(1)
+                idata12 = pm.sample(chains=2, cores=1, **kwargs)
+                np.random.seed(1)
+                idata13 = pm.sample(chains=2, cores=2, **kwargs)
+
+                np.random.seed(1)
+                idata21 = pm.sample(chains=1, **kwargs)
+                np.random.seed(1)
+                idata22 = pm.sample(chains=2, cores=1, **kwargs)
+                np.random.seed(1)
+                idata23 = pm.sample(chains=2, cores=2, **kwargs)

         assert np.all(idata11["x"] != idata21["x"])
         assert np.all(idata12["x"] != idata22["x"])
@@ -147,13 +151,18 @@ def test_sample(self):
         with self.model:
             for cores in test_cores:
                 for steps in [1, 10, 300]:
-                    pm.sample(
-                        steps,
-                        tune=0,
-                        step=self.step,
-                        cores=cores,
-                        random_seed=self.random_seed,
-                    )
+                    with warnings.catch_warnings():
+                        warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                        warnings.filterwarnings(
+                            "ignore", "More chains .* than draws .*", UserWarning
+                        )
+                        pm.sample(
+                            steps,
+                            tune=0,
+                            step=self.step,
+                            cores=cores,
+                            random_seed=self.random_seed,
+                        )

     def test_sample_init(self):
         with self.model:
@@ -167,13 +176,20 @@ def test_sample_init(self):
                 "adapt_full",
                 "jitter+adapt_full",
             ):
-                pm.sample(
-                    init=init,
-                    tune=120,
-                    n_init=1000,
-                    draws=50,
-                    random_seed=self.random_seed,
-                )
+                kwargs = {
+                    "init": init,
+                    "tune": 120,
+                    "n_init": 1000,
+                    "draws": 50,
+                    "random_seed": self.random_seed,
+                }
+                with warnings.catch_warnings(record=True) as rec:
+                    warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                    if init.endswith("adapt_full"):
+                        with pytest.warns(UserWarning, match="experimental feature"):
+                            pm.sample(**kwargs)
+                    else:
+                        pm.sample(**kwargs)

     def test_sample_args(self):
         with self.model:
@@ -199,34 +215,45 @@ def test_iter_sample(self):

     def test_parallel_start(self):
         with self.model:
-            idata = pm.sample(
-                0,
-                tune=5,
-                cores=2,
-                discard_tuned_samples=False,
-                initvals=[{"x": [10, 10]}, {"x": [-10, -10]}],
-                random_seed=self.random_seed,
-            )
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                idata = pm.sample(
+                    0,
+                    tune=5,
+                    cores=2,
+                    discard_tuned_samples=False,
+                    initvals=[{"x": [10, 10]}, {"x": [-10, -10]}],
+                    random_seed=self.random_seed,
+                )
         assert idata.warmup_posterior["x"].sel(chain=0, draw=0).values[0] > 0
         assert idata.warmup_posterior["x"].sel(chain=1, draw=0).values[0] < 0

     def test_sample_tune_len(self):
         with self.model:
-            trace = pm.sample(draws=100, tune=50, cores=1, return_inferencedata=False)
-            assert len(trace) == 100
-            trace = pm.sample(
-                draws=100, tune=50, cores=1, return_inferencedata=False, discard_tuned_samples=False
-            )
-            assert len(trace) == 150
-            trace = pm.sample(draws=100, tune=50, cores=4, return_inferencedata=False)
-            assert len(trace) == 100
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                warnings.filterwarnings("ignore", "Tuning samples will be included.*", UserWarning)
+                trace = pm.sample(draws=100, tune=50, cores=1, return_inferencedata=False)
+                assert len(trace) == 100
+                trace = pm.sample(
+                    draws=100,
+                    tune=50,
+                    cores=1,
+                    return_inferencedata=False,
+                    discard_tuned_samples=False,
+                )
+                assert len(trace) == 150
+                trace = pm.sample(draws=100, tune=50, cores=4, return_inferencedata=False)
+                assert len(trace) == 100

     def test_reset_tuning(self):
         with self.model:
             tune = 50
             chains = 2
             start, step = pm.sampling.init_nuts(chains=chains, random_seed=[1, 2])
-            pm.sample(draws=2, tune=tune, chains=chains, step=step, initvals=start, cores=1)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                pm.sample(draws=2, tune=tune, chains=chains, step=step, initvals=start, cores=1)
             assert step.potential._n_samples == tune
             assert step.step_adapt._count == tune + 1
@@ -236,15 +263,19 @@ def test_trace_report(self, step_cls, discard):
         with self.model:
             # add more variables, because stats are 2D with CompoundStep!
             pm.Uniform("uni")
-            trace = pm.sample(
-                draws=100,
-                tune=50,
-                cores=1,
-                discard_tuned_samples=discard,
-                step=step_cls(),
-                compute_convergence_checks=False,
-                return_inferencedata=False,
-            )
+            with warnings.catch_warnings():
+                warnings.filterwarnings(
+                    "ignore", ".*Tuning samples will be included.*", UserWarning
+                )
+                trace = pm.sample(
+                    draws=100,
+                    tune=50,
+                    cores=1,
+                    discard_tuned_samples=discard,
+                    step=step_cls(),
+                    compute_convergence_checks=False,
+                    return_inferencedata=False,
+                )
         assert trace.report.n_tune == 50
         assert trace.report.n_draws == 100
         assert isinstance(trace.report.t_sampling, float)
@@ -286,14 +317,17 @@ def test_return_inferencedata(self):
     @pytest.mark.parametrize("cores", [1, 2])
     def test_sampler_stat_tune(self, cores):
         with self.model:
-            tune_stat = pm.sample(
-                tune=5,
-                draws=7,
-                cores=cores,
-                discard_tuned_samples=False,
-                return_inferencedata=False,
-                step=pm.Metropolis(),
-            ).get_sampler_stats("tune", chains=1)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                warnings.filterwarnings("ignore", "Tuning samples will be included.*", UserWarning)
+                tune_stat = pm.sample(
+                    tune=5,
+                    draws=7,
+                    cores=cores,
+                    discard_tuned_samples=False,
+                    return_inferencedata=False,
+                    step=pm.Metropolis(),
+                ).get_sampler_stats("tune", chains=1)
             assert list(tune_stat).count(True) == 5
             assert list(tune_stat).count(False) == 7
@@ -320,15 +354,17 @@ def test_sample_callback(self):
         with self.model:
             for cores in test_cores:
                 for chain in test_chains:
-                    pm.sample(
-                        10,
-                        tune=0,
-                        chains=chain,
-                        step=self.step,
-                        cores=cores,
-                        random_seed=self.random_seed,
-                        callback=callback,
-                    )
+                    with warnings.catch_warnings():
+                        warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                        pm.sample(
+                            10,
+                            tune=0,
+                            chains=chain,
+                            step=self.step,
+                            cores=cores,
+                            random_seed=self.random_seed,
+                            callback=callback,
+                        )
                     assert callback.called

     def test_callback_can_cancel(self):
@@ -339,29 +375,35 @@ def callback(trace, draw):
                 raise KeyboardInterrupt()

         with self.model:
-            trace = pm.sample(
-                10,
-                tune=0,
-                chains=1,
-                step=self.step,
-                cores=1,
-                random_seed=self.random_seed,
-                callback=callback,
-                return_inferencedata=False,
-            )
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                trace = pm.sample(
+                    10,
+                    tune=0,
+                    chains=1,
+                    step=self.step,
+                    cores=1,
+                    random_seed=self.random_seed,
+                    callback=callback,
+                    return_inferencedata=False,
+                )
         assert len(trace) == trace_cancel_length

     def test_sequential_backend(self):
         with self.model:
             backend = NDArray()
-            pm.sample(10, cores=1, chains=2, trace=backend)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                pm.sample(10, cores=1, chains=2, trace=backend)

     def test_exceptions(self):
         # Test iteration over MultiTrace NotImplementedError
         with pm.Model() as model:
             mu = pm.Normal("mu", 0.0, 1.0)
             a = pm.Normal("a", mu=mu, sigma=1, observed=np.array([0.5, 0.2]))
-            trace = pm.sample(tune=0, draws=10, chains=2, return_inferencedata=False)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                trace = pm.sample(tune=0, draws=10, chains=2, return_inferencedata=False)

             with pytest.raises(NotImplementedError):
                 xvars = [t["mu"] for t in trace]
@@ -386,7 +428,9 @@ def test_transform_with_rv_dependency(self):
                 bounds_fn=lambda *inputs: (inputs[-2], inputs[-1])
             )
             y = pm.Uniform("y", lower=0, upper=x, transform=transform)
-            trace = pm.sample(tune=10, draws=50, return_inferencedata=False, random_seed=336)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                trace = pm.sample(tune=10, draws=50, return_inferencedata=False, random_seed=336)

         assert np.allclose(scipy.special.expit(trace["y_interval__"]), trace["y"])
@@ -403,12 +447,16 @@ def test_sample_find_MAP_does_not_modify_start():

         # make sure sample does not modify the start dict
         start = {"untransformed": 0.2}
-        pm.sample(draws=10, step=pm.Metropolis(), tune=5, initvals=start, chains=3)
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+            pm.sample(draws=10, step=pm.Metropolis(), tune=5, initvals=start, chains=3)
         assert start == {"untransformed": 0.2}

         # make sure sample does not modify the start when passes as list of dict
         start = [{"untransformed": 2}, {"untransformed": 0.2}]
-        pm.sample(draws=10, step=pm.Metropolis(), tune=5, initvals=start, chains=2)
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+            pm.sample(draws=10, step=pm.Metropolis(), tune=5, initvals=start, chains=2)
         assert start == [{"untransformed": 2}, {"untransformed": 0.2}]
@@ -593,12 +641,14 @@ def test_normal_scalar_idata(self):
         with pm.Model() as model:
             mu = pm.Normal("mu", 0.0, 1.0)
             a = pm.Normal("a", mu=mu, sigma=1, observed=0.0)
-            trace = pm.sample(
-                draws=ndraws,
-                chains=nchains,
-                return_inferencedata=False,
-                discard_tuned_samples=False,
-            )
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", "Tuning samples will be included.*", UserWarning)
+                trace = pm.sample(
+                    draws=ndraws,
+                    chains=nchains,
+                    return_inferencedata=False,
+                    discard_tuned_samples=False,
+                )

         assert not isinstance(trace, InferenceData)
@@ -621,9 +671,13 @@ def test_normal_vector(self, caplog):
             ppc0 = pm.sample_posterior_predictive(
                 [model.initial_point()], return_inferencedata=False, samples=10
             )
-            ppc = pm.sample_posterior_predictive(
-                trace, return_inferencedata=False, samples=12, var_names=[]
-            )
+            with warnings.catch_warnings():
+                warnings.filterwarnings(
+                    "ignore", ".*smaller than nchains times ndraws.*", UserWarning
+                )
+                ppc = pm.sample_posterior_predictive(
+                    trace, return_inferencedata=False, samples=12, var_names=[]
+                )
             assert len(ppc) == 0

         # test keep_size parameter
@@ -686,9 +740,13 @@ def test_vector_observed(self):
             # TODO: Assert something about the output
             # ppc = pm.sample_posterior_predictive(idata, samples=12, var_names=[])
             # assert len(ppc) == 0
-            ppc = pm.sample_posterior_predictive(
-                idata, return_inferencedata=False, samples=12, var_names=["a"]
-            )
+            with warnings.catch_warnings():
+                warnings.filterwarnings(
+                    "ignore", ".*smaller than nchains times ndraws.*", UserWarning
+                )
+                ppc = pm.sample_posterior_predictive(
+                    idata, return_inferencedata=False, samples=12, var_names=["a"]
+                )
             assert "a" in ppc
             assert ppc["a"].shape == (12, 2)
@@ -700,13 +758,17 @@ def test_sum_normal(self):

         with model:
             # test list input
-            ppc0 = pm.sample_posterior_predictive(
-                [model.initial_point()], return_inferencedata=False, samples=10
-            )
-            assert ppc0 == {}
-            ppc = pm.sample_posterior_predictive(
-                idata, return_inferencedata=False, samples=1000, var_names=["b"]
-            )
+            with warnings.catch_warnings():
+                warnings.filterwarnings(
+                    "ignore", ".*smaller than nchains times ndraws.*", UserWarning
+                )
+                ppc0 = pm.sample_posterior_predictive(
+                    [model.initial_point()], return_inferencedata=False, samples=10
+                )
+                assert ppc0 == {}
+                ppc = pm.sample_posterior_predictive(
+                    idata, return_inferencedata=False, samples=1000, var_names=["b"]
+                )
             assert len(ppc) == 1
             assert ppc["b"].shape == (1000,)
             scale = np.sqrt(1 + 0.2**2)
@@ -720,13 +782,19 @@ def test_model_not_drawable_prior(self):
             mu = pm.HalfFlat("sigma")
             pm.Poisson("foo", mu=mu, observed=data)
             with aesara.config.change_flags(mode=fast_unstable_sampling_mode):
-                idata = pm.sample(tune=10, draws=40, chains=1)
+                with warnings.catch_warnings():
+                    warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                    idata = pm.sample(tune=10, draws=40, chains=1)

         with model:
             with pytest.raises(NotImplementedError) as excinfo:
                 pm.sample_prior_predictive(50)
             assert "Cannot sample" in str(excinfo.value)
-            samples = pm.sample_posterior_predictive(idata, 40, return_inferencedata=False)
+            with warnings.catch_warnings():
+                warnings.filterwarnings(
+                    "ignore", ".*smaller than nchains times ndraws.*", UserWarning
+                )
+                samples = pm.sample_posterior_predictive(idata, 40, return_inferencedata=False)
             assert samples["foo"].shape == (40, 200)

     def test_model_shared_variable(self):
@@ -750,9 +818,13 @@ def test_model_shared_variable(self):

         samples = 100
         with model:
-            post_pred = pm.sample_posterior_predictive(
-                trace, return_inferencedata=False, samples=samples, var_names=["p", "obs"]
-            )
+            with warnings.catch_warnings():
+                warnings.filterwarnings(
+                    "ignore", ".*smaller than nchains times ndraws.*", UserWarning
+                )
+                post_pred = pm.sample_posterior_predictive(
+                    trace, return_inferencedata=False, samples=samples, var_names=["p", "obs"]
+                )

         expected_p = np.array([logistic.eval({coeff: val}) for val in trace["x"][:samples]])
         assert post_pred["obs"].shape == (samples, 3)
@@ -850,7 +922,11 @@ def test_variable_type(self):
             )

         with model:
-            ppc = pm.sample_posterior_predictive(trace, return_inferencedata=False, samples=1)
+            with warnings.catch_warnings():
+                warnings.filterwarnings(
+                    "ignore", ".*smaller than nchains times ndraws.*", UserWarning
+                )
+                ppc = pm.sample_posterior_predictive(trace, return_inferencedata=False, samples=1)
             assert ppc["a"].dtype.kind == "f"
             assert ppc["b"].dtype.kind == "i"
@@ -1031,7 +1107,11 @@ def check_exec_nuts_init(method):
     ],
 )
 def test_exec_nuts_init(method):
-    check_exec_nuts_init(method)
+    if method.endswith("adapt_full"):
+        with pytest.warns(UserWarning, match="experimental feature"):
+            check_exec_nuts_init(method)
+    else:
+        check_exec_nuts_init(method)


 @pytest.mark.skip(reason="Test requires monkey patching of RandomGenerator")
@@ -1463,9 +1543,13 @@ def test_step_args():
         a = pm.Normal("a")
         b = pm.Poisson("b", 1)
         idata0 = pm.sample(target_accept=0.5, random_seed=1418)
-        idata1 = pm.sample(
-            nuts={"target_accept": 0.5}, metropolis={"scaling": 0}, random_seed=1418 * 2
-        )
+        with warnings.catch_warnings():
+            warnings.filterwarnings(
+                "ignore", "invalid value encountered in double_scalars", RuntimeWarning
+            )
+            idata1 = pm.sample(
+                nuts={"target_accept": 0.5}, metropolis={"scaling": 0}, random_seed=1418 * 2
+            )

     npt.assert_almost_equal(idata0.sample_stats.acceptance_rate.mean(), 0.5, decimal=1)
     npt.assert_almost_equal(idata1.sample_stats.acceptance_rate.mean(), 0.5, decimal=1)
@@ -1475,14 +1559,18 @@ def test_step_args():
 def test_init_nuts(caplog):
     with pm.Model() as model:
         a = pm.Normal("a")
-        pm.sample(10, tune=10)
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+            pm.sample(10, tune=10)
         assert "Initializing NUTS" in caplog.text


 def test_no_init_nuts_step(caplog):
     with pm.Model() as model:
         a = pm.Normal("a")
-        pm.sample(10, tune=10, step=pm.NUTS([a]))
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+            pm.sample(10, tune=10, step=pm.NUTS([a]))
         assert "Initializing NUTS" not in caplog.text
@@ -1490,7 +1578,9 @@ def test_no_init_nuts_compound(caplog):
     with pm.Model() as model:
         a = pm.Normal("a")
         b = pm.Poisson("b", 1)
-        pm.sample(10, tune=10)
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+            pm.sample(10, tune=10)
         assert "Initializing NUTS" not in caplog.text
diff --git a/pymc/tests/test_sampling_jax.py b/pymc/tests/test_sampling_jax.py
index 5fd494b316..9b0c0ab909 100644
--- a/pymc/tests/test_sampling_jax.py
+++ b/pymc/tests/test_sampling_jax.py
@@ -16,17 +16,18 @@

 import pymc as pm

-from pymc.sampling_jax import (
-    _get_batched_jittered_initial_points,
-    _get_log_likelihood,
-    _numpyro_nuts_defaults,
-    _replace_shared_variables,
-    _update_numpyro_nuts_kwargs,
-    get_jaxified_graph,
-    get_jaxified_logp,
-    sample_blackjax_nuts,
-    sample_numpyro_nuts,
-)
+with pytest.warns(UserWarning, match="module is experimental"):
+    from pymc.sampling_jax import (
+        _get_batched_jittered_initial_points,
+        _get_log_likelihood,
+        _numpyro_nuts_defaults,
+        _replace_shared_variables,
+        _update_numpyro_nuts_kwargs,
+        get_jaxified_graph,
+        get_jaxified_logp,
+        sample_blackjax_nuts,
+        sample_numpyro_nuts,
+    )


 @pytest.mark.parametrize(
@@ -129,7 +130,9 @@ def test_get_log_likelihood():
         sigma = pm.HalfNormal("sigma")
         b = pm.Normal("b", a, sigma=sigma, observed=obs_at)

-        trace = pm.sample(tune=10, draws=10, chains=2, random_seed=1322)
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+            trace = pm.sample(tune=10, draws=10, chains=2, random_seed=1322)

     b_true = trace.log_likelihood.b.values
     a = np.array(trace.posterior.a)
diff --git a/pymc/tests/test_smc.py b/pymc/tests/test_smc.py
index a040221cf8..9f65b2d92c 100644
--- a/pymc/tests/test_smc.py
+++ b/pymc/tests/test_smc.py
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import warnings

 import aesara
 import aesara.tensor as at
@@ -170,14 +171,16 @@ def test_start(self):

     def test_kernel_kwargs(self):
         with self.fast_model:
-            trace = pm.sample_smc(
-                draws=10,
-                chains=1,
-                threshold=0.7,
-                correlation_threshold=0.02,
-                return_inferencedata=False,
-                kernel=pm.smc.IMH,
-            )
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                trace = pm.sample_smc(
+                    draws=10,
+                    chains=1,
+                    threshold=0.7,
+                    correlation_threshold=0.02,
+                    return_inferencedata=False,
+                    kernel=pm.smc.IMH,
+                )

             assert trace.report.threshold == 0.7
             assert trace.report.n_draws == 10
@@ -185,14 +188,16 @@ def test_kernel_kwargs(self):
             assert trace.report.correlation_threshold == 0.02

         with self.fast_model:
-            trace = pm.sample_smc(
-                draws=10,
-                chains=1,
-                threshold=0.95,
-                correlation_threshold=0.02,
-                return_inferencedata=False,
-                kernel=pm.smc.MH,
-            )
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                trace = pm.sample_smc(
+                    draws=10,
+                    chains=1,
+                    threshold=0.95,
+                    correlation_threshold=0.02,
+                    return_inferencedata=False,
+                    kernel=pm.smc.MH,
+                )

             assert trace.report.threshold == 0.95
             assert trace.report.n_draws == 10
@@ -203,8 +208,11 @@ def test_return_datatype(self, chains):
         draws = 10

         with self.fast_model:
-            idata = pm.sample_smc(chains=chains, draws=draws)
-            mt = pm.sample_smc(chains=chains, draws=draws, return_inferencedata=False)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                warnings.filterwarnings("ignore", "More chains .* than draws .*", UserWarning)
+                idata = pm.sample_smc(chains=chains, draws=draws)
+                mt = pm.sample_smc(chains=chains, draws=draws, return_inferencedata=False)

         assert isinstance(idata, InferenceData)
         assert "sample_stats" in idata
@@ -342,7 +350,9 @@ def test_custom_dist_sum_stat(self, floatX):
         assert self.count_rvs(m.logp()) == 1

         with m:
-            pm.sample_smc(draws=100)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", "More chains .* than draws .*", UserWarning)
+                pm.sample_smc(draws=100)

     @pytest.mark.parametrize("floatX", ["float32", "float64"])
     def test_custom_dist_sum_stat_scalar(self, floatX):
@@ -529,7 +539,12 @@ def test_automatic_use_of_sort(self):
     def test_name_is_string_type(self):
         with self.SMABC_potential:
             assert not self.SMABC_potential.name
-            trace = pm.sample_smc(draws=10, chains=1, return_inferencedata=False)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                warnings.filterwarnings(
+                    "ignore", "invalid value encountered in true_divide", RuntimeWarning
+                )
+                trace = pm.sample_smc(draws=10, chains=1, return_inferencedata=False)
             assert isinstance(trace._straces[0].name, str)

     def test_named_model(self):
@@ -542,7 +557,9 @@ def test_named_model(self):
             b = pm.HalfNormal("b", sigma=1)
             s = pm.Simulator("s", self.normal_sim, a, b, observed=self.data)

-            trace = pm.sample_smc(draws=10, chains=2, return_inferencedata=False)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                trace = pm.sample_smc(draws=10, chains=2, return_inferencedata=False)
             assert f"{name}::a" in trace.varnames
             assert f"{name}::b" in trace.varnames
             assert f"{name}::b_log__" in trace.varnames
@@ -568,9 +585,11 @@ def test_proposal_dist_shape(self):
         with pm.Model() as m:
             x = pm.Normal("x", 0, 1)
             y = pm.Normal("y", x, 1, observed=0)
-            trace = pm.sample_smc(
-                draws=10,
-                chains=1,
-                kernel=pm.smc.MH,
-                return_inferencedata=False,
-            )
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                trace = pm.sample_smc(
+                    draws=10,
+                    chains=1,
+                    kernel=pm.smc.MH,
+                    return_inferencedata=False,
+                )
diff --git a/pymc/tests/test_starting.py b/pymc/tests/test_starting.py
index 2484131108..3c9ce07f98 100644
--- a/pymc/tests/test_starting.py
+++ b/pymc/tests/test_starting.py
@@ -26,6 +26,7 @@
     Uniform,
     find_MAP,
 )
+from pymc.exceptions import ImputationWarning
 from pymc.tests.checks import close_to
 from pymc.tests.helpers import select_by_precision
 from pymc.tests.models import non_normal, simple_arbitrary_det, simple_model
@@ -127,7 +128,8 @@ def test_find_MAP_issue_5923():
 def test_find_MAP_issue_4488():
     # Test for https://github.com/pymc-devs/pymc/issues/4488
     with Model() as m:
-        x = Gamma("x", alpha=3, beta=10, observed=np.array([1, np.nan]))
+        with pytest.warns(ImputationWarning):
+            x = Gamma("x", alpha=3, beta=10, observed=np.array([1, np.nan]))
         y = Deterministic("y", x + 1)
         map_estimate = find_MAP()
diff --git a/pymc/tests/test_step.py b/pymc/tests/test_step.py
index 8cf809069c..cf30ee0be4 100644
--- a/pymc/tests/test_step.py
+++ b/pymc/tests/test_step.py
@@ -11,10 +11,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 import shutil
 import sys
 import tempfile
+import warnings

 import aesara
 import aesara.tensor as at
@@ -121,15 +121,17 @@ def test_step_continuous(self, step_fn, draws):
         _, model_coarse, _ = mv_simple_coarse()
         with model:
             step = step_fn(C, model_coarse)
-            idata = sample(
-                tune=1000,
-                draws=draws,
-                chains=1,
-                step=step,
-                start=start,
-                model=model,
-                random_seed=1,
-            )
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", "More chains .* than draws .*", UserWarning)
+                idata = sample(
+                    tune=1000,
+                    draws=draws,
+                    chains=1,
+                    step=step,
+                    initvals=start,
+                    model=model,
+                    random_seed=1,
+                )
         self.check_stat(check, idata, step.__class__.__name__)
         self.check_stat_dtype(idata, step)
@@ -144,7 +146,7 @@ def test_step_discrete(self):
                 draws=2000,
                 chains=1,
                 step=step,
-                start=start,
+                initvals=start,
                 model=model,
                 random_seed=1,
             )
@@ -163,7 +165,7 @@ def test_step_categorical(self, proposal):
                 draws=2000,
                 chains=1,
                 step=step,
-                start=start,
+                initvals=start,
                 model=model,
                 random_seed=1,
             )
@@ -294,25 +296,29 @@ def test_checks_population_size(self):
             n = Normal("n", mu=0, sigma=1)
             for stepper in TestPopulationSamplers.steppers:
                 step = stepper()
-                with pytest.raises(ValueError):
-                    sample(draws=10, tune=10, chains=1, cores=1, step=step)
-                # don't parallelize to make test faster
-                sample(draws=10, tune=10, chains=4, cores=1, step=step)
+                with warnings.catch_warnings():
+                    warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                    with pytest.raises(ValueError):
+                        sample(draws=10, tune=10, chains=1, cores=1, step=step)
+                    # don't parallelize to make test faster
+                    sample(draws=10, tune=10, chains=4, cores=1, step=step)

     def test_demcmc_warning_on_small_populations(self):
         """Test that a warning is raised when n_chains <= n_dims"""
         with Model() as model:
             Normal("n", mu=0, sigma=1, size=(2, 3))
-            with pytest.warns(UserWarning) as record:
-                sample(
-                    draws=5,
-                    tune=5,
-                    chains=6,
-                    step=DEMetropolis(),
-                    # make tests faster by not parallelizing; disable convergence warning
-                    cores=1,
-                    compute_convergence_checks=False,
-                )
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                with pytest.warns(UserWarning) as record:
+                    sample(
+                        draws=5,
+                        tune=5,
+                        chains=6,
+                        step=DEMetropolis(),
+                        # make tests faster by not parallelizing; disable convergence warning
+                        cores=1,
+                        compute_convergence_checks=False,
+                    )

     def test_demcmc_tune_parameter(self):
         """Tests that validity of the tune setting is checked"""
@@ -336,7 +342,9 @@ def test_nonparallelized_chains_are_random(self):
             x = Normal("x", 0, 1)
             for stepper in TestPopulationSamplers.steppers:
                 step = stepper()
-                idata = sample(chains=4, cores=1, draws=20, tune=0, step=DEMetropolis())
+                with warnings.catch_warnings():
+                    warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                    idata = sample(chains=4, cores=1, draws=20, tune=0, step=DEMetropolis())
                 samples = idata.posterior["x"].values[:, 5]
                 assert len(set(samples)) == 4, f"Parallelized {stepper} chains are identical."
@@ -346,7 +354,9 @@ def test_parallelized_chains_are_random(self):
             x = Normal("x", 0, 1)
             for stepper in TestPopulationSamplers.steppers:
                 step = stepper()
-                idata = sample(chains=4, cores=4, draws=20, tune=0, step=DEMetropolis())
+                with warnings.catch_warnings():
+                    warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                    idata = sample(chains=4, cores=4, draws=20, tune=0, step=DEMetropolis())
                 samples = idata.posterior["x"].values[:, 5]
                 assert len(set(samples)) == 4, f"Parallelized {stepper} chains are identical."
@@ -546,14 +556,16 @@ def test_custom_proposal_dist(self):
         with Model() as pmodel:
             D = 3
             Normal("n", 0, 2, size=(D,))
-            trace = sample(
-                tune=100,
-                draws=50,
-                step=DEMetropolisZ(proposal_dist=NormalProposal),
-                cores=1,
-                chains=3,
-                discard_tuned_samples=False,
-            )
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                trace = sample(
+                    tune=100,
+                    draws=50,
+                    step=DEMetropolisZ(proposal_dist=NormalProposal),
+                    cores=1,
+                    chains=3,
+                    discard_tuned_samples=False,
+                )


 class TestNutsCheckTrace:
@@ -562,7 +574,9 @@ def test_multiple_samplers(self, caplog):
             prob = Beta("prob", alpha=5.0, beta=3.0)
             Binomial("outcome", n=1, p=prob)
             caplog.clear()
-            sample(3, tune=2, discard_tuned_samples=False, n_init=None, chains=1)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                sample(3, tune=2, discard_tuned_samples=False, n_init=None, chains=1)
             messages = [msg.msg for msg in caplog.records]
             assert all("boolean index did not" not in msg for msg in messages)
@@ -588,7 +602,9 @@ def test_linalg(self, caplog):
             b = at.slinalg.solve(floatX(np.eye(2)), a, check_finite=False)
             Normal("c", mu=b, size=2, initval=floatX(np.r_[0.0, 0.0]))
             caplog.clear()
-            trace = sample(20, tune=5, chains=2, return_inferencedata=False, random_seed=526)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                trace = sample(20, tune=5, chains=2, return_inferencedata=False, random_seed=526)
             warns = [msg.msg for msg in caplog.records]
             assert np.any(trace["diverging"])
             assert (
@@ -606,7 +622,9 @@ def test_sampler_stats(self):
         with Model() as model:
             Normal("x", mu=0, sigma=1)
-            trace = sample(draws=10, tune=1, chains=1, return_inferencedata=False)
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                trace = sample(draws=10, tune=1, chains=1, return_inferencedata=False)

         # Assert stats exist and have the correct shape.
         expected_stat_names = {
diff --git a/pymc/tests/test_tuning.py b/pymc/tests/test_tuning.py
index e8e37978ab..7ed406ee8c 100644
--- a/pymc/tests/test_tuning.py
+++ b/pymc/tests/test_tuning.py
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import warnings

 import numpy as np
 import pytest
@@ -24,7 +25,9 @@

 def test_adjust_precision():
     a = np.array([-10, -0.01, 0, 10, 1e300, -inf, inf])
-    a1 = scaling.adjust_precision(a)
+    with warnings.catch_warnings():
+        warnings.filterwarnings("ignore", "divide by zero encountered in log", RuntimeWarning)
+        a1 = scaling.adjust_precision(a)
     assert all((a1 > 0) & (a1 < 1e200))
diff --git a/pymc/tests/test_types.py b/pymc/tests/test_types.py
index f5e884abfa..342776342e 100644
--- a/pymc/tests/test_types.py
+++ b/pymc/tests/test_types.py
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import warnings

 from copy import copy

@@ -45,7 +46,9 @@ def test_float64(self):

         for sampler in self.samplers:
             with model:
-                sample(draws=10, tune=10, chains=1, step=sampler())
+                with warnings.catch_warnings():
+                    warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                    sample(draws=10, tune=10, chains=1, step=sampler())

     @aesara.config.change_flags({"floatX": "float32", "warn_float64": "warn"})
     def test_float32(self):
@@ -58,4 +61,6 @@ def test_float32(self):

         for sampler in self.samplers:
             with model:
-                sample(draws=10, tune=10, chains=1, step=sampler())
+                with warnings.catch_warnings():
+                    warnings.filterwarnings("ignore", ".*number of samples.*", UserWarning)
+                    sample(draws=10, tune=10, chains=1, step=sampler())
diff --git a/pymc/tests/test_variational_inference.py b/pymc/tests/test_variational_inference.py
index 0524b542d2..c5b8a80cf4 100644
--- a/pymc/tests/test_variational_inference.py
+++ b/pymc/tests/test_variational_inference.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 import functools
 import io
 import operator
@@ -517,7 +516,11 @@ def inference_spec(request):
     def init_(**kw):
         k = init.copy()
         k.update(kw)
-        return cls(**k)
+        if cls == ASVGD:
+            with pytest.warns(UserWarning, match="experimental inference Operator"):
+                return cls(**k)
+        else:
+            return cls(**k)

     init_.cls = cls
     return init_
@@ -641,11 +644,19 @@ def fit_method_with_object(request, another_simple_model):
 )
 def test_fit_fn_text(method, kwargs, error, another_simple_model):
     with another_simple_model:
-        if error is not None:
-            with pytest.raises(error):
-                fit(10, method=method, **kwargs)
+        if method == "asvgd":
+            with pytest.warns(UserWarning, match="experimental inference Operator"):
+                if error is not None:
+                    with pytest.raises(error):
+                        fit(10, method=method, **kwargs)
+                else:
+                    fit(10, method=method, **kwargs)
         else:
-            fit(10, method=method, **kwargs)
+            if error is not None:
+                with pytest.raises(error):
+                    fit(10, method=method, **kwargs)
+            else:
+                fit(10, method=method, **kwargs)


 @pytest.fixture(scope="module")
diff --git a/pymc/tuning/starting.py b/pymc/tuning/starting.py
index 7ba3cda150..ea5b4c13cd 100644
--- a/pymc/tuning/starting.py
+++ b/pymc/tuning/starting.py
@@ -142,10 +142,11 @@ def find_MAP(
         )
         method = "Powell"

-    if compute_gradient:
+    if compute_gradient and method != "Powell":
         cost_func = CostFuncWrapper(maxeval, progressbar, logp_func, dlogp_func)
     else:
         cost_func = CostFuncWrapper(maxeval, progressbar, logp_func)
+        compute_gradient = False

     try:
         opt_result = minimize(