Fix Flaky Euler-Maruyama Tests (#6287)
* seeding the test

* being nice to the docs

* seeding and flipping any

* switch from seed arg to random_seed for initial point

* using the numpy_rng instance where applicable

* change back the test after discussion
wd60622 authored Nov 19, 2022
1 parent e153121 commit cb37afa
Showing 2 changed files with 16 additions and 10 deletions.
6 changes: 3 additions & 3 deletions pymc/distributions/timeseries.py
@@ -905,11 +905,11 @@ class EulerMaruyama(Distribution):
     Parameters
     ----------
-    dt: float
+    dt : float
         time step of discretization
-    sde_fn: callable
+    sde_fn : callable
         function returning the drift and diffusion coefficients of SDE
-    sde_pars: tuple
+    sde_pars : tuple
         parameters of the SDE, passed as ``*args`` to ``sde_fn``
     init_dist : unnamed distribution, optional
         Scalar distribution for initial values. Unnamed refers to distributions created with
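For orientation, the arguments being re-documented are those of the EulerMaruyama constructor. A minimal sketch of how they fit together, mirroring the test file below (the model, parameter values, and variable names here are illustrative, not taken from this commit):

import numpy as np
import pymc as pm

def sde_fn(x, lam, sig2):
    # Return the (drift, diffusion) pair for dX = lam * X * dt + sig2 * dW.
    return lam * x, sig2

with pm.Model():
    init_dist = pm.Normal.dist(0.0, 1.0)  # unnamed distribution created with .dist()
    y = pm.EulerMaruyama(
        "y",
        dt=0.1,                # dt : float, time step of the discretization
        sde_fn=sde_fn,         # sde_fn : callable returning drift and diffusion
        sde_pars=(-0.5, 0.2),  # sde_pars : tuple, passed as *args to sde_fn
        init_dist=init_dist,
        steps=50,
    )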
20 changes: 13 additions & 7 deletions pymc/tests/distributions/test_timeseries.py
@@ -835,8 +835,11 @@ class TestEulerMaruyama:
     @pytest.mark.parametrize("batched_param", [1, 2])
     @pytest.mark.parametrize("explicit_shape", (True, False))
     def test_batched_size(self, explicit_shape, batched_param):
+        RANDOM_SEED = 42
+        numpy_rng = np.random.default_rng(RANDOM_SEED)
+
         steps, batch_size = 100, 5
-        param_val = np.square(np.random.randn(batch_size))
+        param_val = np.square(numpy_rng.standard_normal(batch_size))
         if explicit_shape:
             kwargs = {"shape": (batch_size, steps)}
         else:
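This is the core of the flakiness fix: the test no longer pulls values from the global np.random state but from a Generator seeded locally in the test. A small standalone illustration of the difference (not part of the diff):

import numpy as np

# Legacy global-state API: the result depends on whatever last seeded np.random.
a = np.random.randn(5)

# Seeded Generator: reproducible and isolated from other tests.
b = np.random.default_rng(42).standard_normal(5)
c = np.random.default_rng(42).standard_normal(5)
assert np.allclose(b, c)  # same seed, identical draws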
@@ -853,7 +856,7 @@ def sde_fn(x, k, d, s):
                 "y", dt=0.02, sde_fn=sde_fn, sde_pars=sde_pars, init_dist=init_dist, **kwargs
             )

-        y_eval = draw(y, draws=2)
+        y_eval = draw(y, draws=2, random_seed=numpy_rng)
         assert y_eval[0].shape == (batch_size, steps)
         assert not np.any(np.isclose(y_eval[0], y_eval[1]))

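pymc.draw accepts a random_seed argument (an integer or a numpy Generator), which is what keeps the two draws above different from each other yet identical across test runs. A hedged standalone sketch (the toy variable is illustrative):

import numpy as np
import pymc as pm

x = pm.Normal.dist(0.0, 1.0, shape=(3,))

first = pm.draw(x, draws=2, random_seed=np.random.default_rng(42))
again = pm.draw(x, draws=2, random_seed=np.random.default_rng(42))

# Re-seeding with the same value reproduces the same pair of draws.
assert np.allclose(np.asarray(first), np.asarray(again))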
@@ -873,7 +876,7 @@ def sde_fn(x, k, d, s):
                     **kwargs,
                 )

-        t0_init = t0.initial_point()
+        t0_init = t0.initial_point(random_seed=RANDOM_SEED)
         t1_init = {f"y_{i}": t0_init["y"][i] for i in range(batch_size)}
         np.testing.assert_allclose(
             t0.compile_logp()(t0_init),
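Model.initial_point can involve random draws for starting values, so it also takes a random_seed; passing the fixed RANDOM_SEED keeps t0_init deterministic. A minimal sketch on a toy model (illustrative, not from the diff):

import numpy as np
import pymc as pm

with pm.Model() as m:
    pm.Normal("x", 0.0, 1.0)

ip_a = m.initial_point(random_seed=42)
ip_b = m.initial_point(random_seed=42)
assert all(np.array_equal(ip_a[k], ip_b[k]) for k in ip_a)  # same seed, same initial point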
@@ -919,17 +922,20 @@ def test_linear_model(self):
         N = 300
         dt = 1e-1

+        RANDOM_SEED = 42
+        numpy_rng = np.random.default_rng(RANDOM_SEED)
+
         def _gen_sde_path(sde, pars, dt, n, x0):
             xs = [x0]
-            wt = np.random.normal(size=(n,) if isinstance(x0, float) else (n, x0.size))
+            wt = numpy_rng.normal(size=(n,) if isinstance(x0, float) else (n, x0.size))
             for i in range(n):
                 f, g = sde(xs[-1], *pars)
                 xs.append(xs[-1] + f * dt + np.sqrt(dt) * g * wt[i])
             return np.array(xs)

         sde = lambda x, lam: (lam * x, sig2)
         x = floatX(_gen_sde_path(sde, (lam,), dt, N, 5.0))
-        z = x + np.random.randn(x.size) * sig2
+        z = x + numpy_rng.standard_normal(size=x.size) * sig2
         # build model
         with Model() as model:
             lamh = Flat("lamh")
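_gen_sde_path is the plain Euler-Maruyama recursion x[i+1] = x[i] + f(x[i]) * dt + sqrt(dt) * g(x[i]) * w[i] with standard-normal increments w[i]; driving it with the seeded numpy_rng makes the synthetic path identical on every run. A standalone sketch of the same recursion (function and variable names here are illustrative):

import numpy as np

def euler_maruyama_path(drift_diffusion, dt, n_steps, x0, rng):
    # Simulate one path of dX = f(X) dt + g(X) dW with the Euler-Maruyama scheme.
    xs = [x0]
    for w in rng.normal(size=n_steps):
        f, g = drift_diffusion(xs[-1])
        xs.append(xs[-1] + f * dt + np.sqrt(dt) * g * w)
    return np.array(xs)

path = euler_maruyama_path(
    lambda x: (-0.78 * x, 0.2),  # linear drift, constant diffusion
    dt=0.1, n_steps=300, x0=5.0, rng=np.random.default_rng(42),
)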
@@ -939,9 +945,9 @@ def _gen_sde_path(sde, pars, dt, n, x0):
             Normal("zh", mu=xh, sigma=sig2, observed=z)
         # invert
         with model:
-            trace = sample(chains=1)
+            trace = sample(chains=1, random_seed=numpy_rng)

-        ppc = sample_posterior_predictive(trace, model=model)
+        ppc = sample_posterior_predictive(trace, model=model, random_seed=numpy_rng)

         p95 = [2.5, 97.5]
         lo, hi = np.percentile(trace.posterior["lamh"], p95, axis=[0, 1])
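Both pymc.sample and pymc.sample_posterior_predictive accept a random_seed, so passing the shared numpy_rng pins down the inference half of the test as well. A minimal hedged sketch on a toy model (the model and tuning settings are illustrative, not taken from the test):

import numpy as np
import pymc as pm

rng = np.random.default_rng(42)
with pm.Model() as m:
    mu = pm.Normal("mu", 0.0, 1.0)
    pm.Normal("obs", mu=mu, sigma=1.0, observed=rng.normal(size=20))
    # Seeded sampling and posterior predictive draws are reproducible across runs.
    idata = pm.sample(chains=1, tune=200, draws=200, random_seed=rng, progressbar=False)
    ppc = pm.sample_posterior_predictive(idata, random_seed=rng, progressbar=False)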