diff --git a/.azure-pipelines/azure-pipelines-docs.yml b/.azure-pipelines/azure-pipelines-docs.yml
deleted file mode 100644
index 5188df0b98..0000000000
--- a/.azure-pipelines/azure-pipelines-docs.yml
+++ /dev/null
@@ -1,78 +0,0 @@
-jobs:
-- job: BuildDocs
-  dependsOn:
-  - BaseTests
-  condition: and(succeeded(), or(eq(variables['Build.SourceBranchName'], 'main'), eq(variables['Build.Reason'], 'PullRequest')))
-  pool:
-    vmImage: 'ubuntu-latest'
-  variables:
-  - name: NUMBA_DISABLE_JIT
-    value: 1
-  timeoutInMinutes: 360
-  strategy:
-    matrix:
-      Python_39:
-        python.version: "3.9"
-        name: "Python 3.9 - docs"
-
-  steps:
-  - script: |
-      PRTitle=$(curl -s https://api.github.com/repos/arviz-devs/arviz/pulls/${PULLREQUESTNUMBER} | grep title | cut -d'"' -f4)
-      echo "##vso[task.setvariable variable=PRTitle]${PRTitle}"
-      echo "PR title: ${PRTitle}"
-    env:
-      PULLREQUESTNUMBER: $(System.PullRequest.PullRequestNumber)
-    displayName: 'Read PR title'
-
-  - task: UsePythonVersion@0
-    inputs:
-      versionSpec: '$(python.version)'
-    displayName: 'Use Python $(python.version)'
-
-  - script: |
-      ls -ahl
-      env
-      pwd
-      gcc --version
-      python --version
-    displayName: 'Debug information'
-
-  - script: |
-      sudo apt-get update
-      sudo apt-get install -y pandoc
-    displayName: 'Install external libraries'
-
-  - script: |
-      python -m pip install --upgrade pip
-      python -m pip install wheel
-      python -m pip install --no-cache-dir -r requirements.txt
-      python -m pip install --no-cache-dir -r requirements-docs.txt
-      python -m pip install .
-    displayName: 'Install requirements'
-
-  - script: |
-      python -m pip freeze
-    displayName: 'Print packages'
-
-  - script: |
-      python -msphinx -M clean doc/source doc/build
-      sphinx-build doc/source doc/build -b html
-    displayName: 'Build docs'
-
-  - task: PublishBuildArtifacts@1
-    inputs:
-      pathtoPublish: 'doc/build'
-      artifactName: 'arviz_doc_build'
-    condition: or(eq(variables['Build.SourceBranchName'], 'main'), or(contains(variables['PRTitle'], '[doc]'), contains(variables['PRTitle'], '[docs]'), contains(variables['PRTitle'], '[viz]'), contains(variables['PRTitle'], '[plot]')))
-    displayName: 'Upload docs'
-
-  - script: |
-      ls -ahl
-      ls -ahl doc
-      env
-    displayName: 'Debug information 2'
-
-  - script: |
-      ghp-import -pfnr https://$(GH_USER):$(GH_TOKEN)@github.com/${BUILD_REPOSITORY_NAME}.git doc/build
-    condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'main'))
-    displayName: 'Deploy docs'
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 68553a9abd..648ad9d7a4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,12 +8,15 @@
 ### Maintenance and fixes

 - Fix `reloo` outdated usage of `ELPDData` ([2158](https://github.com/arviz-devs/arviz/pull/2158))
+- Make `plot_bpv` smooth discrete data only when computing u_values ([2179](https://github.com/arviz-devs/arviz/pull/2179))
+- Fix bug when beanmachine objects lack some fields ([2154](https://github.com/arviz-devs/arviz/pull/2154))
 - Fix gap for `plot_trace` with option `kind="rank_bars"` ([2180](https://github.com/arviz-devs/arviz/pull/2180))

 ### Deprecation

 ### Documentation

 - Add PyMC and CmdStanPy sampling wrapper examples ([2158](https://github.com/arviz-devs/arviz/pull/2158))
+- Fix docstring for `plot_trace` `chain_prop` and `compact_prop` parameters ([2176](https://github.com/arviz-devs/arviz/pull/2176))

 ## v0.14.0 (2022 Nov 15)
diff --git a/arviz/data/io_beanmachine.py b/arviz/data/io_beanmachine.py
index 5247b1e869..c5516ac1f3 100644
--- a/arviz/data/io_beanmachine.py
+++ b/arviz/data/io_beanmachine.py
@@ -24,15 +24,23 @@ def __init__(

         if "posterior" in self.sampler.namespaces:
             self.posterior = self.sampler.namespaces["posterior"].samples
+        else:
+            self.posterior = None

         if "posterior_predictive" in self.sampler.namespaces:
             self.posterior_predictive = self.sampler.namespaces["posterior_predictive"].samples
+        else:
+            self.posterior_predictive = None

         if self.sampler.log_likelihoods is not None:
             self.log_likelihoods = self.sampler.log_likelihoods
+        else:
+            self.log_likelihoods = None

         if self.sampler.observations is not None:
             self.observations = self.sampler.observations
+        else:
+            self.observations = None

     @requires("posterior")
     def posterior_to_xarray(self):
diff --git a/arviz/plots/backends/bokeh/bpvplot.py b/arviz/plots/backends/bokeh/bpvplot.py
index 9ce22c3f83..3b9aced0ab 100644
--- a/arviz/plots/backends/bokeh/bpvplot.py
+++ b/arviz/plots/backends/bokeh/bpvplot.py
@@ -89,9 +89,6 @@ def plot_bpv(
         obs_vals = obs_vals.flatten()
         pp_vals = pp_vals.reshape(total_pp_samples, -1)

-        if obs_vals.dtype.kind == "i" or pp_vals.dtype.kind == "i":
-            obs_vals, pp_vals = smooth_data(obs_vals, pp_vals)
-
         if kind == "p_value":
             tstat_pit = np.mean(pp_vals <= obs_vals, axis=-1)
             x_s, tstat_pit_dens = kde(tstat_pit)
@@ -117,6 +114,9 @@
             )

         elif kind == "u_value":
+            if obs_vals.dtype.kind == "i" or pp_vals.dtype.kind == "i":
+                obs_vals, pp_vals = smooth_data(obs_vals, pp_vals)
+
             tstat_pit = np.mean(pp_vals <= obs_vals, axis=0)
             x_s, tstat_pit_dens = kde(tstat_pit)
             ax_i.line(x_s, tstat_pit_dens, color=color)
diff --git a/arviz/plots/backends/matplotlib/bpvplot.py b/arviz/plots/backends/matplotlib/bpvplot.py
index d736378055..5c2d758f8e 100644
--- a/arviz/plots/backends/matplotlib/bpvplot.py
+++ b/arviz/plots/backends/matplotlib/bpvplot.py
@@ -86,9 +86,6 @@ def plot_bpv(
         obs_vals = obs_vals.flatten()
         pp_vals = pp_vals.reshape(total_pp_samples, -1)

-        if obs_vals.dtype.kind == "i" or pp_vals.dtype.kind == "i":
-            obs_vals, pp_vals = smooth_data(obs_vals, pp_vals)
-
         if kind == "p_value":
             tstat_pit = np.mean(pp_vals <= obs_vals, axis=-1)
             x_s, tstat_pit_dens = kde(tstat_pit)
@@ -113,6 +110,9 @@
             ax_i.plot(x_ss, u_dens, linewidth=linewidth, **plot_ref_kwargs)

         elif kind == "u_value":
+            if obs_vals.dtype.kind == "i" or pp_vals.dtype.kind == "i":
+                obs_vals, pp_vals = smooth_data(obs_vals, pp_vals)
+
             tstat_pit = np.mean(pp_vals <= obs_vals, axis=0)
             x_s, tstat_pit_dens = kde(tstat_pit)
             ax_i.plot(x_s, tstat_pit_dens, color=color)
diff --git a/arviz/plots/traceplot.py b/arviz/plots/traceplot.py
index f8dbde5ad8..c462e89a63 100644
--- a/arviz/plots/traceplot.py
+++ b/arviz/plots/traceplot.py
@@ -82,13 +82,18 @@ def plot_trace(
     compact: bool, optional
         Plot multidimensional variables in a single plot.
     compact_prop: str or dict {str: array_like}, optional
-        Tuple containing the property name and the property values to distinguish different
-        dimensions with compact=True
+        Defines the property name and the property values to distinguish different
+        dimensions with compact=True.
+        When compact=True it defaults to color; it is
+        ignored otherwise.
     combined: bool, optional
         Flag for combining multiple chains into a single line. If False (default), chains will
         be plotted separately.
     chain_prop: str or dict {str: array_like}, optional
-        Tuple containing the property name and the property values to distinguish different chains
+        Defines the property name and the property values to distinguish different chains.
+        If compact=True it defaults to linestyle;
+        otherwise color is used to distinguish
+        different chains.
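A minimal usage sketch for the two parameters documented in the hunk above — assuming, per the docstrings, that the `str` form names a matplotlib property and the `dict` form maps a property name to explicit values; the dataset and property values below are illustrative, not part of this diff:

```python
import arviz as az

idata = az.load_arviz_data("centered_eight")

# compact=True: dimensions of multidimensional variables share one axis,
# distinguished by compact_prop (defaults to color)
az.plot_trace(idata, compact=True)

# distinguish chains by color instead of the compact-mode default (linestyle)
az.plot_trace(idata, compact=True, chain_prop="color")

# dict form: an explicit property name mapped to one value per chain
az.plot_trace(idata, compact=True, chain_prop={"linestyle": ["-", "--", ":", "-."]})
```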
     legend: bool, optional
         Add a legend to the figure with the chain color code.
     plot_kwargs, fill_kwargs, rug_kwargs, hist_kwargs: dict, optional
diff --git a/arviz/tests/external_tests/test_data_beanmachine.py b/arviz/tests/external_tests/test_data_beanmachine.py
new file mode 100644
index 0000000000..3e04495fbb
--- /dev/null
+++ b/arviz/tests/external_tests/test_data_beanmachine.py
@@ -0,0 +1,76 @@
+# pylint: disable=no-member, invalid-name, redefined-outer-name
+import numpy as np
+import pytest
+
+from ...data.io_beanmachine import from_beanmachine  # pylint: disable=wrong-import-position
+from ..helpers import (  # pylint: disable=unused-import, wrong-import-position
+    chains,
+    draws,
+    eight_schools_params,
+    importorskip,
+    load_cached_models,
+)
+
+# Skip all tests if beanmachine or pytorch not installed
+torch = importorskip("torch")
+bm = importorskip("beanmachine.ppl")
+dist = torch.distributions
+
+
+class TestDataBeanMachine:
+    @pytest.fixture(scope="class")
+    def data(self, eight_schools_params, draws, chains):
+        class Data:
+            model, prior, obj = load_cached_models(
+                eight_schools_params,
+                draws,
+                chains,
+                "beanmachine",
+            )["beanmachine"]
+
+        return Data
+
+    @pytest.fixture(scope="class")
+    def predictions_data(self, data):
+        """Generate predictions for predictions_params"""
+        posterior_samples = data.obj
+        model = data.model
+        predictions = bm.inference.predictive.simulate([model.obs()], posterior_samples)
+        return predictions
+
+    def get_inference_data(self, eight_schools_params, predictions_data):
+        predictions = predictions_data
+        return from_beanmachine(
+            sampler=predictions,
+            coords={
+                "school": np.arange(eight_schools_params["J"]),
+                "school_pred": np.arange(eight_schools_params["J"]),
+            },
+        )
+
+    def test_inference_data(self, data, eight_schools_params, predictions_data):
+        inference_data = self.get_inference_data(eight_schools_params, predictions_data)
+        model = data.model
+        mu = model.mu()
+        tau = model.tau()
+        eta = model.eta()
+        obs = model.obs()
+
+        assert mu in inference_data.posterior
+        assert tau in inference_data.posterior
+        assert eta in inference_data.posterior
+        assert obs in inference_data.posterior_predictive
+
+    def test_inference_data_has_log_likelihood_and_observed_data(self, data):
+        idata = from_beanmachine(data.obj)
+        obs = data.model.obs()
+
+        assert obs in idata.log_likelihood
+        assert obs in idata.observed_data
+
+    def test_inference_data_no_posterior(self, data):
+        model = data.model
+        # only prior
+        inference_data = from_beanmachine(data.prior)
+        assert model.obs() not in inference_data.posterior
+        assert "observed_data" not in inference_data
diff --git a/arviz/tests/helpers.py b/arviz/tests/helpers.py
index 2ab42a191c..16364333f1 100644
--- a/arviz/tests/helpers.py
+++ b/arviz/tests/helpers.py
@@ -486,6 +486,52 @@ def pystan_noncentered_schools(data, draws, chains):
     return stan_model, fit


+def bm_schools_model(data, draws, chains):
+    import beanmachine.ppl as bm
+    import torch
+    import torch.distributions as dist
+
+    class EightSchools:
+        @bm.random_variable
+        def mu(self):
+            return dist.Normal(0, 5)
+
+        @bm.random_variable
+        def tau(self):
+            return dist.HalfCauchy(5)
+
+        @bm.random_variable
+        def eta(self):
+            return dist.Normal(0, 1).expand((data["J"],))
+
+        @bm.functional
+        def theta(self):
+            return self.mu() + self.tau() * self.eta()
+
+        @bm.random_variable
+        def obs(self):
+            return dist.Normal(self.theta(), torch.from_numpy(data["sigma"]).float())
+
+    model = EightSchools()
+
+    prior = bm.GlobalNoUTurnSampler().infer(
queries=[model.mu(), model.tau(), model.eta()], + observations={}, + num_samples=draws, + num_adaptive_samples=500, + num_chains=chains, + ) + + posterior = bm.GlobalNoUTurnSampler().infer( + queries=[model.mu(), model.tau(), model.eta()], + observations={model.obs(): torch.from_numpy(data["y"]).float()}, + num_samples=draws, + num_adaptive_samples=500, + num_chains=chains, + ) + return model, prior, posterior + + def library_handle(library): """Import a library and return the handle.""" if library == "pystan": @@ -506,6 +552,7 @@ def load_cached_models(eight_schools_data, draws, chains, libs=None): ("emcee", emcee_schools_model), ("pyro", pyro_noncentered_schools), ("numpyro", numpyro_schools_model), + ("beanmachine", bm_schools_model), ) data_directory = os.path.join(here, "saved_models") models = {} diff --git a/azure-pipelines.yml b/azure-pipelines.yml index f708d65535..94bb16a5ad 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -20,5 +20,4 @@ jobs: - template: .azure-pipelines/azure-pipelines-base.yml - template: .azure-pipelines/azure-pipelines-external.yml - template: .azure-pipelines/azure-pipelines-benchmarks.yml - - template: .azure-pipelines/azure-pipelines-docs.yml - template: .azure-pipelines/azure-pipelines-wheel.yml diff --git a/doc/source/conf.py b/doc/source/conf.py index 0eb3077646..b69c9a1857 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -187,7 +187,7 @@ "name": "Mastodon", "url": "https://bayes.club/@ArviZ", "icon": "fa-brands fa-mastodon", - } + }, ], "navbar_start": ["navbar-logo", "navbar-version"], "header_links_before_dropdown": 7, diff --git a/doc/source/getting_started/ConversionGuideEmcee.ipynb b/doc/source/getting_started/ConversionGuideEmcee.ipynb index 386952490d..929a4b2fd6 100644 --- a/doc/source/getting_started/ConversionGuideEmcee.ipynb +++ b/doc/source/getting_started/ConversionGuideEmcee.ipynb @@ -80,14 +80,16 @@ " # Half-cauchy prior, hwhm=25\n", " if tau < 0:\n", " return -np.inf\n", - " prior_tau = -np.log(tau ** 2 + 25 ** 2)\n", - " prior_mu = -(mu / 10) ** 2 # normal prior, loc=0, scale=10\n", - " prior_eta = -np.sum(eta ** 2) # normal prior, loc=0, scale=1\n", + " prior_tau = -np.log(tau**2 + 25**2)\n", + " prior_mu = -((mu / 10) ** 2) # normal prior, loc=0, scale=10\n", + " prior_eta = -np.sum(eta**2) # normal prior, loc=0, scale=1\n", " return prior_mu + prior_tau + prior_eta\n", "\n", + "\n", "def log_likelihood_8school(theta, y, s):\n", " mu, tau, eta = theta[0], theta[1], theta[2:]\n", - " return -((mu + tau * eta - y) / s) ** 2\n", + " return -(((mu + tau * eta - y) / s) ** 2)\n", + "\n", "\n", "def lnprob_8school(theta, y, s):\n", " prior = log_prior_8school(theta)\n", @@ -108,12 +110,7 @@ "draws = 1500\n", "pos = np.random.normal(size=(nwalkers, ndim))\n", "pos[:, 1] = np.absolute(pos[:, 1])\n", - "sampler = emcee.EnsembleSampler(\n", - " nwalkers,\n", - " ndim,\n", - " lnprob_8school,\n", - " args=(y_obs, sigma)\n", - ")\n", + "sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob_8school, args=(y_obs, sigma))\n", "sampler.run_mcmc(pos, draws);" ] }, @@ -5132,6 +5129,7 @@ " like = np.sum(like_vect)\n", " return like + prior, like_vect\n", "\n", + "\n", "sampler_blobs = emcee.EnsembleSampler(\n", " nwalkers,\n", " ndim,\n", @@ -7145,11 +7143,11 @@ "dims = {\"eta\": [\"school\"], \"log_likelihood\": [\"school\"]}\n", "idata3 = az.from_emcee(\n", " sampler_blobs,\n", - " var_names = [\"mu\", \"tau\", \"eta\"],\n", - " slices=[0, 1, slice(2,None)],\n", + " var_names=[\"mu\", \"tau\", \"eta\"],\n", + " slices=[0, 1, 
slice(2, None)],\n", " blob_names=[\"log_likelihood\"],\n", " dims=dims,\n", - " coords={\"school\": range(8)}\n", + " coords={\"school\": range(8)},\n", ")\n", "idata3" ] @@ -9981,25 +9979,26 @@ " # and posterior predictive samples as blobs\n", " return like + prior, (like_vect, np.random.normal((mu + tau * eta), sigma))\n", "\n", + "\n", "sampler_blobs = emcee.EnsembleSampler(\n", " nwalkers,\n", " ndim,\n", " lnprob_8school_blobs,\n", " args=(y_obs, sigma),\n", ")\n", - "sampler_blobs.run_mcmc(pos, draws);\n", + "sampler_blobs.run_mcmc(pos, draws)\n", "\n", "dims = {\"eta\": [\"school\"], \"log_likelihood\": [\"school\"], \"y\": [\"school\"]}\n", "idata4 = az.from_emcee(\n", " sampler_blobs,\n", - " var_names = [\"mu\", \"tau\", \"eta\"],\n", - " slices=[0, 1, slice(2,None)],\n", - " arg_names=[\"y\",\"sigma\"],\n", + " var_names=[\"mu\", \"tau\", \"eta\"],\n", + " slices=[0, 1, slice(2, None)],\n", + " arg_names=[\"y\", \"sigma\"],\n", " arg_groups=[\"observed_data\", \"constant_data\"],\n", " blob_names=[\"log_likelihood\", \"y\"],\n", " blob_groups=[\"log_likelihood\", \"posterior_predictive\"],\n", " dims=dims,\n", - " coords={\"school\": range(8)}\n", + " coords={\"school\": range(8)},\n", ")\n", "idata4" ] diff --git a/doc/source/getting_started/CreatingInferenceData.ipynb b/doc/source/getting_started/CreatingInferenceData.ipynb index 1fd306249b..8ee0f74707 100644 --- a/doc/source/getting_started/CreatingInferenceData.ipynb +++ b/doc/source/getting_started/CreatingInferenceData.ipynb @@ -4123,8 +4123,8 @@ "import pandas as pd\n", "import xarray as xr\n", "\n", - "data = np.random.rand(100,2)\n", - "df = pd.DataFrame({'a':data[:,0], 'b':data[:,1]})\n", + "data = np.random.rand(100, 2)\n", + "df = pd.DataFrame({\"a\": data[:, 0], \"b\": data[:, 1]})\n", "df[\"chain\"] = 0\n", "df[\"draw\"] = np.arange(len(df), dtype=int)\n", "df = df.set_index([\"chain\", \"draw\"])\n", @@ -10206,9 +10206,7 @@ " tau = pm.HalfCauchy(\"tau\", beta=5)\n", " theta_tilde = pm.Normal(\"theta_tilde\", mu=0, sd=1, shape=eight_school_data[\"J\"])\n", " theta = pm.Deterministic(\"theta\", mu + tau * theta_tilde)\n", - " pm.Normal(\n", - " \"obs\", mu=theta, sd=eight_school_data[\"sigma\"], observed=eight_school_data[\"y\"]\n", - " )\n", + " pm.Normal(\"obs\", mu=theta, sd=eight_school_data[\"sigma\"], observed=eight_school_data[\"y\"])\n", "\n", " trace = pm.sample(draws, chains=chains)\n", " prior = pm.sample_prior_predictive()\n", @@ -16374,9 +16372,7 @@ "posterior_predictive = Predictive(model, posterior_samples)(\n", " eight_school_data[\"J\"], eight_school_data[\"sigma\"]\n", ")\n", - "prior = Predictive(model, num_samples=500)(\n", - " eight_school_data[\"J\"], eight_school_data[\"sigma\"]\n", - ")\n", + "prior = Predictive(model, num_samples=500)(eight_school_data[\"J\"], eight_school_data[\"sigma\"])\n", "\n", "pyro_data = az.from_pyro(\n", " mcmc,\n", @@ -24833,17 +24829,13 @@ " # use non-centered reparameterization\n", " theta = numpyro.sample(\n", " \"theta\",\n", - " dist.TransformedDistribution(\n", - " dist.Normal(np.zeros(J), 1), AffineTransform(mu, tau)\n", - " ),\n", + " dist.TransformedDistribution(dist.Normal(np.zeros(J), 1), AffineTransform(mu, tau)),\n", " )\n", " numpyro.sample(\"y\", dist.Normal(theta, sigma), obs=y)\n", "\n", "\n", "kernel = NUTS(model)\n", - "mcmc = MCMC(\n", - " kernel, num_warmup=500, num_samples=500, num_chains=4, chain_method=\"parallel\"\n", - ")\n", + "mcmc = MCMC(kernel, num_warmup=500, num_samples=500, num_chains=4, chain_method=\"parallel\")\n", 
"mcmc.run(PRNGKey(0), **eight_school_data, extra_fields=[\"num_steps\", \"energy\"])\n", "posterior_samples = mcmc.get_samples()\n", "posterior_predictive = Predictive(model, posterior_samples)(\n", @@ -24916,7 +24908,7 @@ }, "outputs": [], "source": [ - "eight_school_prior_model_code = ''' \n", + "eight_school_prior_model_code = \"\"\" \n", "model {\n", " mu ~ dnorm(0.0, 1.0/25)\n", " tau ~ dt(0.0, 1.0/25, 1.0) T(0, )\n", @@ -24924,7 +24916,7 @@ " theta_tilde[j] ~ dnorm(0.0, 1.0)\n", " }\n", "}\n", - "'''" + "\"\"\"" ] }, { @@ -24945,7 +24937,7 @@ }, "outputs": [], "source": [ - "eight_school_posterior_model_code = ''' \n", + "eight_school_posterior_model_code = \"\"\" \n", "model {\n", " mu ~ dnorm(0.0, 1.0/25)\n", " tau ~ dt(0.0, 1.0/25, 1.0) T(0, )\n", @@ -24955,7 +24947,7 @@ " log_like[j] = logdensity.norm(y[j], mu + tau * theta_tilde[j], 1.0/(sigma[j]^2))\n", " }\n", "}\n", - "'''" + "\"\"\"" ] }, { @@ -24969,8 +24961,8 @@ }, "outputs": [], "source": [ - "parameters = ['mu', 'tau', 'theta_tilde']\n", - "variables = parameters + ['log_like']" + "parameters = [\"mu\", \"tau\", \"theta_tilde\"]\n", + "variables = parameters + [\"log_like\"]" ] }, { @@ -24999,11 +24991,7 @@ "outputs": [], "source": [ "jags_prior_model = pyjags.Model(\n", - " code=eight_school_prior_model_code, \n", - " data={\"J\": 8}, \n", - " chains=4, \n", - " threads=4,\n", - " chains_per_thread=1\n", + " code=eight_school_prior_model_code, data={\"J\": 8}, chains=4, threads=4, chains_per_thread=1\n", ")" ] }, @@ -25034,11 +25022,11 @@ ], "source": [ "jags_posterior_model = pyjags.Model(\n", - " code=eight_school_posterior_model_code, \n", - " data=eight_school_data, \n", - " chains=4, \n", + " code=eight_school_posterior_model_code,\n", + " data=eight_school_data,\n", + " chains=4,\n", " threads=4,\n", - " chains_per_thread=1\n", + " chains_per_thread=1,\n", ")" ] }, @@ -27887,11 +27875,11 @@ ], "source": [ "pyjags_data = az.from_pyjags(\n", - " posterior=jags_posterior_samples, \n", - " prior=jags_prior_samples, \n", - " log_likelihood={'y': 'log_like'}, \n", - " save_warmup=True, \n", - " warmup_iterations=1000\n", + " posterior=jags_posterior_samples,\n", + " prior=jags_prior_samples,\n", + " log_likelihood={\"y\": \"log_like\"},\n", + " save_warmup=True,\n", + " warmup_iterations=1000,\n", ")\n", "pyjags_data" ] diff --git a/doc/source/getting_started/Introduction.ipynb b/doc/source/getting_started/Introduction.ipynb index daa31d34b1..9e03889ffe 100644 --- a/doc/source/getting_started/Introduction.ipynb +++ b/doc/source/getting_started/Introduction.ipynb @@ -127,7 +127,7 @@ "metadata": {}, "outputs": [], "source": [ - "az.rcParams['stats.hdi_prob'] = 0.90" + "az.rcParams[\"stats.hdi_prob\"] = 0.90" ] }, { @@ -3726,7 +3726,7 @@ } ], "source": [ - "az.plot_trace(data,compact=False);" + "az.plot_trace(data, compact=False);" ] }, { @@ -3745,6 +3745,7 @@ "outputs": [], "source": [ "import nest_asyncio\n", + "\n", "nest_asyncio.apply()" ] }, diff --git a/doc/source/getting_started/WorkingWithInferenceData.ipynb b/doc/source/getting_started/WorkingWithInferenceData.ipynb index ea98e68366..e4c4d81f3d 100644 --- a/doc/source/getting_started/WorkingWithInferenceData.ipynb +++ b/doc/source/getting_started/WorkingWithInferenceData.ipynb @@ -20,6 +20,7 @@ "import arviz as az\n", "import numpy as np\n", "import xarray as xr\n", + "\n", "xr.set_options(display_expand_data=False, display_expand_attrs=False);" ] }, @@ -12611,7 +12612,7 @@ } ], "source": [ - "post.mean(dim=['chain', 'draw'])" + "post.mean(dim=[\"chain\", 
\"draw\"])" ] }, { @@ -12638,7 +12639,7 @@ "metadata": {}, "outputs": [], "source": [ - "post[\"mlogtau\"] = post[\"log_tau\"].rolling({'draw': 50}).mean()" + "post[\"mlogtau\"] = post[\"log_tau\"].rolling({\"draw\": 50}).mean()" ] }, { @@ -12665,7 +12666,7 @@ "metadata": {}, "outputs": [], "source": [ - "post['theta_school_diff'] = post.theta - post.theta.rename(school=\"school_bis\")" + "post[\"theta_school_diff\"] = post.theta - post.theta.rename(school=\"school_bis\")" ] }, { @@ -13569,7 +13570,7 @@ } ], "source": [ - "post['theta_school_diff'].sel(school=\"Choate\", school_bis=\"Deerfield\")" + "post[\"theta_school_diff\"].sel(school=\"Choate\", school_bis=\"Deerfield\")" ] }, { @@ -13997,8 +13998,10 @@ ], "source": [ "school_idx = xr.DataArray([\"Choate\", \"Hotchkiss\", \"Mt. Hermon\"], dims=[\"pairwise_school_diff\"])\n", - "school_bis_idx = xr.DataArray([\"Deerfield\", \"Choate\", \"Lawrenceville\"], dims=[\"pairwise_school_diff\"])\n", - "post['theta_school_diff'].sel(school=school_idx, school_bis=school_bis_idx)" + "school_bis_idx = xr.DataArray(\n", + " [\"Deerfield\", \"Choate\", \"Lawrenceville\"], dims=[\"pairwise_school_diff\"]\n", + ")\n", + "post[\"theta_school_diff\"].sel(school=school_idx, school_bis=school_bis_idx)" ] }, { @@ -14433,9 +14436,9 @@ } ], "source": [ - "post['theta_school_diff'].sel(\n", - " school=[\"Choate\", \"Hotchkiss\", \"Mt. Hermon\"], \n", - " school_bis=[\"Deerfield\", \"Choate\", \"Lawrenceville\"]\n", + "post[\"theta_school_diff\"].sel(\n", + " school=[\"Choate\", \"Hotchkiss\", \"Mt. Hermon\"],\n", + " school_bis=[\"Deerfield\", \"Choate\", \"Lawrenceville\"],\n", ")" ] }, @@ -14460,7 +14463,11 @@ "metadata": {}, "outputs": [], "source": [ - "idata_rerun = idata.sel(chain=[0, 1]).copy().assign_coords(coords={\"chain\":[4,5]},groups=\"posterior_groups\")" + "idata_rerun = (\n", + " idata.sel(chain=[0, 1])\n", + " .copy()\n", + " .assign_coords(coords={\"chain\": [4, 5]}, groups=\"posterior_groups\")\n", + ")" ] }, { @@ -17331,9 +17338,9 @@ "source": [ "rng = np.random.default_rng(3)\n", "idata.add_groups(\n", - " {\"predictions\": {\"obs\": rng.normal(size=(4, 500, 2))}}, \n", - " dims={\"obs\": [\"new_school\"]}, \n", - " coords={\"new_school\": [\"Essex College\", \"Moordale\"]}\n", + " {\"predictions\": {\"obs\": rng.normal(size=(4, 500, 2))}},\n", + " dims={\"obs\": [\"new_school\"]},\n", + " coords={\"new_school\": [\"Essex College\", \"Moordale\"]},\n", ")\n", "idata" ] diff --git a/doc/source/schema/PyMC3_schema_example.ipynb b/doc/source/schema/PyMC3_schema_example.ipynb index 802494b456..4f002225af 100644 --- a/doc/source/schema/PyMC3_schema_example.ipynb +++ b/doc/source/schema/PyMC3_schema_example.ipynb @@ -21,6 +21,7 @@ "import pandas as pd\n", "import numpy as np\n", "import xarray\n", + "\n", "xarray.set_options(display_style=\"html\");" ] }, @@ -105,7 +106,7 @@ } ], "source": [ - "#read data\n", + "# read data\n", "data = pd.read_csv(\"linear_regression_data.csv\", index_col=0)\n", "time = data.time.values\n", "slack_comments = data.comments.values\n", @@ -210,24 +211,28 @@ } ], "source": [ - "dims={\n", + "dims = {\n", " \"slack_comments\": [\"developer\"],\n", " \"github_commits\": [\"developer\"],\n", " \"time_since_joined\": [\"developer\"],\n", "}\n", "with pm.Model() as model:\n", " time_since_joined = pm.Data(\"time_since_joined\", time)\n", - " \n", - " b_sigma = pm.HalfNormal('b_sigma', sd=300)\n", - " c_sigma = pm.HalfNormal('c_sigma', sd=6)\n", + "\n", + " b_sigma = pm.HalfNormal(\"b_sigma\", sd=300)\n", + " c_sigma = 
pm.HalfNormal(\"c_sigma\", sd=6)\n", " b0 = pm.Normal(\"b0\", mu=0, sd=200)\n", " b1 = pm.Normal(\"b1\", mu=0, sd=200)\n", " c0 = pm.Normal(\"c0\", mu=0, sd=10)\n", " c1 = pm.Normal(\"c1\", mu=0, sd=10)\n", - " \n", - " pm.Normal(\"slack_comments\", mu=b0 + b1 * time_since_joined, sigma=b_sigma, observed=slack_comments)\n", - " pm.Normal(\"github_commits\", mu=c0 + c1 * time_since_joined, sigma=c_sigma, observed=github_commits)\n", - " \n", + "\n", + " pm.Normal(\n", + " \"slack_comments\", mu=b0 + b1 * time_since_joined, sigma=b_sigma, observed=slack_comments\n", + " )\n", + " pm.Normal(\n", + " \"github_commits\", mu=c0 + c1 * time_since_joined, sigma=c_sigma, observed=github_commits\n", + " )\n", + "\n", " trace = pm.sample(400, chains=4)\n", " posterior_predictive = pm.sample_posterior_predictive(trace)\n", " prior = pm.sample_prior_predictive(150)\n", @@ -236,7 +241,7 @@ " prior=prior,\n", " posterior_predictive=posterior_predictive,\n", " coords={\"developer\": names},\n", - " dims=dims\n", + " dims=dims,\n", " )" ] }, @@ -276,7 +281,7 @@ } ], "source": [ - "dims_pred={\n", + "dims_pred = {\n", " \"slack_comments\": [\"candidate developer\"],\n", " \"github_commits\": [\"candidate developer\"],\n", " \"time_since_joined\": [\"candidate developer\"],\n", @@ -285,8 +290,8 @@ " pm.set_data({\"time_since_joined\": candidate_devs_time})\n", " predictions = pm.sample_posterior_predictive(trace)\n", " az.from_pymc3_predictions(\n", - " predictions, \n", - " idata_orig=idata_pymc3, \n", + " predictions,\n", + " idata_orig=idata_pymc3,\n", " inplace=True,\n", " coords={\"candidate developer\": candidate_devs},\n", " dims=dims_pred,\n", diff --git a/doc/source/schema/PyStan_schema_example.ipynb b/doc/source/schema/PyStan_schema_example.ipynb index f3d483b706..9bcb66e494 100644 --- a/doc/source/schema/PyStan_schema_example.ipynb +++ b/doc/source/schema/PyStan_schema_example.ipynb @@ -21,6 +21,7 @@ "import pandas as pd\n", "import numpy as np\n", "import xarray\n", + "\n", "xarray.set_options(display_style=\"html\");" ] }, @@ -105,7 +106,7 @@ } ], "source": [ - "#read data\n", + "# read data\n", "data = pd.read_csv(\"linear_regression_data.csv\", index_col=0)\n", "time_since_joined = data.time.values\n", "slack_comments = data.comments.values\n", @@ -187,7 +188,9 @@ "outputs": [], "source": [ "linreg_prior_data_dict = {\"N\": N, \"time_since_joined\": time_since_joined}\n", - "prior = sm_prior.sampling(data=linreg_prior_data_dict, iter=150, chains=1, algorithm='Fixed_param', warmup=0)" + "prior = sm_prior.sampling(\n", + " data=linreg_prior_data_dict, iter=150, chains=1, algorithm=\"Fixed_param\", warmup=0\n", + ")" ] }, { @@ -289,7 +292,14 @@ } ], "source": [ - "linreg_data_dict = {\"N\": N, \"slack_comments\": slack_comments, \"github_commits\": github_commits, \"time_since_joined\": time_since_joined, \"N_pred\" : N_pred, \"time_since_joined_pred\" : candidate_devs_time}\n", + "linreg_data_dict = {\n", + " \"N\": N,\n", + " \"slack_comments\": slack_comments,\n", + " \"github_commits\": github_commits,\n", + " \"time_since_joined\": time_since_joined,\n", + " \"N_pred\": N_pred,\n", + " \"time_since_joined_pred\": candidate_devs_time,\n", + "}\n", "posterior = sm.sampling(data=linreg_data_dict, iter=200, chains=4)" ] }, @@ -302,27 +312,27 @@ "idata_stan = az.from_pystan(\n", " posterior=posterior,\n", " prior=prior,\n", - " posterior_predictive=[\"slack_comments_hat\",\"github_commits_hat\"],\n", - " prior_predictive=[\"slack_comments_hat\",\"github_commits_hat\"],\n", - " 
observed_data=[\"slack_comments\",\"github_commits\"],\n", + " posterior_predictive=[\"slack_comments_hat\", \"github_commits_hat\"],\n", + " prior_predictive=[\"slack_comments_hat\", \"github_commits_hat\"],\n", + " observed_data=[\"slack_comments\", \"github_commits\"],\n", " constant_data=[\"time_since_joined\"],\n", " log_likelihood={\n", " \"slack_comments\": \"log_likelihood_slack_comments\",\n", - " \"github_commits\": \"log_likelihood_github_commits\"\n", + " \"github_commits\": \"log_likelihood_github_commits\",\n", " },\n", " predictions=[\"slack_comments_pred\", \"github_commits_pred\"],\n", " predictions_constant_data=[\"time_since_joined_pred\"],\n", - " coords={\"developer\": names, \"candidate developer\" : candidate_devs},\n", + " coords={\"developer\": names, \"candidate developer\": candidate_devs},\n", " dims={\n", " \"slack_comments\": [\"developer\"],\n", - " \"github_commits\" : [\"developer\"],\n", + " \"github_commits\": [\"developer\"],\n", " \"slack_comments_hat\": [\"developer\"],\n", " \"github_commits_hat\": [\"developer\"],\n", " \"time_since_joined\": [\"developer\"],\n", - " \"slack_comments_pred\" : [\"candidate developer\"],\n", - " \"github_commits_pred\" : [\"candidate developer\"],\n", - " \"time_since_joined_pred\" : [\"candidate developer\"],\n", - " }\n", + " \"slack_comments_pred\": [\"candidate developer\"],\n", + " \"github_commits_pred\": [\"candidate developer\"],\n", + " \"time_since_joined_pred\": [\"candidate developer\"],\n", + " },\n", ")" ] }, diff --git a/doc/source/user_guide/Dask.ipynb b/doc/source/user_guide/Dask.ipynb index d943644fad..8b7eae3307 100644 --- a/doc/source/user_guide/Dask.ipynb +++ b/doc/source/user_guide/Dask.ipynb @@ -1655,6 +1655,7 @@ "\n", "from bokeh.resources import INLINE\n", "import bokeh.io\n", + "\n", "bokeh.io.output_notebook(INLINE)\n", "\n", "%reload_ext memory_profiler" @@ -2304,7 +2305,7 @@ } ], "source": [ - "daskdata.visualize() # Each chunk will follow lazy evaluation " + "daskdata.visualize() # Each chunk will follow lazy evaluation" ] }, { @@ -4234,7 +4235,7 @@ "%%time\n", "%%memit\n", "\n", - "rhat = az.rhat(idata_dask, dask_kwargs={\"output_dtypes\": [float]})\n", + "rhat = az.rhat(idata_dask, dask_kwargs={\"output_dtypes\": [float]})\n", "\n", "with ResourceProfiler(dt=0.25) as rprof:\n", " rhat.compute()" @@ -4331,7 +4332,7 @@ "%%time\n", "%%memit\n", "\n", - "az.hdi(idata_numpy, hdi_prob=.68)" + "az.hdi(idata_numpy, hdi_prob=0.68)" ] }, { diff --git a/doc/source/user_guide/numpyro_refitting.ipynb b/doc/source/user_guide/numpyro_refitting.ipynb index 6695eff684..92ac8e0388 100644 --- a/doc/source/user_guide/numpyro_refitting.ipynb +++ b/doc/source/user_guide/numpyro_refitting.ipynb @@ -127,11 +127,7 @@ "}\n", "kernel = NUTS(model)\n", "sample_kwargs = dict(\n", - " sampler=kernel, \n", - " num_warmup=1000, \n", - " num_samples=1000, \n", - " num_chains=4, \n", - " chain_method=\"parallel\"\n", + " sampler=kernel, num_warmup=1000, num_samples=1000, num_chains=4, chain_method=\"parallel\"\n", ")\n", "mcmc = MCMC(**sample_kwargs)\n", "mcmc.run(random.PRNGKey(0), **data_dict)" @@ -2496,10 +2492,7 @@ ], "source": [ "dims = {\"y\": [\"time\"], \"x\": [\"time\"]}\n", - "idata_kwargs = {\n", - " \"dims\": dims,\n", - " \"constant_data\": {\"x\": xdata}\n", - "}\n", + "idata_kwargs = {\"dims\": dims, \"constant_data\": {\"x\": xdata}}\n", "idata = az.from_numpyro(mcmc, **idata_kwargs)\n", "idata" ] @@ -2524,21 +2517,18 @@ "outputs": [], "source": [ "class NumPyroSamplingWrapper(az.SamplingWrapper):\n", - " 
def __init__(self, model, **kwargs): \n", + " def __init__(self, model, **kwargs):\n", " self.model_fun = model.sampler.model\n", " self.rng_key = kwargs.pop(\"rng_key\", random.PRNGKey(0))\n", - " \n", + "\n", " super(NumPyroSamplingWrapper, self).__init__(model, **kwargs)\n", - " \n", + "\n", " def log_likelihood__i(self, excluded_obs, idata__i):\n", " samples = {\n", " key: values.values.reshape((-1, *values.values.shape[2:]))\n", - " for key, values \n", - " in idata__i.posterior.items()\n", + " for key, values in idata__i.posterior.items()\n", " }\n", - " log_likelihood_dict = numpyro.infer.log_likelihood(\n", - " self.model_fun, samples, **excluded_obs\n", - " )\n", + " log_likelihood_dict = numpyro.infer.log_likelihood(self.model_fun, samples, **excluded_obs)\n", " if len(log_likelihood_dict) > 1:\n", " raise ValueError(\"multiple likelihoods found\")\n", " data = {}\n", @@ -2548,7 +2538,7 @@ " shape = (nchains, ndraws) + log_like.shape[1:]\n", " data[obs_name] = np.reshape(log_like.copy(), shape)\n", " return az.dict_to_dataset(data)[obs_name]\n", - " \n", + "\n", " def sample(self, modified_observed_data):\n", " self.rng_key, subkey = random.split(self.rng_key)\n", " mcmc = MCMC(**self.sample_kwargs)\n", @@ -2559,7 +2549,8 @@ " # Cloned from PyStanSamplingWrapper.\n", " idata = az.from_numpyro(mcmc, **self.idata_kwargs)\n", " return idata\n", - " \n", + "\n", + "\n", "class LinRegWrapper(NumPyroSamplingWrapper):\n", " def sel_observations(self, idx):\n", " xdata = self.idata_orig.constant_data[\"x\"].values\n", @@ -2639,11 +2630,11 @@ "outputs": [], "source": [ "numpyro_wrapper = LinRegWrapper(\n", - " mcmc, \n", + " mcmc,\n", " rng_key=random.PRNGKey(5),\n", - " idata_orig=idata, \n", - " sample_kwargs=sample_kwargs, \n", - " idata_kwargs=idata_kwargs\n", + " idata_orig=idata,\n", + " sample_kwargs=sample_kwargs,\n", + " idata_kwargs=idata_kwargs,\n", ")" ] }, diff --git a/doc/source/user_guide/numpyro_refitting_xr_lik.ipynb b/doc/source/user_guide/numpyro_refitting_xr_lik.ipynb index 8143794adb..bc36570e15 100644 --- a/doc/source/user_guide/numpyro_refitting_xr_lik.ipynb +++ b/doc/source/user_guide/numpyro_refitting_xr_lik.ipynb @@ -127,11 +127,7 @@ "}\n", "kernel = NUTS(model)\n", "sample_kwargs = dict(\n", - " sampler=kernel, \n", - " num_warmup=1000, \n", - " num_samples=1000, \n", - " num_chains=4, \n", - " chain_method=\"parallel\"\n", + " sampler=kernel, num_warmup=1000, num_samples=1000, num_chains=4, chain_method=\"parallel\"\n", ")\n", "mcmc = MCMC(**sample_kwargs)\n", "mcmc.run(random.PRNGKey(0), **data_dict)" @@ -2078,10 +2074,7 @@ ], "source": [ "dims = {\"y\": [\"time\"], \"x\": [\"time\"]}\n", - "idata_kwargs = {\n", - " \"dims\": dims,\n", - " \"constant_data\": {\"x\": xdata}\n", - "}\n", + "idata_kwargs = {\"dims\": dims, \"constant_data\": {\"x\": xdata}}\n", "idata = az.from_numpyro(mcmc, **idata_kwargs)\n", "del idata.log_likelihood\n", "idata" @@ -2170,7 +2163,7 @@ " idata.observed_data[\"y\"].values,\n", " idata.posterior[\"b0\"].values,\n", " idata.posterior[\"b1\"].values,\n", - " idata.posterior[\"sigma_e\"].values\n", + " idata.posterior[\"sigma_e\"].values,\n", ")" ] }, @@ -4554,11 +4547,11 @@ "outputs": [], "source": [ "class NumPyroSamplingWrapper(az.SamplingWrapper):\n", - " def __init__(self, model, **kwargs): \n", + " def __init__(self, model, **kwargs):\n", " self.rng_key = kwargs.pop(\"rng_key\", random.PRNGKey(0))\n", - " \n", + "\n", " super(NumPyroSamplingWrapper, self).__init__(model, **kwargs)\n", - " \n", + "\n", " def sample(self, 
modified_observed_data):\n", " self.rng_key, subkey = random.split(self.rng_key)\n", " mcmc = MCMC(**self.sample_kwargs)\n", @@ -4569,7 +4562,8 @@ " # Cloned from PyStanSamplingWrapper.\n", " idata = az.from_numpyro(mcmc, **self.idata_kwargs)\n", " return idata\n", - " \n", + "\n", + "\n", "class LinRegWrapper(NumPyroSamplingWrapper):\n", " def sel_observations(self, idx):\n", " xdata = self.idata_orig.constant_data[\"x\"]\n", @@ -4579,7 +4573,7 @@ " # data_ex is passed to apply_ufunc -> list of DataArray\n", " data__i = {\"x\": xdata[~mask].values, \"y\": ydata[~mask].values, \"N\": len(ydata[~mask])}\n", " data_ex = [xdata[mask], ydata[mask]]\n", - " return data__i, data_ex\n" + " return data__i, data_ex" ] }, { @@ -4655,13 +4649,13 @@ "outputs": [], "source": [ "pystan_wrapper = LinRegWrapper(\n", - " mcmc, \n", + " mcmc,\n", " rng_key=random.PRNGKey(7),\n", - " log_lik_fun=calculate_log_lik, \n", + " log_lik_fun=calculate_log_lik,\n", " posterior_vars=(\"b0\", \"b1\", \"sigma_e\"),\n", - " idata_orig=idata, \n", - " sample_kwargs=sample_kwargs, \n", - " idata_kwargs=idata_kwargs\n", + " idata_orig=idata,\n", + " sample_kwargs=sample_kwargs,\n", + " idata_kwargs=idata_kwargs,\n", ")" ] }, diff --git a/doc/source/user_guide/pymc_refitting.ipynb b/doc/source/user_guide/pymc_refitting.ipynb index 36eb5e411d..dc6ec84144 100644 --- a/doc/source/user_guide/pymc_refitting.ipynb +++ b/doc/source/user_guide/pymc_refitting.ipynb @@ -86,12 +86,10 @@ "with pm.Model() as linreg_model:\n", " # optional: add coords to \"time\" dimension\n", " linreg_model.add_coord(\"time\", np.arange(len(xdata)), mutable=True)\n", - " \n", + "\n", " x = pm.MutableData(\"x\", xdata, dims=\"time\")\n", " y_obs = pm.MutableData(\"y_obs\", ydata, dims=\"time\")\n", - " \n", - " \n", - " \n", + "\n", " b0 = pm.Normal(\"b0\", 0, 10)\n", " b1 = pm.Normal(\"b1\", 0, 10)\n", " sigma_e = pm.HalfNormal(\"sigma_e\", 10)\n", @@ -192,8 +190,8 @@ "from scipy import stats\n", "from xarray_einstats.stats import XrContinuousRV\n", "\n", + "\n", "class PyMCLinRegWrapper(az.PyMCSamplingWrapper):\n", - " \n", " def sample(self, modified_observed_data):\n", " with self.model:\n", " # if the model had coords the dim needs to be updated before\n", @@ -201,13 +199,13 @@ " # otherwise, we don't need to overwrite the sample method\n", " n__i = len(modified_observed_data[\"x\"])\n", " self.model.set_dim(\"time\", n__i, coord_values=np.arange(n__i))\n", - " \n", + "\n", " pm.set_data(modified_observed_data)\n", " idata = pm.sample(\n", " **self.sample_kwargs,\n", " )\n", " return idata\n", - " \n", + "\n", " def log_likelihood__i(self, excluded_observed_data, idata__i):\n", " post = idata__i.posterior\n", " dist = XrContinuousRV(\n", @@ -216,7 +214,7 @@ " post[\"sigma_e\"],\n", " )\n", " return dist.logpdf(excluded_observed_data[\"y_obs\"])\n", - " \n", + "\n", " def sel_observations(self, idx):\n", " xdata = self.idata_orig[\"constant_data\"][\"x\"]\n", " ydata = self.idata_orig[\"observed_data\"][\"y\"]\n", @@ -289,9 +287,7 @@ "metadata": {}, "outputs": [], "source": [ - "pymc_wrapper = PyMCLinRegWrapper(\n", - " model=linreg_model, idata_orig=idata, sample_kwargs=sample_kwargs\n", - ")" + "pymc_wrapper = PyMCLinRegWrapper(model=linreg_model, idata_orig=idata, sample_kwargs=sample_kwargs)" ] }, { diff --git a/doc/source/user_guide/pystan_refitting.ipynb b/doc/source/user_guide/pystan_refitting.ipynb index 664832dcf9..7b2f340fc9 100644 --- a/doc/source/user_guide/pystan_refitting.ipynb +++ b/doc/source/user_guide/pystan_refitting.ipynb @@ -39,6 
+39,7 @@ "source": [ "# enable PyStan on Jupyter IDE\n", "import nest_asyncio\n", + "\n", "nest_asyncio.apply()" ] }, diff --git a/doc/source/user_guide/wrappers/cmdstanpy_refitting.ipynb b/doc/source/user_guide/wrappers/cmdstanpy_refitting.ipynb index 08ecf3272a..f2bbfdd021 100644 --- a/doc/source/user_guide/wrappers/cmdstanpy_refitting.ipynb +++ b/doc/source/user_guide/wrappers/cmdstanpy_refitting.ipynb @@ -306,7 +306,9 @@ " \"log_likelihood\": [\"log_lik\"],\n", " \"dims\": dims,\n", "}\n", - "idata = az.from_cmdstanpy(posterior=fit, observed_data={\"y\": ydata}, constant_data={\"x\": xdata}, **idata_kwargs)" + "idata = az.from_cmdstanpy(\n", + " posterior=fit, observed_data={\"y\": ydata}, constant_data={\"x\": xdata}, **idata_kwargs\n", + ")" ] }, { @@ -409,7 +411,7 @@ " idata_orig=idata,\n", " data_file=\"linreg_ex_data.json\",\n", " sample_kwargs=sample_kwargs,\n", - " idata_kwargs=idata_kwargs\n", + " idata_kwargs=idata_kwargs,\n", ")" ] }, diff --git a/doc/source/user_guide/wrappers/sbc_cmdstanpy.ipynb b/doc/source/user_guide/wrappers/sbc_cmdstanpy.ipynb index a3b34c4a75..4f3e605740 100644 --- a/doc/source/user_guide/wrappers/sbc_cmdstanpy.ipynb +++ b/doc/source/user_guide/wrappers/sbc_cmdstanpy.ipynb @@ -33,7 +33,7 @@ "metadata": {}, "outputs": [], "source": [ - "#read data\n", + "# read data\n", "data = pd.read_csv(\"../../schema/linear_regression_data.csv\", index_col=0)\n", "time_since_joined = data.time.values\n", "slack_comments = data.comments.values\n", @@ -2261,24 +2261,24 @@ ], "source": [ "idata_kwargs = dict(\n", - " prior_predictive=[\"slack_comments_hat\",\"github_commits_hat\"],\n", - " posterior_predictive=[\"slack_comments_hat\",\"github_commits_hat\"],\n", + " prior_predictive=[\"slack_comments_hat\", \"github_commits_hat\"],\n", + " posterior_predictive=[\"slack_comments_hat\", \"github_commits_hat\"],\n", " log_likelihood={\n", " \"slack_comments\": \"log_likelihood_slack_comments\",\n", - " \"github_commits\": \"log_likelihood_github_commits\"\n", + " \"github_commits\": \"log_likelihood_github_commits\",\n", " },\n", " coords={\"developer\": names},\n", " dims={\n", " \"slack_comments\": [\"developer\"],\n", - " \"github_commits\" : [\"developer\"],\n", + " \"github_commits\": [\"developer\"],\n", " \"slack_comments_hat\": [\"developer\"],\n", " \"github_commits_hat\": [\"developer\"],\n", " \"time_since_joined\": [\"developer\"],\n", - " }\n", + " },\n", + ")\n", + "idata_orig = az.from_cmdstanpy(prior=prior_fit, **idata_kwargs).stack(\n", + " prior_draw=[\"chain\", \"draw\"], groups=\"prior_groups\"\n", ")\n", - "idata_orig = az.from_cmdstanpy(\n", - " prior=prior_fit, **idata_kwargs\n", - ").stack(prior_draw=[\"chain\", \"draw\"], groups=\"prior_groups\")\n", "idata_orig" ] }, @@ -2300,7 +2300,7 @@ "outputs": [], "source": [ "linreg_base_data = {\n", - " \"N\": N, \n", + " \"N\": N,\n", " \"time_since_joined\": time_since_joined,\n", "}" ] @@ -18316,7 +18316,7 @@ " data_dict = {\n", " \"slack_comments\": data_s[\"slack_comments_hat\"].values,\n", " \"github_commits\": data_s[\"github_commits_hat\"].values,\n", - " **linreg_base_data\n", + " **linreg_base_data,\n", " }\n", " write_stan_json(\"linreg_data.json\", data_dict)\n", " fit = model.sample(data=\"linreg_data.json\")\n", diff --git a/setup.py b/setup.py index 4be9334510..b1224692ea 100644 --- a/setup.py +++ b/setup.py @@ -58,7 +58,7 @@ def get_version(): long_description=get_long_description(), long_description_content_type="text/markdown", include_package_data=True, - python_requires='>=3.8', + 
python_requires=">=3.8", classifiers=[ "Development Status :: 4 - Beta", "Framework :: Matplotlib",
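Beyond the formatting-only changes in the notebooks and `setup.py`, the functional core of this diff is the relocated `smooth_data` call in both `plot_bpv` backends. A standalone sketch of the resulting branch logic, assuming only what the hunks above show — `bpv_tstat` and `smooth_fn` are illustrative names, not ArviZ API:

```python
import numpy as np


def bpv_tstat(obs_vals, pp_vals, kind, smooth_fn):
    """Sketch of the fixed plot_bpv control flow (illustrative, not ArviZ API).

    obs_vals: 1-d array of observations; pp_vals: 2-d array of posterior
    predictive samples, one row per draw; smooth_fn stands in for ArviZ's
    internal smooth_data helper.
    """
    if kind == "p_value":
        # integer (discrete) data now stay untouched here: the statistic is the
        # exceedance proportion computed per posterior predictive draw
        return np.mean(pp_vals <= obs_vals, axis=-1)
    if kind == "u_value":
        # only this branch smooths discrete data, so the per-observation PIT
        # values are continuous before being handed to the KDE
        if obs_vals.dtype.kind == "i" or pp_vals.dtype.kind == "i":
            obs_vals, pp_vals = smooth_fn(obs_vals, pp_vals)
        return np.mean(pp_vals <= obs_vals, axis=0)
    raise ValueError(f"kind must be 'p_value' or 'u_value', got {kind!r}")
```

This is why the old placement was a bug: smoothing happened before the branch, so p_values for discrete observations were also computed from smoothed data rather than from the raw values.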