# Compare wall-clock time and ESS/sec for a binomial model
# fit to simulated datasets with smaller and larger numbers of observations.


import os
import numpy as np
import pandas as pd
from typing import Any, Dict, List, Tuple
from cmdstanpy import CmdStanModel

import logging
cmdstanpy_logger = logging.getLogger("cmdstanpy")
cmdstanpy_logger.setLevel(logging.ERROR)

import warnings
warnings.filterwarnings('ignore')

# Fit the model to the dataset N times.
# For each run, record total wall-clock time and effective samples per second (ESS/sec).
# Returns an np.ndarray of shape (N, 2) with the timing information.
def time_fits(N: int, model: CmdStanModel, data: dict) -> np.ndarray:
    fit_times = np.ndarray(shape=(N, 2), dtype=float)
    for i in range(N):
        fit = model.sample(data=data, parallel_chains=4,
                           show_progress=False, show_console=False, refresh=10_000)
        times = fit.time
        fit_summary = fit.summary()
        # Sum the reported 'total' times across chains.
        total_time = 0
        for j in range(len(times)):
            total_time += times[j]['total']

        fit_times[i, 0] = total_time
        # ESS/sec based on the bulk ESS of lp__.
        fit_times[i, 1] = fit_summary.loc['lp__', 'ESS_bulk'] / total_time
    return fit_times
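
# Example usage (illustrative only; assumes the model file and the data_small
# dict built further below), e.g.:
#   mod = CmdStanModel(stan_file=os.path.join('stan', 'binomial_4preds_ozs.stan'))
#   times = time_fits(10, mod, data_small)   # -> array of shape (10, 2)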


# Given a list of (label, timing array) pairs, build a DataFrame with the
# mean and std dev of wall-clock time and the mean ESS/sec for each label.
def summarize_times(data_pairs: List[Tuple[str, np.ndarray]]) -> pd.DataFrame:
    result_data = []
    for label, array in data_pairs:
        result_data.append({
            'label': label,
            'mean': np.mean(array, axis=0)[0],
            'std dev': np.std(array, axis=0)[0],
            'ESS/sec': np.mean(array, axis=0)[1]
        })
    df = pd.DataFrame(result_data)
    return df.set_index('label').round(2)
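
# Illustrative check of the summary layout (fake numbers, not real timings), e.g.:
#   rng = np.random.default_rng(1)
#   fake = np.column_stack([rng.normal(30.0, 2.0, 5),    # wall-clock times
#                           rng.normal(50.0, 5.0, 5)])   # ESS/sec values
#   summarize_times([('fake run', fake)])  # one row: mean, std dev, ESS/sec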


# Run the data-generating model and return a dict containing the simulated data and metadata.
def simulate_data(
    N_eth: int, N_edu: int, N_age: int,
    baseline: float, sens: float, spec: float,
    N_obs_per_stratum: int, *, seed: int = 345678
    ) -> Dict[str, Any]:

    N = 2 * N_eth * N_edu * N_age * N_obs_per_stratum

    inputs_dict = {
        'N': N, 'N_eth': N_eth, 'N_edu': N_edu, 'N_age': N_age,
        'baseline': baseline, 'sens': sens, 'spec': spec
    }
    datagen_mod = CmdStanModel(stan_file=os.path.join('stan', 'gen_binomial_4_preds.stan'))
    # Single draw from the generating model; pass the caller's seed through to CmdStan.
    sim_data = datagen_mod.sample(data=inputs_dict, iter_warmup=1, iter_sampling=1, chains=1,
                                  show_progress=False, show_console=False, refresh=10_000,
                                  seed=seed)
    gen_data = {
        'N': sim_data.pos_tests.shape[1],
        'N_age': N_age,
        'N_eth': N_eth,
        'N_edu': N_edu,
        'sens': sens,
        'spec': spec,
        'intercept_prior_mean': baseline,
        'intercept_prior_scale': 2.5,
        'pos_tests': sim_data.pos_tests[0].astype(int),
        'tests': sim_data.tests[0].astype(int),
        'sex': sim_data.sex[0].astype(int),
        'age': sim_data.age[0].astype(int),
        'eth': sim_data.eth[0].astype(int),
        'edu': sim_data.edu[0].astype(int),
        'beta_0': sim_data.beta_0[0],
        'pct_sex': sim_data.pct_sex[0],
        'beta_sex': sim_data.beta_sex[0],
        'pct_age': sim_data.pct_age[0],
        'beta_age': sim_data.beta_age[0],
        'pct_eth': sim_data.pct_eth[0],
        'beta_eth': sim_data.beta_eth[0],
        'pct_edu': sim_data.pct_edu[0],
        'beta_edu': sim_data.beta_edu[0],
        'seed': sim_data.metadata.cmdstan_config['seed'],
    }
    return gen_data
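
# Note: 'N' is read back from the shape of the simulated pos_tests array, and the
# RNG seed actually used by CmdStan is saved under 'seed' so that subsequent
# datasets can be generated with the same seed (as done below).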


# Create the datasets: fix the stratum counts, baseline, test sensitivity/specificity, and seed.

N_eth = 3
N_edu = 5
N_age = 9
baseline = -3.5
sens = 0.75
spec = 0.9995
data_tiny = simulate_data(N_eth, N_edu, N_age, baseline, sens, spec, 7, seed=45678)
data_small = simulate_data(N_eth, N_edu, N_age, baseline, sens, spec, 17, seed=data_tiny['seed'])
data_large = simulate_data(N_eth, N_edu, N_age, baseline, sens, spec, 200, seed=data_tiny['seed'])
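
# For reference: with N_eth=3, N_edu=5, N_age=9 there are 3 * 5 * 9 = 135 covariate
# cells, doubled (presumably for the binary sex predictor) to 270 strata, so the
# simulated totals are 270 * 7 = 1890 (tiny), 270 * 17 = 4590 (small), and
# 270 * 200 = 54000 (large) observations.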


# sum_to_zero_vector parameterization (ozs)

binomial_ozs_mod = CmdStanModel(stan_file=os.path.join('stan', 'binomial_4preds_ozs.stan'))
times_ozs_large = time_fits(100, binomial_ozs_mod, data_large)
times_ozs_small = time_fits(100, binomial_ozs_mod, data_small)

# hard sum-to-zero constraint

binomial_hard_mod = CmdStanModel(stan_file=os.path.join('stan', 'binomial_4preds_hard.stan'))
times_hard_small = time_fits(100, binomial_hard_mod, data_small)
times_hard_large = time_fits(100, binomial_hard_mod, data_large)


# soft sum-to-zero constraint

binomial_soft_mod = CmdStanModel(stan_file=os.path.join('stan', 'binomial_4preds_soft.stan'))
times_soft_small = time_fits(100, binomial_soft_mod, data_small)
times_soft_large = time_fits(100, binomial_soft_mod, data_large)


df_summary = summarize_times([('ozs small', times_ozs_small),
                              ('ozs large', times_ozs_large),
                              ('hard small', times_hard_small),
                              ('hard large', times_hard_large),
                              ('soft small', times_soft_small),
                              ('soft large', times_soft_large)])
df_summary.to_json("binomial_runtimes.json")
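
# The saved summary table can be read back later with pandas, e.g.:
#   pd.read_json("binomial_runtimes.json")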