Skip to content

Commit

Permalink
Merge pull request #69 from Princeton-CDH/convergence-no-riskadjust
Browse files Browse the repository at this point in the history
track when agents update risk level; implement convergence check based on stable risk attitudes
  • Loading branch information
rlskoeser authored Mar 21, 2024
2 parents 5a6c84c + b7e70ac commit 2f66e01
Show file tree
Hide file tree
Showing 8 changed files with 990 additions and 26 deletions.
738 changes: 738 additions & 0 deletions notebooks/new_convergence/hdm_c7_riskdistribution.ipynb

Large diffs are not rendered by default.

25 changes: 21 additions & 4 deletions simulatingrisk/hawkdove/server.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,9 @@ def agent_portrayal(agent):
"size": 25,
# "color": "tab:gray",
}
# specific to multiple risk attitude variant
if hasattr(agent, "risk_level_changed"):
portrayal["risk_level_changed"] = agent.risk_level_changed

# color based on risk level; risk levels are always 0-9
colors = divergent_colors_10
Expand Down Expand Up @@ -154,18 +157,32 @@ def draw_hawkdove_agent_space(model, agent_portrayal):
.scale(domain=hawkdove_domain, range=["orange", "blue"])
)

chart = (
# optionally display information from multi-risk attitude variant
if "risk_level_changed" in df.columns:
outer_color = alt.Color(
"risk_level_changed", title="adjusted risk level"
).scale(
domain=[False, True],
range=["transparent", "black"],
)
else:
outer_color = chart_color

agent_chart = (
alt.Chart(df)
.mark_point(filled=True)
.mark_point() # filled=True)
.encode(
x=alt.X("x", axis=None), # no x-axis label
y=alt.Y("y", axis=None), # no y-axis label
size=alt.Size("size", title="points rank"), # relabel size for legend
color=chart_color,
# when fill and color differ, color acts as an outline
fill=chart_color,
color=outer_color,
shape=alt.Shape( # use shape to indicate choice
"choice", scale=alt.Scale(domain=hawkdove_domain, range=shape_range)
),
)
.configure_view(strokeOpacity=0) # hide grid/chart lines
)
return solara.FigureAltair(chart)

return solara.FigureAltair(agent_chart)
14 changes: 14 additions & 0 deletions simulatingrisk/hawkdovemulti/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,20 @@ Like the base hawk/dove risk attitude game, there is also a
configuration to add some chance of agents playing hawk/dove randomly
instead of choosing based on the rules of the game.

## Convergence

The model is configured to stop automatically when it has stabilized.
Convergence is reached when an adjustment round occurs and zero agents
adjust their risk attitude.

If adjustment is not enabled, convergence logic falls back to the
implementation of the hawk/dove single-risk attitude simulation, which is
based on a stable rolling % average of agents playing hawk.

Model and agent data collection also include reports on whether agents
updated their risk level in the last adjustment round, and model data collection
includes a status of "running" or "converged".

## Batch running

This module includes a custom batch run script to run the simulation and
Expand Down
52 changes: 52 additions & 0 deletions simulatingrisk/hawkdovemulti/analysis_utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
"""
utility methods for analyzing data generated by this model
"""
import altair as alt
import polars as pl

from simulatingrisk.hawkdovemulti.model import RiskState


def groupby_population_risk_category(df):
    """Group a polars dataframe of hawk/dove multi model data by
    population risk category and attach readable group labels.

    :param df: polars dataframe with a ``population_risk_category`` column
    :returns: polars dataframe with ``risk_category``, count, and ``type``
        columns, sorted by risk category
    """
    # total the number of runs that ended up in each category;
    # rename to the shorter column name used for graphing
    grouped = (
        df.group_by("population_risk_category")
        .count()
        .rename({"population_risk_category": "risk_category"})
        .sort("risk_category")
    )

    # attach a human-readable label for each numeric category
    labels = pl.Series(
        name="type",
        values=grouped["risk_category"].map_elements(RiskState.category),
    )
    return grouped.with_columns(labels)


def graph_population_risk_category(poprisk_grouped):
    """Graph the number of runs in each population risk category.

    :param poprisk_grouped: dataframe produced by
        :meth:`groupby_population_risk_category`
    :returns: altair bar chart of run counts, colored by larger category type
    """
    # one tick per category; pin the domain so empty categories still show
    category_axis = alt.X(
        "risk_category",
        title="risk category",
        axis=alt.Axis(tickCount=13),  # 13 categories
        scale=alt.Scale(domain=[1, 13]),
    )
    bars = alt.Chart(poprisk_grouped).mark_bar(width=15)
    return bars.encode(
        x=category_axis,
        y=alt.Y("count", title="Number of runs"),
        color=alt.Color("type", title="type"),
    ).properties(title="Distribution of runs by final population risk category")
67 changes: 64 additions & 3 deletions simulatingrisk/hawkdovemulti/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@
neighborhood_sizes,
)
from simulatingrisk.hawkdove.model import divergent_colors_10
from simulatingrisk.hawkdove.app import plot_hawks

# start with common hawk/dove params, then add params for variable risk
jupyterviz_params_var = common_jupyterviz_params.copy()
Expand Down Expand Up @@ -93,12 +92,42 @@ def plot_agents_by_risk(model):
# distracting from the main point of this chart, which is quantitative
# color=alt.Color("risk_level:N").scale(**color_scale_opts),
)
.properties(title="Number of agents in each risk level")
)
return solara.FigureAltair(bar_chart)


def plot_agents_risklevel_changed(model):
    """Line chart of how many agents updated their risk attitude
    at each adjustment round.

    :param model: hawk/dove multi model with a mesa datacollector
    :returns: solara altair figure, or None when no data is available yet
    """
    model_df = model.datacollector.get_model_vars_dataframe().reset_index()
    if model_df.empty:
        return
    # adjustment only happens every Nth round; sample those rows only
    model_df = model_df[:: model.adjust_round_n]
    if model_df.empty:
        return

    changed_axis = alt.Y(
        "num_agents_risk_changed",
        title="# agents who updated risk level",
        scale=alt.Scale(domain=[0, model.num_agents]),
    )
    line_chart = (
        alt.Chart(model_df)
        .mark_line()
        .encode(y=changed_axis, x=alt.X("index"))
        .properties(title="Number of agents with adjusted risk level")
    )

    return solara.FigureAltair(line_chart)


def plot_hawks_by_risk(model):
"""plot rolling mean of percent of agents in each risk attitude
"""plot rolling mean of percent of agents in each risk level
who chose hawk over last several rounds"""

# in the first round, mesa returns a dataframe full of NAs; ignore that
Expand Down Expand Up @@ -146,14 +175,46 @@ def plot_hawks_by_risk(model):
),
color=alt.Color("risk_level:N").scale(**color_scale_opts),
)
.properties(title="Rolling average percent hawk by risk level")
)
return solara.FigureAltair(chart)


def plot_wealth_by_risklevel(model):
    """Box plot of cumulative wealth (points) per risk level for the
    most recent completed round.

    :param model: hawk/dove multi model with a mesa datacollector
    :returns: solara altair figure, or None when no agent data exists yet
    """
    agent_df = model.datacollector.get_agent_vars_dataframe().reset_index().dropna()
    if agent_df.empty:
        return

    # restrict to the latest step, i.e. the current status
    current_round = agent_df[agent_df.Step == agent_df.Step.max()]

    wealth_chart = (
        alt.Chart(current_round)
        .mark_boxplot(extent="min-max")
        .encode(
            alt.X(
                "risk_level",
                scale=alt.Scale(domain=[model.min_risk_level, model.max_risk_level]),
            ),
            alt.Y("points").scale(zero=False),
        )
        .properties(title="Cumulative wealth by risk level")
    )
    return solara.FigureAltair(wealth_chart)


page = JupyterViz(
HawkDoveMultipleRiskModel,
jupyterviz_params_var,
measures=[plot_hawks, plot_agents_by_risk, plot_hawks_by_risk],
measures=[
plot_agents_by_risk,
plot_hawks_by_risk,
plot_wealth_by_risklevel,
plot_agents_risklevel_changed,
# plot_hawks,
],
name="Hawk/Dove game with multiple risk attitudes",
agent_portrayal=agent_portrayal,
space_drawer=draw_hawkdove_agent_space,
Expand Down
72 changes: 58 additions & 14 deletions simulatingrisk/hawkdovemulti/batch_run.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,18 +15,46 @@

neighborhood_sizes = list(HawkDoveMultipleRiskModel.neighborhood_sizes)

# NOTE: it's better to be explicit about even parameters
# instead of relying on model defaults, because
# parameters specified here are included in data exports


# combination of parameters we want to run
params = {
"grid_size": [10, 25, 50], # 100],
"risk_adjustment": ["adopt", "average"],
"play_neighborhood": neighborhood_sizes,
"observed_neighborhood": neighborhood_sizes,
"adjust_neighborhood": neighborhood_sizes,
"hawk_odds": [0.5, 0.25, 0.75],
"adjust_every": [2, 10, 20],
"risk_distribution": HawkDoveMultipleRiskModel.risk_distribution_options,
"adjust_payoff": HawkDoveMultipleRiskModel.supported_adjust_payoffs,
# random?
"default": {
"grid_size": [10, 25, 50], # 100],
"risk_adjustment": ["adopt", "average"],
"play_neighborhood": neighborhood_sizes,
"observed_neighborhood": neighborhood_sizes,
"adjust_neighborhood": neighborhood_sizes,
"hawk_odds": [0.5, 0.25, 0.75],
"adjust_every": [2, 10, 20],
"risk_distribution": HawkDoveMultipleRiskModel.risk_distribution_options,
"adjust_payoff": HawkDoveMultipleRiskModel.supported_adjust_payoffs,
# random?
},
# specific scenarios to allow paired statistical tests
"risk_adjust": {
# vary risk adjustment
"risk_adjustment": ["adopt", "average"],
"risk_distribution": "uniform",
# use model defaults; grid size must be specified
"grid_size": 10, # 25,
},
"payoff": {
"adjust_payoff": HawkDoveMultipleRiskModel.supported_adjust_payoffs,
"risk_distribution": "uniform",
# use model defaults; grid size must be specified
"grid_size": 25,
},
"distribution": {
"risk_distribution": HawkDoveMultipleRiskModel.risk_distribution_options,
# adopt tends to converge faster; LB also says it's more interesting & simpler
"risk_adjustment": "adopt",
# use model defaults; grid size must be specified
"grid_size": 10,
},
}


Expand All @@ -37,7 +65,14 @@ def run_hawkdovemulti_model(args):

model = HawkDoveMultipleRiskModel(**params)
while model.running and model.schedule.steps <= max_steps:
model.step()
try:
model.step()
# by default, signals propagate to all processes
# take advantage of that to exit and save results
except KeyboardInterrupt:
# if we get a ctrl-c / keyboard interrupt, stop looping
# and finish data collection to report on whatever was completed
break

# collect data for the last step
# (scheduler is 1-based index but data collection is 0-based)
Expand Down Expand Up @@ -72,8 +107,11 @@ def batch_run(
collect_agent_data,
file_prefix,
max_runs,
param_choice,
):
param_combinations = _make_model_kwargs(params)
run_params = params.get(param_choice)

param_combinations = _make_model_kwargs(run_params)
total_param_combinations = len(param_combinations)
total_runs = total_param_combinations * iterations
print(
Expand Down Expand Up @@ -169,7 +207,7 @@ def main():
"--max-steps",
help="Maximum steps to run simulations if they have not already "
+ "converged (default: %(default)s)",
default=125, # typically converges quickly, around step 60 without randomness
default=1000, # new convergence logic seems to converge around 400
type=int,
)
parser.add_argument(
Expand Down Expand Up @@ -203,7 +241,12 @@ def main():
type=int,
default=None,
)
# may want to add an option to configure output dir
parser.add_argument(
"--params",
help="Run a specific set of parameters",
choices=params.keys(),
default="default",
)

args = parser.parse_args()
batch_run(
Expand All @@ -215,6 +258,7 @@ def main():
args.agent_data,
args.file_prefix,
args.max_runs,
args.params,
)


Expand Down
Loading

0 comments on commit 2f66e01

Please sign in to comment.