
Commit

mostly through test1
jpn-- committed Nov 10, 2023
1 parent 5e90e65 commit 23a9e72
Showing 5 changed files with 66 additions and 23 deletions.
6 changes: 4 additions & 2 deletions activitysim/abm/models/summarize.py
@@ -9,7 +9,7 @@
 import pandas as pd
 
 from activitysim.core import expressions, workflow
-from activitysim.core.configuration.base import PydanticReadable
+from activitysim.core.configuration.base import PreprocessorSettings, PydanticReadable
 from activitysim.core.los import Network_LOS
 
 logger = logging.getLogger(__name__)
@@ -201,7 +201,7 @@ def manual_breaks(
     return bins
 
 
-class SummarizeSettings(PydanticReadable):
+class SummarizeSettings(PydanticReadable, extra="forbid"):
     """
     Settings for the `summarize` component.
     """
@@ -215,6 +215,8 @@ class SummarizeSettings(PydanticReadable):
     EXPORT_PIPELINE_TABLES: bool = True
     """To export pipeline tables for expression development."""
 
+    preprocessor: PreprocessorSettings | None = None
+
 
 @workflow.step
 def summarize(
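
Note: the extra="forbid" keyword added to SummarizeSettings (and to TripModeChoiceSettings further down) is standard pydantic v2 behavior: unrecognized keys in a settings file now raise a validation error instead of being silently ignored. A minimal standalone sketch, using plain pydantic rather than the actual PydanticReadable base class, with a hypothetical misspelled key:

from pydantic import BaseModel, ValidationError


class ExampleSettings(BaseModel, extra="forbid"):
    # hypothetical field, for illustration only
    EXPORT_PIPELINE_TABLES: bool = True


try:
    # a misspelled key that dict-style settings would silently ignore
    ExampleSettings(EXPORT_PIPELINE_TABLE=False)
except ValidationError as err:
    print(err)  # reports that extra inputs are not permitted
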
43 changes: 32 additions & 11 deletions activitysim/abm/models/trip_matrices.py
@@ -3,6 +3,8 @@
 from __future__ import annotations
 
 import logging
+from pathlib import Path
+from typing import Any
 
 import numpy as np
 import openmatrix as omx
@@ -15,6 +17,17 @@
 logger = logging.getLogger(__name__)
 
 
+class MatrixTableSettings(PydanticReadable):
+    name: str
+    data_field: str
+
+
+class MatrixSettings(PydanticReadable):
+    file_name: Path
+    tables: list[MatrixTableSettings] = []
+    is_tap: bool = False
+
+
 class WriteTripMatricesSettings(PydanticReadable):
     """
     Settings for the `write_trip_matrices` component.
@@ -26,6 +39,12 @@ class WriteTripMatricesSettings(PydanticReadable):
     HH_EXPANSION_WEIGHT_COL: str = "sample_rate"
     """Column represents the sampling rate of households"""
 
+    MATRICES: list[MatrixSettings] = []
+
+    CONSTANTS: dict[str, Any] = {}
+
+    preprocessor: PreprocessorSettings | None = None
+
 
 @workflow.step(copy_tables=["trips"])
 def write_trip_matrices(
@@ -251,9 +270,11 @@ def write_trip_matrices(
     )
 
 
-@workflow.func
 def annotate_trips(
-    state: workflow.State, trips: pd.DataFrame, network_los, model_settings
+    state: workflow.State,
+    trips: pd.DataFrame,
+    network_los,
+    model_settings: WriteTripMatricesSettings,
 ):
     """
     Add columns to local trips table. The annotator has
@@ -298,7 +319,7 @@ def annotate_trips(
 
     # Data will be expanded by an expansion weight column from
     # the households pipeline table, if specified in the model settings.
-    hh_weight_col = model_settings.get("HH_EXPANSION_WEIGHT_COL")
+    hh_weight_col = model_settings.HH_EXPANSION_WEIGHT_COL
 
     if hh_weight_col and hh_weight_col not in trips_df:
         logger.info("adding '%s' from households to trips table" % hh_weight_col)
@@ -314,7 +335,7 @@ def write_matrices(
     zone_index,
     orig_index,
     dest_index,
-    model_settings,
+    model_settings: WriteTripMatricesSettings,
     is_tap=False,
 ):
     """
@@ -329,30 +350,30 @@
     but the table 'data_field's must be summable types: ints, floats, bools.
     """
 
-    matrix_settings = model_settings.get("MATRICES")
+    matrix_settings = model_settings.MATRICES
 
     if not matrix_settings:
         logger.error("Missing MATRICES setting in write_trip_matrices.yaml")
 
     for matrix in matrix_settings:
-        matrix_is_tap = matrix.get("is_tap", False)
+        matrix_is_tap = matrix.is_tap
 
         if matrix_is_tap == is_tap:  # only write tap matrices to tap matrix files
-            filename = matrix.get("file_name")
+            filename = str(matrix.file_name)
             filepath = state.get_output_file_path(filename)
             logger.info("opening %s" % filepath)
             file = omx.open_file(str(filepath), "w")  # possibly overwrite existing file
-            table_settings = matrix.get("tables")
+            table_settings = matrix.tables
 
             for table in table_settings:
-                table_name = table.get("name")
-                col = table.get("data_field")
+                table_name = table.name
+                col = table.data_field
 
                 if col not in aggregate_trips:
                     logger.error(f"missing {col} column in aggregate_trips DataFrame")
                     return
 
-                hh_weight_col = model_settings.get("HH_EXPANSION_WEIGHT_COL")
+                hh_weight_col = model_settings.HH_EXPANSION_WEIGHT_COL
                 if hh_weight_col:
                     aggregate_trips[col] = (
                         aggregate_trips[col] / aggregate_trips[hh_weight_col]
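
Note: the new MatrixTableSettings/MatrixSettings models replace the nested dicts previously read from write_trip_matrices.yaml, which is why write_matrices above switches from matrix.get("file_name") to matrix.file_name. A rough sketch of the validation step, using plain pydantic rather than PydanticReadable, with an invented MATRICES entry:

from pathlib import Path

from pydantic import BaseModel


class MatrixTableSettings(BaseModel):
    name: str
    data_field: str


class MatrixSettings(BaseModel):
    file_name: Path
    tables: list[MatrixTableSettings] = []
    is_tap: bool = False


# mirrors a hypothetical MATRICES list item from write_trip_matrices.yaml
matrix = MatrixSettings.model_validate(
    {
        "file_name": "trips_md.omx",
        "tables": [{"name": "TRIPS_MD", "data_field": "trips_md"}],
    }
)
print(matrix.tables[0].data_field)  # typed attribute access replaces .get("data_field")
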
26 changes: 18 additions & 8 deletions activitysim/abm/models/trip_mode_choice.py
@@ -3,6 +3,7 @@
 from __future__ import annotations
 
 import logging
+from typing import Any
 
 import numpy as np
 import pandas as pd
@@ -20,13 +21,13 @@
     workflow,
 )
 from activitysim.core.configuration.base import PreprocessorSettings, PydanticReadable
-from activitysim.core.configuration.logit import LogitComponentSettings
+from activitysim.core.configuration.logit import TemplatedLogitComponentSettings
 from activitysim.core.util import assign_in_place
 
 logger = logging.getLogger(__name__)
 
 
-class TripModeChoiceSettings(LogitComponentSettings):
+class TripModeChoiceSettings(TemplatedLogitComponentSettings, extra="forbid"):
     """
     Settings for the `trip_mode_choice` component.
     """
@@ -41,6 +42,14 @@ class TripModeChoiceSettings(LogitComponentSettings):
     """List of columns to be filtered from the dataframe to reduce memory
     needs filter chooser table to these fields"""
 
+    CHOOSER_COLS_TO_KEEP: list[str] = []
+
+    tvpb_mode_path_types: dict[str, Any] = {}
+
+    FORCE_ESCORTEE_CHAUFFEUR_MODE_MATCH: bool = True
+
+    annotate_trips: PreprocessorSettings | None = None
+
 
 @workflow.step
 def trip_mode_choice(
@@ -187,9 +196,9 @@ def trip_mode_choice(
         estimator.write_spec(model_settings)
         estimator.write_model_settings(model_settings, model_settings_file_name)
 
-    model_spec = state.filesystem.read_model_spec(file_name=model_settings["SPEC"])
+    model_spec = state.filesystem.read_model_spec(file_name=model_settings.SPEC)
     nest_spec = config.get_logit_model_settings(model_settings)
-    cols_to_keep = model_settings.get("CHOOSER_COLS_TO_KEEP", None)
+    cols_to_keep = model_settings.CHOOSER_COLS_TO_KEEP
 
     choices_list = []
     cols_to_keep_list = []
@@ -296,7 +305,7 @@ def trip_mode_choice(
 
     # add cached tvpb_logsum tap choices for modes specified in tvpb_mode_path_types
    if network_los.zone_system == los.THREE_ZONE:
-        tvpb_mode_path_types = model_settings.get("tvpb_mode_path_types")
+        tvpb_mode_path_types = model_settings.tvpb_mode_path_types
        for mode, path_type in tvpb_mode_path_types.items():
            skim_cache = tvpb_logsum_odt.cache[path_type]
 
@@ -325,8 +334,9 @@ def trip_mode_choice(
 
     assign_in_place(trips_df, choices_df)
 
-    if state.is_table("school_escort_tours") & model_settings.get(
-        "FORCE_ESCORTEE_CHAUFFEUR_MODE_MATCH", True
+    if (
+        state.is_table("school_escort_tours")
+        & model_settings.FORCE_ESCORTEE_CHAUFFEUR_MODE_MATCH
     ):
         trips_df = (
             school_escort_tours_trips.force_escortee_trip_modes_to_match_chauffeur(
@@ -344,7 +354,7 @@ def trip_mode_choice(
 
     state.add_table("trips", trips_df)
 
-    if model_settings.get("annotate_trips"):
+    if model_settings.annotate_trips:
         annotate.annotate_trips(state, model_settings, trace_label, locals_dict)
 
     if state.settings.trace_hh_id:
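
Note: declaring fields such as FORCE_ESCORTEE_CHAUFFEUR_MODE_MATCH: bool = True and CHOOSER_COLS_TO_KEEP: list[str] = [] moves the defaults onto the settings model instead of each call site, which is what lets the .get(key, default) calls above collapse into plain attribute access. A minimal sketch, again with plain pydantic rather than the ActivitySim base classes:

from pydantic import BaseModel


class ExampleTripModeChoiceSettings(BaseModel):
    FORCE_ESCORTEE_CHAUFFEUR_MODE_MATCH: bool = True
    CHOOSER_COLS_TO_KEEP: list[str] = []


settings = ExampleTripModeChoiceSettings()  # nothing supplied in the YAML
assert settings.FORCE_ESCORTEE_CHAUFFEUR_MODE_MATCH is True
assert settings.CHOOSER_COLS_TO_KEEP == []  # each instance gets its own list
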
4 changes: 3 additions & 1 deletion activitysim/abm/models/trip_scheduling.py
@@ -5,7 +5,7 @@
 import logging
 import warnings
 from builtins import range
-from typing import List, Literal
+from typing import Any, List, Literal
 
 import numpy as np
 import pandas as pd
@@ -456,6 +456,8 @@ class TripSchedulingSettings(PydanticReadable):
 
     logic_version: int | None = None
 
+    CONSTANTS: dict[str, Any] = {}
+
 
 @workflow.step(copy_tables=False)
 def trip_scheduling(
10 changes: 9 additions & 1 deletion activitysim/core/simulate.py
@@ -397,9 +397,17 @@ def get_segment_coefficients(
         omnibus_coefficients = pd.read_csv(
             legacy_coeffs_file_path, comment="#", index_col="coefficient_name"
         )
+        try:
+            omnibus_coefficients_segment_name = omnibus_coefficients[segment_name]
+        except KeyError as err:
+            logger.error(f"No key {segment_name} found!")
+            possible_keys = "\n- ".join(omnibus_coefficients.keys())
+            logger.error(f"possible keys include: \n- {possible_keys}")
+            raise
         coefficients_dict = assign.evaluate_constants(
-            omnibus_coefficients[segment_name], constants=constants
+            omnibus_coefficients_segment_name, constants=constants
         )
+
     else:
         coefficients_df = filesystem.read_model_coefficients(model_settings)
         template_df = read_model_coefficient_template(filesystem, model_settings)
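
Note: the try/except added to get_segment_coefficients turns a bare KeyError into a message listing the segments that actually exist in the legacy coefficients file. A standalone illustration of the failure mode, with invented segment names and coefficient values:

import pandas as pd

omnibus_coefficients = pd.DataFrame(
    {"work": [1.0, -0.5], "school": [0.8, -0.3]},
    index=["coef_ivt", "coef_cost"],
)

segment_name = "univ"  # a segment missing from the hypothetical file
try:
    omnibus_coefficients[segment_name]
except KeyError:
    print(f"No key {segment_name} found!")
    print("possible keys include: \n- " + "\n- ".join(omnibus_coefficients.keys()))
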
