Commit

Support for Multiple Zone Systems and Transit Virtual Path Building (#362)

* TransitVirtualPathBuilder first cut - get_tvpb_logsum computes logsums on demand

* tour_mode_choice logsums for 3-zone working for school and workplace location

* z3 running all models, but with hacked trip_mode_choice (no coefficient templates)

* z3 running all models

* passing tests

* pycodestyle passes

* refactored test teardown

* tvpb tracing

* tvpb tracing rough-out

* build multizone test data before running test

* read_settings_file support include_settings (see the sketch after this list)

* move constants to configs yaml and expressions from abm.util to core

* tvpb estimate_chunk_overhead

* adaptive chunking (see the sketch after this list)

* fixed bug in trip scheduling when no trips in leg after iteration

* 3 zone tap_tap uniquify (see the deduplication sketch after this list)

* tvpb TableCache

* tvpb TableCache - all files

* 3 zone with feather cache (see the caching sketch after this list)

* tidy up notes around creating the marin example

* memmap skims (see the sketch after this list)

* correct drive transit cost expression for miles/feet. Still need to add some additional missing expressions now that everything appears to be working correctly.

* skim docstrings

* disable 3 zone tap_tap caching for traced taps

* initialize_los

* 3 zone mp plumbing untested

* THREE_ZONE tvpb multiprocessing tests and fixes

* rename core tvpb and cache modules

* util.iprod alias for np.prod with dtype int64 (see the sketch after this list)

* THREE_ZONE cache initialization bug fixes

* multizone bug fixes

* bug fixes

* multiprocessing multizone tweaks

* tvpb cache as array

* complain if apportioning with more processes than slice rows

* chunk initialize_tvpb

* shorter trace file names because of Windows path length limits

* refactor pathbuilder sans DYNAMIC

* fix tour_scheduling_calc_row_size

* fix multiprocess resume across mp step bug

* pycodestyle

* minor chunk cleanup

* allow MemMapSkimFactory multiprocessing

* consolidate mtc and multizone configs

* commit updated marin example config files

* some more marin example config updates

* add validation summaries script

* some additional marin example updates

* update example skim time period settings to align with tm1 (#360)

* update example settings to align with tm1

* correct home_is_rural coding since tm1 was incorrect

* corrected skim time periods now go in network_los.yaml
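
The sketches below flesh out a few of the techniques named in the commit messages above. They are editorial illustrations, not ActivitySim's actual implementations, and every function, class, and file name that does not appear above is hypothetical.

First, the include_settings support in read_settings_file: a minimal sketch of such an include mechanism, assuming include_settings names a second YAML file whose keys are merged in (ActivitySim's actual precedence rules may differ):

```python
import yaml  # PyYAML

def read_settings_file(path):
    """Load a YAML settings file, merging in any file named by include_settings."""
    with open(path) as f:
        settings = yaml.safe_load(f) or {}

    include = settings.pop('include_settings', None)  # hypothetical semantics
    if include:
        base = read_settings_file(include)
        base.update(settings)  # assume keys in the including file win on conflict
        settings = base
    return settings
```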
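Adaptive chunking, sketched as a generator that re-estimates per-row memory cost after each chunk completes; bytes_in_use is a hypothetical callable reporting current process memory, and the blending rule is illustrative:

```python
def adaptive_chunks(num_rows, chunk_size_bytes, initial_row_size, bytes_in_use):
    """Yield (start, stop) row ranges, re-estimating row size as chunks complete."""
    row_size = initial_row_size
    start = 0
    while start < num_rows:
        # size the next chunk from the latest bytes-per-row estimate
        rows_per_chunk = max(1, chunk_size_bytes // row_size)
        stop = min(start + rows_per_chunk, num_rows)

        before = bytes_in_use()
        yield start, stop  # the caller processes rows[start:stop] here
        observed = bytes_in_use() - before

        if observed > 0:
            # blend the observed cost into the estimate for later chunks
            row_size = max(1, (row_size + observed // (stop - start)) // 2)
        start = stop
```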
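The tap_tap uniquify commit refers to collapsing duplicate TAP-to-TAP utility requests so each unique combination is evaluated only once. A sketch of that deduplication pattern, where the column names and compute_utilities callable are illustrative:

```python
import pandas as pd

def dedupe_tap_tap(tap_tap_df, compute_utilities):
    """Evaluate utilities once per unique (btap, atap, tod) row, then broadcast."""
    key_cols = ['btap', 'atap', 'tod']
    unique_df = tap_tap_df[key_cols].drop_duplicates().reset_index(drop=True)
    # compute_utilities returns one utility per unique row (illustrative)
    unique_df['utility'] = compute_utilities(unique_df)
    # join the per-unique results back onto the full request table
    return tap_tap_df.merge(unique_df, on=key_cols, how='left')
```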
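The TableCache / feather cache commits persist intermediate DataFrames to disk so they can be reused across runs. The general pattern looks roughly like this (class and method names are illustrative; pandas feather I/O requires pyarrow):

```python
import os
import pandas as pd

class FeatherTableCache:
    """Memoize expensive DataFrame builds as .feather files (illustrative)."""

    def __init__(self, cache_dir):
        os.makedirs(cache_dir, exist_ok=True)
        self.cache_dir = cache_dir

    def get(self, name, build):
        # build is a zero-argument callable that produces the DataFrame
        path = os.path.join(self.cache_dir, f"{name}.feather")
        if os.path.exists(path):
            return pd.read_feather(path)
        df = build()
        df.reset_index(drop=True).to_feather(path)  # feather needs a RangeIndex
        return df
```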
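Memmap skims: accessing skim matrices through numpy memory mapping lets every worker process share one on-disk copy instead of each holding the arrays in RAM. A minimal sketch with a made-up file name, shape, and fill data:

```python
import numpy as np

shape = (2, 25, 25)  # (num_skims, num_zones, num_zones) - made-up sizes
dtype = np.float32

# one-time: write skim data to a flat binary cache file
writer = np.memmap('skim_cache.mmap', dtype=dtype, mode='w+', shape=shape)
writer[:] = np.random.default_rng(0).random(shape, dtype=dtype)
writer.flush()
del writer

# each worker process: map the cache read-only; the OS shares the pages
skims = np.memmap('skim_cache.mmap', dtype=dtype, mode='r', shape=shape)
print(skims[0, 3, 7])  # indexed like an ordinary in-memory array
```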
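Finally, util.iprod: np.prod defaults to the platform integer, which is 32-bit on Windows, so products of large skim dimensions can silently overflow; forcing int64 avoids that. A sketch of what such an alias looks like:

```python
import numpy as np

def iprod(x):
    # accumulate in int64 so e.g. num_skims * num_zones * num_zones
    # cannot overflow the 32-bit default integer on Windows
    return int(np.prod(x, dtype=np.int64))

assert iprod((1000, 30000, 30000)) == 900_000_000_000
```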
bstabler authored Dec 22, 2020
1 parent df28db7 commit 5dc9f15
Showing 265 changed files with 45,963 additions and 8,884 deletions.
3 changes: 3 additions & 0 deletions .travis.yml
@@ -18,6 +18,9 @@ install:
- pip install .
- pip freeze
script:
# build 2 and 3 zone test data
- python activitysim/examples/example_multiple_zone/two_zone_example_data.py
- python activitysim/examples/example_multiple_zone/three_zone_example_data.py
- pycodestyle activitysim
- py.test --cov activitysim --cov-report term-missing
after_success:
4 changes: 2 additions & 2 deletions activitysim/abm/misc.py
@@ -20,7 +20,7 @@ def households_sample_size(settings, override_hh_ids):
if override_hh_ids is None:
return settings.get('households_sample_size', 0)
else:
return len(override_hh_ids)
return 0 if override_hh_ids is None else len(override_hh_ids)


@inject.injectable(cache=True)
@@ -79,7 +79,7 @@ def trace_od(settings):

@inject.injectable(cache=True)
def chunk_size(settings):
return int(settings.get('chunk_size', 0))
return int(settings.get('chunk_size', 0) or 0)


@inject.injectable(cache=True)
3 changes: 3 additions & 0 deletions activitysim/abm/models/__init__.py
@@ -9,6 +9,8 @@
from . import cdap
from . import free_parking
from . import initialize
from . import initialize_tours
from . import initialize_los
from . import joint_tour_composition
from . import joint_tour_destination
from . import joint_tour_frequency
@@ -28,3 +30,4 @@
from . import trip_purpose_and_destination
from . import trip_scheduling
from . import trip_matrices
from . import summarize
185 changes: 99 additions & 86 deletions activitysim/abm/models/accessibility.py
@@ -10,95 +10,99 @@
from activitysim.core import config
from activitysim.core import inject
from activitysim.core import pipeline
from activitysim.core import mem

from activitysim.core import los
from activitysim.core.pathbuilder import TransitVirtualPathBuilder

logger = logging.getLogger(__name__)


class AccessibilitySkims(object):
"""
Wrapper for skim arrays to facilitate use of skims by accessibility model
Parameters
----------
skims : 2D array
omx: open omx file object
this is only used to load skims on demand that were not preloaded
length: int
number of zones in skim to return in skim matrix
in case the skims contain additional external zones that should be trimmed out so skim
array is correct shape to match (flattened) O-D tiled columns in the od dataframe
transpose: bool
whether to transpose the matrix before flattening. (i.e. act as a D-O instead of O-D skim)
"""

def __init__(self, skim_dict, orig_zones, dest_zones, transpose=False):

omx_shape = skim_dict.skim_info['omx_shape']
logger.info("init AccessibilitySkims with %d dest zones %d orig zones omx_shape %s" %
(len(dest_zones), len(orig_zones), omx_shape, ))

assert len(orig_zones) <= len(dest_zones)
assert np.isin(orig_zones, dest_zones).all()
assert len(np.unique(orig_zones)) == len(orig_zones)
assert len(np.unique(dest_zones)) == len(dest_zones)

self.skim_dict = skim_dict
self.transpose = transpose

if omx_shape[0] == len(orig_zones) and skim_dict.offset_mapper.offset_series is None:
# no slicing required because whatever the offset_int, the skim data aligns with zone list
self.map_data = False
else:

if omx_shape[0] == len(orig_zones):
logger.debug("AccessibilitySkims - applying offset_mapper")

skim_index = list(range(omx_shape[0]))
orig_map = skim_dict.offset_mapper.map(orig_zones)
dest_map = skim_dict.offset_mapper.map(dest_zones)

# (we might be sliced multiprocessing)
# assert np.isin(skim_index, orig_map).all()

if np.isin(skim_index, dest_map).all():
# not using the whole skim matrix
logger.info("%s skim zones not in dest_map: %s" %
((~dest_map).sum(), np.ix_(~dest_map)))

self.map_data = True
self.orig_map = orig_map
self.dest_map = dest_map

def __getitem__(self, key):
"""
accessor to return flattened skim array with specified key
flattened array will have length length*length and will match tiled OD df used by assign
this allows the skim array to be accessed from expressions as
skim['DISTANCE'] or skim[('SOVTOLL_TIME', 'MD')]
"""

data = self.skim_dict.get(key).data

if self.transpose:
data = data.transpose()

if self.map_data:

# slice skim to include only orig rows and dest columns
# 2-d boolean slicing in numpy is a bit tricky
# data = data[orig_map, dest_map] # <- WRONG!
# data = data[orig_map, :][:, dest_map] # <- RIGHT
# data = data[np.ix_(orig_map, dest_map)] # <- ALSO RIGHT

data = data[self.orig_map, :][:, self.dest_map]

return data.flatten()
# class AccessibilitySkims(object):
# """
# Wrapper for skim arrays to facilitate use of skims by accessibility model
#
# Parameters
# ----------
# skims : 2D array
# omx: open omx file object
# this is only used to load skims on demand that were not preloaded
# length: int
# number of zones in skim to return in skim matrix
# in case the skims contain additional external zones that should be trimmed out so skim
# array is correct shape to match (flattened) O-D tiled columns in the od dataframe
# transpose: bool
# whether to transpose the matrix before flattening. (i.e. act as a D-O instead of O-D skim)
# """
#
# def __init__(self, skim_dict, orig_zones, dest_zones, transpose=False):
#
# logger.info(f"init AccessibilitySkims with {len(dest_zones)} dest zones {len(orig_zones)} orig zones")
#
# assert len(orig_zones) <= len(dest_zones)
# assert np.isin(orig_zones, dest_zones).all()
# assert len(np.unique(orig_zones)) == len(orig_zones)
# assert len(np.unique(dest_zones)) == len(dest_zones)
#
# self.skim_dict = skim_dict
# self.transpose = transpose
#
# num_skim_zones = skim_dict.get_skim_info('omx_shape')[0]
# if num_skim_zones == len(orig_zones) and skim_dict.offset_mapper.offset_series is None:
# # no slicing required because whatever the offset_int, the skim data aligns with zone list
# self.map_data = False
# else:
#
# logger.debug("AccessibilitySkims - applying offset_mapper")
#
# skim_index = list(range(num_skim_zones))
# orig_map = skim_dict.offset_mapper.map(orig_zones)
# dest_map = skim_dict.offset_mapper.map(dest_zones)
#
# # (we might be sliced multiprocessing)
# # assert np.isin(skim_index, orig_map).all()
#
# out_of_bounds = ~np.isin(skim_index, dest_map)
# # if out_of_bounds.any():
# # print(f"{(out_of_bounds).sum()} skim zones not in dest_map")
# # print(f"dest_zones {dest_zones}")
# # print(f"dest_map {dest_map}")
# # print(f"skim_index {skim_index}")
# assert not out_of_bounds.any(), \
# f"AccessibilitySkims {(out_of_bounds).sum()} skim zones not in dest_map: {np.ix_(out_of_bounds)[0]}"
#
# self.map_data = True
# self.orig_map = orig_map
# self.dest_map = dest_map
#
# def __getitem__(self, key):
# """
# accessor to return flattened skim array with specified key
# flattened array will have length length*length and will match tiled OD df used by assign
#
# this allows the skim array to be accessed from expressions as
# skim['DISTANCE'] or skim[('SOVTOLL_TIME', 'MD')]
# """
#
# data = self.skim_dict.get(key).data
#
# if self.transpose:
# data = data.transpose()
#
# if self.map_data:
# # slice skim to include only orig rows and dest columns
# # 2-d boolean slicing in numpy is a bit tricky
# # data = data[orig_map, dest_map] # <- WRONG!
# # data = data[orig_map, :][:, dest_map] # <- RIGHT
# # data = data[np.ix_(orig_map, dest_map)] # <- ALSO RIGHT
#
# data = data[self.orig_map, :][:, self.dest_map]
#
# return data.flatten()


@inject.step()
def compute_accessibility(accessibility, skim_dict, land_use, trace_od):
def compute_accessibility(accessibility, network_los, land_use, trace_od):

"""
Compute accessibility for each zone in land use file using expressions from accessibility_spec
Expand Down Expand Up @@ -143,8 +147,8 @@ def compute_accessibility(accessibility, skim_dict, land_use, trace_od):
# create OD dataframe
od_df = pd.DataFrame(
data={
'orig': np.repeat(np.asanyarray(accessibility_df.index), dest_zone_count),
'dest': np.tile(np.asanyarray(land_use_df.index), orig_zone_count)
'orig': np.repeat(orig_zones, dest_zone_count),
'dest': np.tile(dest_zones, orig_zone_count)
}
)

@@ -160,9 +164,16 @@ def compute_accessibility(accessibility, skim_dict, land_use, trace_od):
locals_d = {
'log': np.log,
'exp': np.exp,
'skim_od': AccessibilitySkims(skim_dict, orig_zones, dest_zones),
'skim_do': AccessibilitySkims(skim_dict, orig_zones, dest_zones, transpose=True)
'network_los': network_los,
}

skim_dict = network_los.get_default_skim_dict()
locals_d['skim_od'] = skim_dict.wrap('orig', 'dest').set_df(od_df)
locals_d['skim_do'] = skim_dict.wrap('dest', 'orig').set_df(od_df)

if network_los.zone_system == los.THREE_ZONE:
locals_d['tvpb'] = TransitVirtualPathBuilder(network_los)

if constants is not None:
locals_d.update(constants)

@@ -174,13 +185,15 @@ def compute_accessibility(accessibility, skim_dict, land_use, trace_od):
data.shape = (orig_zone_count, dest_zone_count) # (o,d)
accessibility_df[column] = np.log(np.sum(data, axis=1) + 1)

logger.info("{trace_label} added {len(results.columns} columns")

# - write table to pipeline
pipeline.replace_table("accessibility", accessibility_df)

if trace_od:

if not trace_od_rows.any():
logger.warning("trace_od not found origin = %s, dest = %s" % (trace_orig, trace_dest))
logger.warning(f"trace_od not found origin = {trace_orig}, dest = {trace_dest}")
else:

# add OD columns to trace results
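
The wrap('orig', 'dest').set_df(od_df) calls in the diff above attach the OD dataframe to a skim wrapper so that accessibility expressions can write skim_od['DISTANCE'] and get back one value per OD row. A stripped-down sketch of that wrapper pattern (not ActivitySim's actual classes; assumes zone ids are zero-based array offsets):

```python
class SkimWrapper:
    """Return one skim value per row of a bound OD dataframe (sketch)."""

    def __init__(self, skim_data, orig_col, dest_col):
        self.skim_data = skim_data  # dict of key -> 2-D numpy array
        self.orig_col, self.dest_col = orig_col, dest_col
        self.df = None

    def set_df(self, df):
        self.df = df
        return self

    def __getitem__(self, key):
        # fancy-index the 2-D skim with the od_df origin/dest columns
        return self.skim_data[key][self.df[self.orig_col].to_numpy(),
                                   self.df[self.dest_col].to_numpy()]
```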