Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
@inject.table()
def land_use():
    """
    Load the land use (TAZ attribute) table from the input store,
    name its index 'TAZ', and register the concrete dataframe with
    the pipeline in place of this table-producing function.
    """
    land_use_df = read_input_table("land_use/taz_data")

    logger.info("loaded land_use %s" % (land_use_df.shape,))

    land_use_df.index.name = 'TAZ'

    # replace table function with dataframe
    inject.add_table('land_use', land_use_df)

    return land_use_df
def open_pipeline_store(overwrite=False):
    """
    Open the pipeline checkpoint store (an HDF5 file) in append mode.

    Parameters
    ----------
    overwrite : bool
        delete file before opening (unless resuming)

    Raises
    ------
    RuntimeError
        if a pipeline store is already open
    """
    if _PIPELINE.pipeline_store is not None:
        raise RuntimeError("Pipeline store is already open!")

    pipeline_file_path = config.pipeline_file_path(inject.get_injectable('pipeline_file_name'))

    if overwrite:
        # best-effort delete: a failure is logged and we fall through to
        # opening whatever file is there in append mode
        # (was: broad `except Exception` plus a stray debug `print(e)`;
        # only OS-level errors can occur here, and the print duplicated the log)
        try:
            if os.path.isfile(pipeline_file_path):
                logger.debug("removing pipeline store: %s" % pipeline_file_path)
                os.unlink(pipeline_file_path)
        except OSError as e:
            logger.warning("Error removing %s: %s" % (pipeline_file_path, e))

    _PIPELINE.pipeline_store = pd.HDFStore(pipeline_file_path, mode='a')
    logger.debug("opened pipeline_store")
def annotate_jtp(model_settings, trace_label):
    """
    Apply the 'annotate_persons' expression file from model_settings to the
    persons table and write the annotated table back to the pipeline.
    """
    # - annotate persons
    persons_df = inject.get_table('persons').to_frame()
    annotate_trace_label = tracing.extend_trace_label(trace_label, 'annotate_persons')

    expressions.assign_columns(
        df=persons_df,
        model_settings=model_settings.get('annotate_persons'),
        trace_label=annotate_trace_label)

    pipeline.replace_table("persons", persons_df)
@inject.table()
def accessibility(store):
    """
    Read the accessibility table from the skims store, upper-case its column
    names, and register the dataframe with the pipeline in place of this
    table-producing function.
    """
    accessibility_df = store["skims/accessibility"]

    # FIXME - should eventually replace when activity model is stable
    # FIXME - but will break regression tests
    # df.columns = ["%s_regress" % c.upper() for c in df.columns]
    accessibility_df.columns = [c.upper() for c in accessibility_df.columns]

    # replace table function with dataframe
    inject.add_table('accessibility', accessibility_df)

    return accessibility_df
# this would be accessibility around the household location - be careful with
# this one as accessibility at some other location can also matter
# (joins accessibility onto households by matching accessibility's index to households.TAZ)
inject.broadcast('accessibility', 'households', cast_index=True, onto_on='TAZ')
@inject.table()
def trips_merged(trips, tours):
    """Return the trips table merged with its related tables (tours)."""
    merged = inject.merge_tables(trips.name, tables=[trips, tours])
    return merged
# NOTE(review): fragment — the enclosing `def` (and VECTOR_TEST_SIZE / attribute /
# network_los bindings) is outside this view; presumably a benchmark helper that
# samples random origin MAZs and counts the resulting TAP-MAZ pairs — confirm upstream
random_omaz = np.random.choice(network_los.maz_df.index.values, size=VECTOR_TEST_SIZE,
replace=True)
taps_mazs = network_los.get_taps_mazs(random_omaz, attribute=attribute)
return len(taps_mazs.index)
def set_random_seed(seed=0):
    """
    Seed numpy's global random number generator so run results are reproducible.

    Parameters
    ----------
    seed : int, default 0
        value passed to np.random.seed; the default preserves the original
        hard-coded behavior (seed 0), so existing zero-argument callers are
        unaffected
    """
    np.random.seed(seed)
# set random seed so that run results are reproducible
# (comment out the line below for non-deterministic runs)
set_random_seed()
inject.add_injectable("set_random_seed", set_random_seed)

tracing.config_logger()

# time each injectable load separately; print_elapsed_time returns a new
# reference time which is threaded through the subsequent calls
t0 = print_elapsed_time()

taz_skim_stack = inject.get_injectable('taz_skim_dict')
t0 = print_elapsed_time("load taz_skim_dict", t0)

tap_skim_stack = inject.get_injectable('tap_skim_dict')
t0 = print_elapsed_time("load tap_skim_dict", t0)

network_los = inject.get_injectable('network_los')
t0 = print_elapsed_time("load network_los", t0)

# test sizes for all implemented methods
VECTOR_TEST_SIZEs = (10000, 100000, 1000000, 5000000, 10000000, 20000000)
# - direction-aware trip times: outbound trips run start -> end,
#   inbound trips are reversed (end -> start)
# use np.where instead of chained indexed assignment
# (trips['col'][mask] = ...), which pandas flags as SettingWithCopy and
# does not guarantee to write back to the original frame
trips['start_trip'] = np.where(trips.INBOUND, trips.end, trips.start)
trips['end_trip'] = np.where(trips.INBOUND, trips.start, trips.end)

# create a stable (predictable) index based on tour_id and trip_num
possible_trips_count = 2
trips['trip_id'] = (trips.tour_id * possible_trips_count) + (trips.trip_num - 1)
trips.set_index('trip_id', inplace=True, verify_integrity=True)

trip_columns = ['tour_id', 'INBOUND', 'trip_num', 'OTAZ', 'DTAZ', 'start_trip', 'end_trip']
trips = trips[trip_columns]

# register the finished table with the pipeline, tracing, and the rn generator
inject.add_table('trips', trips)
tracing.register_traceable_table('trips', trips)
pipeline.get_rn_generator().add_channel(trips, 'trips')

if trace_hh_id:
    tracing.trace_df(trips,
                     label="trips",
                     warn_if_empty=True)
def __init__(self, table_name, time_window_df):
    """
    Wrap a person time-window table for fast ndarray access.

    Parameters
    ----------
    table_name : str
        pipeline name of the person windows table
    time_window_df : pandas.DataFrame
        one row per person, one column per time period
        (column labels must be int-coercible)
    """
    self.person_windows_table_name = table_name
    self.person_windows_df = time_window_df
    # DataFrame.as_matrix() was deprecated in pandas 0.23 and removed in 1.0;
    # to_numpy() is the supported equivalent
    self.person_windows = self.person_windows_df.to_numpy()

    # series to map person_id to time_window ordinal index
    self.row_ix = pd.Series(range(len(time_window_df.index)), index=time_window_df.index)

    # series to map time period (as int) to column ordinal index
    int_time_windows = [int(c) for c in time_window_df.columns.values]
    self.time_ix = pd.Series(range(len(time_window_df.columns)), index=int_time_windows)

    self.tdd_intersects_df = inject.get_injectable('tdd_intersects')
    self.tdd_windows_df = inject.get_injectable('tdd_windows')
"""
compute logsums for tours using skims for alt_tdd out_period and in_period
"""
trace_label = tracing.extend_trace_label(trace_label, 'logsums')
logsum_settings = config.read_model_settings(model_settings['LOGSUM_SETTINGS'])
choosers = alt_tdd.join(tours_merged, how='left', rsuffix='_chooser')
logger.info("%s compute_logsums for %d choosers%s alts" %
(trace_label, choosers.shape[0], alt_tdd.shape[0]))
# - setup skims
skim_dict = inject.get_injectable('skim_dict')
skim_stack = inject.get_injectable('skim_stack')
orig_col_name = 'TAZ'
dest_col_name = model_settings.get('DESTINATION_FOR_TOUR_PURPOSE').get(tour_purpose)
odt_skim_stack_wrapper = skim_stack.wrap(left_key=orig_col_name, right_key=dest_col_name,
skim_key='out_period')
dot_skim_stack_wrapper = skim_stack.wrap(left_key=dest_col_name, right_key=orig_col_name,
skim_key='in_period')
od_skim_stack_wrapper = skim_dict.wrap(orig_col_name, dest_col_name)
skims = {
"odt_skims": odt_skim_stack_wrapper,
"dot_skims": dot_skim_stack_wrapper,
"od_skims": od_skim_stack_wrapper,
'orig_col_name': orig_col_name,
'dest_col_name': dest_col_name,