Returns
-------
choices - pandas.Series
    destination alt chosen (indexed by trip id)
"""
trace_label = tracing.extend_trace_label(trace_label, 'trip_destination_simulate')
# select the utility spec column-set for this trip's primary tour purpose
spec = get_spec_for_purpose(model_settings, 'DESTINATION_SPEC', primary_purpose)
alt_dest_col_name = model_settings["ALT_DEST"]
logger.info("Running trip_destination_simulate with %d trips", len(trips))
# copy() so we don't mutate the shared model-constants dict when adding locals
locals_dict = config.get_model_constants(model_settings).copy()
locals_dict.update({
'size_terms': size_term_matrix
})
# expose skim wrappers to @-expressions in the spec
locals_dict.update(skims)
# pick one destination per trip from the pre-built alternatives sample;
# choosers whose alternatives all have zero probability get NO_DESTINATION
# instead of raising (allow_zero_probs=True)
destinations = interaction_sample_simulate(
choosers=trips,
alternatives=destination_sample,
spec=spec,
choice_column=alt_dest_col_name,
allow_zero_probs=True, zero_prob_choice_val=NO_DESTINATION,
skims=skims,
locals_d=locals_dict,
chunk_size=chunk_size,
trace_label=trace_label,
trace_choice_name='trip_dest')
# sort by index for deterministic chooser order
joint_tours = joint_tours.sort_index()
alt_dest_col_name = model_settings["ALT_DEST_COL_NAME"]
logger.info("Running joint_tour_destination_simulate with %d joint_tours" %
joint_tours.shape[0])
# create wrapper with keys for this lookup - in this case there is a TAZ in the choosers
# and a TAZ in the alternatives which get merged during interaction
# the skims will be available under the name "skims" for any @ expressions
skims = skim_dict.wrap("TAZ", alt_dest_col_name)
locals_d = {
'skims': skims,
}
constants = config.get_model_constants(model_settings)
if constants is not None:
locals_d.update(constants)
# choosers are tours - in a sense tours are choosing their destination
# left join: keep every joint tour even if its household row is missing
choosers = pd.merge(joint_tours, households_merged,
left_on='household_id', right_index=True, how='left')
# FIXME - MEMORY HACK - only include columns actually used in spec
chooser_columns = model_settings['SIMULATE_CHOOSER_COLUMNS']
choosers = choosers[chooser_columns]
choices_list = []
# segment by trip type and pick the right spec for each person type
# for tour_type, choosers_segment in choosers.groupby('tour_type'):
# iterate all known tour types (not just those present in choosers);
# loop body continues beyond this fragment
for tour_type, tour_type_id in iteritems(TOUR_TYPE_ID):
# make the current segment name visible to spec expressions
locals_d['segment'] = tour_type
"""
CDAP stands for Coordinated Daily Activity Pattern, which is a choice of
high-level activity pattern for each person, in a coordinated way with other
members of a person's household.
Because Python requires vectorization of computation, there are some specialized
routines in the cdap directory of activitysim for this purpose. This module
simply applies those utilities using the simulation framework.
"""
trace_label = 'cdap'
model_settings = config.read_model_settings('cdap.yaml')
persons_merged = persons_merged.to_frame()
constants = config.get_model_constants(model_settings)
logger.info("Running cdap_simulate with %d persons", len(persons_merged.index))
choices = run_cdap(persons=persons_merged,
cdap_indiv_spec=cdap_indiv_spec,
cdap_interaction_coefficients=cdap_interaction_coefficients,
cdap_fixed_relative_proportions=cdap_fixed_relative_proportions,
locals_d=constants,
chunk_size=chunk_size,
trace_hh_id=trace_hh_id,
trace_label=trace_label)
# - assign results to persons table and annotate
persons = persons.to_frame()
choices = choices.reindex(persons.index)
sample_size = model_settings["SAMPLE_SIZE"]
alt_dest_col_name = model_settings["ALT_DEST_COL_NAME"]
logger.info("Running %s with %d persons" % (trace_label, len(choosers.index)))
# create wrapper with keys for this lookup - in this case there is a TAZ in the choosers
# and a TAZ in the alternatives which get merged during interaction
# (logit.interaction_dataset suffixes duplicate chooser column with '_chooser')
# the skims will be available under the name "skims" for any @ expressions
skims = skim_dict.wrap('TAZ_chooser', 'TAZ')
locals_d = {
'skims': skims,
'segment_size': segment_name
}
constants = config.get_model_constants(model_settings)
if constants is not None:
locals_d.update(constants)
choices = interaction_sample(
choosers,
alternatives,
sample_size=sample_size,
alt_col_name=alt_dest_col_name,
spec=spec_for_segment(model_spec, segment_name),
skims=skims,
locals_d=locals_d,
chunk_size=chunk_size,
trace_label=trace_label)
return choices
persons_merged,
skim_dict,
destination_size_terms,
chunk_size, trace_hh_id):
# sample destination alternatives for at-work subtours
# (function header with leading parameters is outside this fragment)
trace_label = 'atwork_subtour_location_sample'
model_settings = config.read_model_settings('atwork_subtour_destination.yaml')
model_spec = simulate.read_model_spec(file_name='atwork_subtour_destination_sample.csv')
# merge persons into tours
choosers = pd.merge(tours, persons_merged, left_on='person_id', right_index=True)
# FIXME - MEMORY HACK - only include columns actually used in spec
chooser_columns = model_settings['SIMULATE_CHOOSER_COLUMNS']
choosers = choosers[chooser_columns]
constants = config.get_model_constants(model_settings)
sample_size = model_settings["SAMPLE_SIZE"]
alt_dest_col_name = model_settings["ALT_DEST_COL_NAME"]
logger.info("Running atwork_subtour_location_sample with %d tours", len(choosers))
# create wrapper with keys for this lookup - in this case there is a workplace_taz
# in the choosers and a TAZ in the alternatives which get merged during interaction
# the skims will be available under the name "skims" for any @ expressions
skims = skim_dict.wrap('workplace_taz', 'TAZ')
locals_d = {
'skims': skims
}
if constants is not None:
locals_d.update(constants)
result_list.append(purpose)
# NOTE(review): purpose.shape[0] is an int logged via %s -- works, but %d
# would be the conventional placeholder
logger.info("assign purpose to %s last outbound trips", purpose.shape[0])
# - last trip of inbound tour gets home (or work for atwork subtours)
purpose = trips_df.primary_purpose[last_trip & ~trips_df.outbound]
purpose = pd.Series(np.where(purpose == 'atwork', 'Work', 'Home'), index=purpose.index)
result_list.append(purpose)
logger.info("assign purpose to %s last inbound trips", purpose.shape[0])
# - intermediate stops (non-last trips) purpose assigned by probability table
# drop last trips; only intermediate trips remain to be chosen probabilistically
trips_df = trips_df[~last_trip]
logger.info("assign purpose to %s intermediate trips", trips_df.shape[0])
# run optional annotation preprocessor before choosing purposes
preprocessor_settings = model_settings.get('preprocessor', None)
if preprocessor_settings:
locals_dict = config.get_model_constants(model_settings)
expressions.assign_columns(
df=trips_df,
model_settings=preprocessor_settings,
locals_dict=locals_dict,
trace_label=trace_label)
# size chunks to fit memory budget
rows_per_chunk = \
trip_purpose_rpc(chunk_size, trips_df, probs_spec, trace_label=trace_label)
logger.info("%s rows_per_chunk %s num_choosers %s" %
(trace_label, rows_per_chunk, len(trips_df.index)))
# loop body continues beyond this fragment
for i, num_chunks, trips_chunk in chunk.chunked_choosers(trips_df, rows_per_chunk):
logger.info("Running chunk %s of %s size %d", i, num_chunks, len(trips_chunk))
alt identifier (dest_taz) from alternatives[]
prob: float
    the probability of the chosen alternative
pick_count : int
    number of duplicate picks for chooser, alt
"""
trace_label = tracing.extend_trace_label(trace_label, 'trip_destination_sample')
# select the sampling spec column-set for this trip's primary tour purpose
spec = get_spec_for_purpose(model_settings, 'DESTINATION_SAMPLE_SPEC', primary_purpose)
sample_size = model_settings["SAMPLE_SIZE"]
alt_dest_col_name = model_settings["ALT_DEST"]
logger.info("Running %s with %d trips", trace_label, trips.shape[0])
# copy() so we don't mutate the shared model-constants dict when adding locals
locals_dict = config.get_model_constants(model_settings).copy()
locals_dict.update({
'size_terms': size_term_matrix
})
# expose skim wrappers to @-expressions in the spec
locals_dict.update(skims)
# draw a sample of candidate destinations per trip; zero-probability
# choosers are permitted (allow_zero_probs=True) rather than raising
destination_sample = interaction_sample(
choosers=trips,
alternatives=alternatives,
sample_size=sample_size,
alt_col_name=alt_dest_col_name,
allow_zero_probs=True,
spec=spec,
skims=skims,
locals_d=locals_dict,
chunk_size=chunk_size,
trace_label=trace_label)
# locals available to annotation expressions
locals_dict = {
'person_time_window_overlap': person_time_window_overlap,
'persons': persons_merged
}
# annotate candidate participants before simulation
expressions.assign_columns(
df=candidates,
model_settings=preprocessor_settings,
locals_dict=locals_dict,
trace_label=trace_label)
# - simple_simulate
nest_spec = config.get_logit_model_settings(model_settings)
constants = config.get_model_constants(model_settings)
# custom_chooser enforces joint-tour participation consistency
# across household members (see participants_chooser)
choices = simulate.simple_simulate(
choosers=candidates,
spec=model_spec,
nest_spec=nest_spec,
locals_d=constants,
chunk_size=chunk_size,
trace_label=trace_label,
trace_choice_name='participation',
custom_chooser=participants_chooser)
# choice is boolean (participate or not)
choice_col = model_settings.get('participation_choice', 'participate')
# NOTE(review): the assert message contains a '%s' placeholder that is
# never interpolated with choice_col -- message will print literally
assert choice_col in model_spec.columns, \
"couldn't find participation choice column '%s' in spec"
# positional index of the participation-choice column in the spec
PARTICIPATE_CHOICE = model_spec.columns.get_loc(choice_col)
left_on='person_id', right_index=True, how='left')
# (pd.merge call above starts before this fragment)
# FIXME - MEMORY HACK - only include columns actually used in spec
chooser_columns = model_settings['SIMULATE_CHOOSER_COLUMNS']
choosers = choosers[chooser_columns]
alt_dest_col_name = model_settings["ALT_DEST_COL_NAME"]
origin_col_name = model_settings['CHOOSER_ORIG_COL_NAME']
# alternatives are pre-sampled and annotated with logsums and pick_count
# but we have to merge size_terms column into alt sample list
destination_sample['size_term'] = \
reindex(destination_size_terms.size_term, destination_sample[alt_dest_col_name])
tracing.dump_df(DUMP, destination_sample, trace_label, 'alternatives')
constants = config.get_model_constants(model_settings)
logger.info("Running tour_destination_simulate with %d persons", len(choosers))
# create wrapper with keys for this lookup - in this case there is a TAZ in the choosers
# and a TAZ in the alternatives which get merged during interaction
# the skims will be available under the name "skims" for any @ expressions
skims = skim_dict.wrap(origin_col_name, alt_dest_col_name)
locals_d = {
'skims': skims,
}
if constants is not None:
locals_d.update(constants)
tracing.dump_df(DUMP, choosers, trace_label, 'choosers')