How to use the activitysim.core.tracing.print_elapsed_time function in activitysim

To help you get started, we've selected a few activitysim examples based on popular ways the print_elapsed_time function is used in public projects.

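Every example below follows the same two-step pattern: call print_elapsed_time() with no arguments to start a timer (it simply returns the current time), then pass a label and the previous timestamp to log the elapsed interval. Because the function returns a fresh timestamp, t0 can be chained through successive steps. A minimal sketch of the pattern (build_spec is a hypothetical stand-in for the work being timed):

    from activitysim.core import tracing

    # with no arguments, print_elapsed_time just returns the current time
    t0 = tracing.print_elapsed_time()

    spec = build_spec()  # hypothetical work being timed

    # logs the elapsed time under the given label and returns a new
    # timestamp, ready to time the next step
    t0 = tracing.print_elapsed_time("build_spec", t0)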

From ActivitySim/activitysim: activitysim/abm/models/util/cdap.py
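In build_cdap_spec, a timer chained through t0 earlier in the function is reported once the spec for a given household size is complete: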
    if trace_spec:
        tracing.trace_df(spec, '%s.hhsize%d_spec' % (trace_label, hhsize),
                         transpose=False, slicer='NONE')

    # replace slug with coefficient
    d = interaction_coefficients.set_index('slug')['coefficient'].to_dict()
    for c in spec.columns:
        spec[c] =\
            spec[c].map(lambda x: d.get(x, x or 0.0)).fillna(0)

    if trace_spec:
        tracing.trace_df(spec, '%s.hhsize%d_spec_patched' % (trace_label, hhsize),
                         transpose=False, slicer='NONE')

    if cache:
        cache_spec(hhsize, spec)

    t0 = tracing.print_elapsed_time("build_cdap_spec hh_size %s" % hhsize, t0)

    return spec
From ActivitySim/activitysim: example_stride/coalesce.py
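This standalone script shows the simplest usage: start the timer before any work begins, then report the total under the label "everything" at the very end: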
import logging

from activitysim.core import config
from activitysim.core import inject
from activitysim.core import mp_tasks
from activitysim.core import pipeline
from activitysim.core import tracing

# from activitysim import abm


logger = logging.getLogger('activitysim')


if __name__ == '__main__':

    inject.add_injectable('configs_dir', ['configs', '../example/configs'])

    config.handle_standard_args()

    mp_tasks.filter_warnings()
    tracing.config_logger()

    t0 = tracing.print_elapsed_time()

    coalesce_rules = config.setting('coalesce')

    mp_tasks.coalesce_pipelines(coalesce_rules['names'], coalesce_rules['slice'], use_prefix=False)

    checkpoints_df = pipeline.get_checkpoints()
    file_path = config.output_file_path('coalesce_checkpoints.csv')
    checkpoints_df.to_csv(file_path, index=True)

    t0 = tracing.print_elapsed_time("everything", t0)
From ActivitySim/activitysim: activitysim/core/mp_tasks.py
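Here print_elapsed_time brackets the lifetime of a subprocess: the timer starts just before p.start(), and the elapsed time is reported once all child processes have terminated: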
def run_sub_task(p):
    """
    Run process p synchronously.

    Return when the subprocess terminates, or raise an error if its exitcode is nonzero

    Parameters
    ----------
    p : multiprocessing.Process
    """
    logger.info("running sub_process %s", p.name)

    mem.trace_memory_info("%s.start" % p.name)

    t0 = tracing.print_elapsed_time()
    p.start()

    while multiprocessing.active_children():
        mem.trace_memory_info()
        time.sleep(1)

    # no need to join explicitly since multiprocessing.active_children joins completed procs
    # p.join()

    t0 = tracing.print_elapsed_time('sub_process %s' % p.name, t0)
    # logger.info('%s.exitcode = %s' % (p.name, p.exitcode))

    mem.trace_memory_info("%s.completed" % p.name)

    if p.exitcode:
        logger.error("Process %s returned exitcode %s", p.name, p.exitcode)
From ActivitySim/activitysim: verification/simulation.py
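In this verification driver, the timer is started immediately after logging is configured; the matching report call lies beyond this excerpt, but the chaining pattern is the same: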
    # data_dir = '/Users/jeff.doyle/work/activitysim-data/mtc_tm1/data'
    data_dir = '../example/data'

    # inject.add_injectable('data_dir', '/Users/jeff.doyle/work/activitysim-data/mtc_tm1/data')
    inject.add_injectable('data_dir', ['ancillary_data', data_dir])
    # inject.add_injectable('data_dir', ['ancillary_data', '../activitysim/abm/test/data'])
    inject.add_injectable('configs_dir', ['configs', '../example/configs'])

    injectables = config.handle_standard_args()

    tracing.config_logger()
    config.filter_warnings()

    log_settings(injectables)

    t0 = tracing.print_elapsed_time()

    # cleanup if not resuming
    if not config.setting('resume_after', False):
        cleanup_output_files()

    run_list = mp_tasks.get_run_list()

    if run_list['multiprocess']:
        # do this after config.handle_standard_args, as command line args may override injectables
        injectables = list(set(injectables) | set(['data_dir', 'configs_dir', 'output_dir']))
        injectables = {k: inject.get_injectable(k) for k in injectables}
    else:
        injectables = None

    run(run_list, injectables)
From ActivitySim/activitysim: activitysim/core/pipeline.py
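run_model chains the timer through the orca step and the checkpoint; the debug=True argument routes these timing messages to the debug log level, keeping them out of the default info log: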
    else:
        step_name = model_name
        args = {}

    # check for no_checkpoint prefix
    if step_name[0] == NO_CHECKPOINT_PREFIX:
        step_name = step_name[1:]
        checkpoint = False
    else:
        checkpoint = True

    inject.set_step_args(args)

    t0 = print_elapsed_time()
    orca.run([step_name])
    t0 = print_elapsed_time("run_model step '%s'" % model_name, t0, debug=True)

    inject.set_step_args(None)

    _PIPELINE.rng().end_step(model_name)
    if checkpoint:
        add_checkpoint(model_name)
        t0 = print_elapsed_time("run_model add_checkpoint '%s'" % model_name, t0, debug=True)
    else:
        logger.info("##### skipping %s checkpoint for %s" % (step_name, model_name))
From ActivitySim/activitysim: activitysim/core/mp_tasks.py
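This model-run loop uses nested timers: t1 measures each individual model, while t0 accumulates the total across all models in the run list: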
    resume_after = LAST_CHECKPOINT

    pipeline.open_pipeline(resume_after)
    last_checkpoint = pipeline.last_checkpoint()

    if last_checkpoint in models:
        logger.info("Resuming model run list after %s", last_checkpoint)
        models = models[models.index(last_checkpoint) + 1:]

    # preload any bulky injectables (e.g. skims) not in pipeline
    inject.get_injectable('preload_injectables', None)

    t0 = tracing.print_elapsed_time()
    for model in models:

        t1 = tracing.print_elapsed_time()

        try:
            pipeline.run_model(model)
        except Exception as e:
            logger.warning("%s exception running %s model: %s", type(e).__name__, model, str(e),
                           exc_info=True)
            raise e

        queue.put({'model': model, 'time': time.time()-t1})

    tracing.print_elapsed_time("run (%s models)" % len(models), t0)

    pipeline.close_pipeline()
From ActivitySim/activitysim: activitysim/core/simulate.py
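In eval_utilities, the timer brackets the evaluation of the spec expressions and the dot-product computation of utilities: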
    expression_values = np.empty((spec.shape[0], choosers.shape[0]))
    for i, expr in enumerate(exprs):
        try:
            if expr.startswith('@'):
                expression_values[i] = eval(expr[1:], globals_dict, locals_dict)
            else:
                expression_values[i] = choosers.eval(expr)
        except Exception as err:
            logger.exception("Variable evaluation failed for: %s" % str(expr))
            raise err

    # - compute_utilities
    utilities = np.dot(expression_values.transpose(), spec.astype(np.float64).values)
    utilities = pd.DataFrame(data=utilities, index=choosers.index, columns=spec.columns)

    t0 = tracing.print_elapsed_time(" eval_utilities", t0)

    if have_trace_targets:

        # get int offsets of the trace_targets (offsets of bool=True values)
        trace_targets = tracing.trace_targets(choosers)
        offsets = np.nonzero(trace_targets)[0]

        # get array of expression_values
        # expression_values.shape = (len(spec), len(choosers))
        # data.shape = (len(spec), len(offsets))
        data = expression_values[:, offsets]

        # columns is chooser index as str
        column_labels = choosers.index[trace_targets].astype(str)
        # index is utility expressions
        index = spec.index
From ActivitySim/activitysim: activitysim/core/mp_tasks.py
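In run_sub_simulations, the timer starts once before the step's subprocesses are launched; the nested idle() helper keeps monitoring memory and process status while the clock runs: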
    def idle(seconds):
        # idle for specified number of seconds, monitoring message queue and sub process status
        log_queued_messages()
        check_proc_status()
        mem.trace_memory_info()
        for _ in range(seconds):
            time.sleep(1)
            # log queued messages as they are received
            log_queued_messages()
            # monitor sub process status and drop breadcrumbs or fail_fast as they terminate
            check_proc_status()
            mem.trace_memory_info()

    step_name = step_info['name']

    t0 = tracing.print_elapsed_time()
    logger.info('run_sub_simulations step %s models resume_after %s', step_name, resume_after)

    # if resuming and some processes completed successfully in previous run
    if previously_completed:
        assert resume_after is not None
        assert set(previously_completed).issubset(set(process_names))

        if resume_after == LAST_CHECKPOINT:
            # if we are resuming where the previous run left off, then we can skip running
            # any subprocedures that completed successfully in the previous run
            process_names = [name for name in process_names if name not in previously_completed]
            logger.info('step %s: skipping %s previously completed subprocedures',
                        step_name, len(previously_completed))
        else:
            # if we are resuming after a specific model, then force all subprocesses to run
            # (assuming if they specified a model, they really want everything after that to run)