How to use the esper.prelude.log.info function in esper

To help you get started, we’ve selected a few esper examples based on popular ways it is used in public projects.

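All of the examples below come from the scanner-research/esper-tv project, where log is the logger exported by esper.prelude. As a minimal sketch of the call itself (assuming, as the examples suggest, that log behaves like a standard logging.Logger):

# Minimal usage sketch. Assumption: esper.prelude exposes a ready-made
# logger named `log`; this mirrors how the examples below call it.
from esper.prelude import log

log.info('Starting Scanner job')                               # plain message
log.info('Job config: {}'.format({'io_packet_size': 10000}))   # formatted message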

scanner-research/esper-tv · app/esper/scanner_bench.py (View on GitHub)
def run_config(args, db_wrapper, job_config):
    db = db_wrapper.db

    # Start the Scanner job
    log.info('Starting Scanner job')

    run_opts = {
        'io_packet_size': job_config.io_packet_size,
        'work_packet_size': job_config.work_packet_size,
    }
    ppw = job_config.pipelines_per_worker
    if ppw != -1:
        run_opts['pipeline_instances_per_node'] = ppw

    run_pipeline(db, detach=True, run_opts=run_opts, **args)

    # Wait until the job succeeds or crashes
    start = now()
    log.info('Monitoring cluster')
    result, metrics = db_wrapper.cluster.monitor(db)
    end = now() - start

    # If we crashed, restart the cluster in case it's in a bad state
    if not result:
        db_wrapper.cluster.start()
        raise TestFailure("Out of memory")

    # Write out the profile if the run succeeded
    outputs = run_pipeline(db, no_execute=True, **args)
    try:
        outputs[0]._column._table.profiler().write_trace(
            '/app/data/traces/{}.trace'.format(run_name(db_wrapper.cluster.config(), job_config)))
    except Exception:
        # Assumed handler: the original snippet is truncated at this try block.
        traceback.print_exc()
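
These snippets lean on a few helpers defined elsewhere in scanner_bench.py and esper.prelude (now, TestFailure, run_pipeline, run_name, pcache). The first two are simple enough to stand in for when experimenting; the definitions below are illustrative assumptions, not esper-tv's actual code:

import time

def now():
    # Assumed behavior: wall-clock timestamp in seconds.
    return time.time()

class TestFailure(Exception):
    # Assumed stand-in: raised when a benchmark run fails,
    # e.g. the cluster runs out of memory.
    pass
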
scanner-research/esper-tv · app/esper/scanner_bench.py (View on GitHub)
        # End of test_config: convert the measured runtime (seconds) into a
        # per-video cost; `time` here is a local variable, not the module.
        if result:  # assumed guard; the source snippet begins mid-function
            price_per_video = (time / 3600.0) * price_per_hour / float(sample_size)
            return price_per_video, metrics
        else:
            return None

    results = []

    for (cluster_config, job_configs) in configs:

        # Only bring up the cluster if there exists a job config that hasn't been computed
        if not force and all([pcache.has(run_name(cluster_config, job_config)) for job_config in job_configs]):
            results.append([pcache.get(run_name(cluster_config, job_config)) for job_config in job_configs])

        else:
            with make_cluster(cluster_config, no_delete=no_delete) as db_wrapper:
                log.info('Cluster config: {}'.format(cluster_config))

                def try_config(job_config):
                    log.info('Job config: {}'.format(job_config))
                    try:
                        return test_config(
                            args, db_wrapper, cluster_config, job_config)
                    except TestFailure as e:
                        print(e)
                        return (str(e), None)
                    except Exception as e:
                        traceback.print_exc()
                        return (traceback.format_exc(), None)

                def try_config_cached(job_config):
                    return pcache.get(run_name(cluster_config, job_config),
                                      force=force, fn=lambda: try_config(job_config))
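
The pcache.get(key, force=force, fn=...) call above is a read-through cache: return the stored value for key if one exists, otherwise compute it with fn and persist the result. A minimal sketch of such a cache (a hypothetical PickleCache shown for illustration, not esper-tv's actual implementation):

import os
import pickle

class PickleCache:
    # Read-through cache keyed by run name, persisted as pickle files.
    def __init__(self, root='/tmp/pcache'):
        self._root = root
        os.makedirs(root, exist_ok=True)

    def _path(self, key):
        return os.path.join(self._root, '{}.pkl'.format(key))

    def has(self, key):
        return os.path.exists(self._path(key))

    def get(self, key, fn=None, force=False):
        # Serve from disk unless the caller forces a recompute.
        if not force and self.has(key):
            with open(self._path(key), 'rb') as f:
                return pickle.load(f)
        value = fn()
        with open(self._path(key), 'wb') as f:
            pickle.dump(value, f)
        return value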