How to use the daiquiri.setup function in daiquiri

To help you get started, we’ve selected a few daiquiri examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github callowayproject / Transmogrify / transmogrify / filesystem / s3.py View on Github external
import logging
import boto3
import daiquiri

# Configure daiquiri once at import time: INFO and above go to the
# default (stderr) output; every logger from this module inherits it.
daiquiri.setup(level=logging.INFO)
logger = daiquiri.getLogger(__name__)


def _parse_s3_file(original_file):
    """
    Convert `s3://bucketname/path/to/file.txt` to ('bucketname', 'path/to/file.txt')
    """
    bits = original_file.replace('s3://', '').split("/")
    bucket = bits[0]
    object_key = "/".join(bits[1:])
    return bucket, object_key


def file_exists(original_file):
    """
    Validate the original file is in the S3 bucket
github jd / daiquiri / examples / output.py View on Github external
import logging
import sys

import daiquiri

# Log both to stdout and as JSON in a file called /dev/null. (Requires
# `python-json-logger`)
daiquiri.setup(level=logging.INFO, outputs=(
    daiquiri.output.Stream(sys.stdout),
    daiquiri.output.File("/dev/null",
                         formatter=daiquiri.formatter.JSON_FORMATTER),
    ))

# `subsystem` is an extra field daiquiri attaches to every record from
# this logger (visible in the JSON output).
logger = daiquiri.getLogger(__name__, subsystem="example")
logger.info("It works and log to stdout and /dev/null with JSON")
github alin23 / spfy / spfy / log.py View on Github external
def get_logger(level=logging.INFO):
    """Configure daiquiri and return the root application logger.

    Output goes both to stderr and to a log file under ``~/.logs``.

    :param level: base logging level (default ``logging.INFO``).
    :return: the daiquiri root logger; forced to DEBUG when the
        ``FIRE_DEBUG`` environment variable is set.
    """
    logdir = pathlib.Path.home().joinpath('.logs')
    # exist_ok=True avoids the check-then-create race of the original
    # exists()/mkdir() pair (another process could create it in between).
    logdir.mkdir(parents=True, exist_ok=True)

    daiquiri.setup(outputs=(
        daiquiri.output.STDERR,
        daiquiri.output.File(directory=logdir)
    ), level=level)

    logger = daiquiri.getLogger()

    if os.getenv('FIRE_DEBUG'):
        logger.setLevel(logging.DEBUG)

    return logger
github tskit-dev / tsinfer / evaluation.py View on Github external
def setup_logging(args):
    """Initialise daiquiri logging from parsed CLI arguments.

    Verbosity 0 maps to WARN, 1 to INFO, and 2 or more to DEBUG.  When
    ``args.log_section`` is set, only that logger is raised to the
    requested level while the rest of the hierarchy stays at WARN.
    """
    if args.verbosity > 1:
        log_level = "DEBUG"
    elif args.verbosity > 0:
        log_level = "INFO"
    else:
        log_level = "WARN"
    if args.log_section is None:
        daiquiri.setup(level=log_level)
        return
    daiquiri.setup(level="WARN")
    logging.getLogger(args.log_section).setLevel(log_level)
github fabric8-analytics / fabric8-analytics-stack-analysis / analytics_platform / kronos / src / kronos_offline_training.py View on Github external
from analytics_platform.kronos.gnosis.src.offline_training import (
    generate_and_save_gnosis_package_topic_model_s3, train_and_save_gnosis_ref_arch_s3)
from analytics_platform.kronos.softnet.src.offline_training import (
    generate_and_save_kronos_dependency_s3, generate_and_save_cooccurrence_matrices_s3)
from analytics_platform.kronos.pgm.src.offline_training import train_and_save_kronos_list_s3
from analytics_platform.kronos.apollo.src.offline_training import (
    train_and_save_pruned_tag_list_s3,
    generate_and_save_package_frequency_dict_s3)

import sys
import time
import daiquiri
import logging

# Module-level logging: INFO and above via daiquiri's default output.
daiquiri.setup(level=logging.INFO)
_logger = daiquiri.getLogger(__name__)


if __name__ == '__main__':
    # A custom run needs five argv entries (script name plus four
    # parameters).  The original `len(sys.argv) < 2` check let 2-4
    # arguments through and crashed with IndexError on argv[2..4] below.
    if len(sys.argv) < 5:
        training_data_url = "s3://dev-stack-analysis-clean-data/maven/github/"
        fp_min_support_count = 45
        fp_intent_topic_count_threshold = 3
        fp_num_partition = 12
        _logger.info("No env provided, using default")
    else:
        training_data_url = sys.argv[1]
        fp_min_support_count = int(sys.argv[2])
        fp_intent_topic_count_threshold = int(sys.argv[3])
        fp_num_partition = int(sys.argv[4])
        _logger.info("Env Provided")
github CermakM / jupyter-require / jupyter_require / __init__.py View on Github external
from .notebook import link_css
from .notebook import link_js
from .notebook import load_js
from .notebook import load_css

from .core import communicate

from .core import execute_with_requirements
from .core import execute
from .core import safe_execute
from .core import require

from IPython import get_ipython

daiquiri.setup(
    level=logging.DEBUG,
    outputs=[
        daiquiri.output.File(
            level=logging.DEBUG,
            filename='.log',
            formatter=daiquiri.formatter.ColorFormatter(
                fmt="%(asctime)s [%(process)d] %(color)s%(levelname)-8.8s %(name)s:"
                    "%(lineno)d: [JupyterRequire] %(message)s%(color_stop)s"
            )),
        daiquiri.output.Stream(
            level=logging.WARN,
            formatter=daiquiri.formatter.ColorFormatter(
                fmt="%(asctime)s [%(process)d] %(color)s%(levelname)-8.8s %(name)s:"
                    "%(lineno)d: [JupyterRequire] %(message)s%(color_stop)s"
            )
        ),
github yhyu13 / AlphaGOZero-python-tensorflow / model / alphagozero_resnet_model.py View on Github external
from model.resnet_model import *

import logging
import daiquiri

# Verbose DEBUG logging for model development; daiquiri's default
# output writes to stderr.
daiquiri.setup(level=logging.DEBUG)
logger = daiquiri.getLogger(__name__)


class AlphaGoZeroResNet(ResNet):

    def __init__(self, hps, images, labels, zs, mode):
        """Extend ResNet with AlphaGo Zero specific inputs.

        :param zs: extra targets stored on the instance — presumably the
            game-outcome (z) values for the value head; TODO confirm
            against the loss construction.
        Other parameters are forwarded unchanged to ``ResNet.__init__``.
        """
        self.zs = zs
        # Runtime train/eval switch, fed as a tf.bool placeholder at
        # session run time (used e.g. by batch norm).
        self.training = tf.placeholder(tf.bool)
        super(AlphaGoZeroResNet, self).__init__(hps, images, labels, mode)

    # override _batch_norm
    def _batch_norm(self, name, x):
        """Batch normalization."""
        with tf.variable_scope(name):
            params_shape = [x.get_shape()[-1]]
github robdmc / crontabs / crontabs / crontabs.py View on Github external
"""
Module for manageing crontabs interface
"""
import datetime
import time
import traceback

import daiquiri
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from fleming import fleming
from .processes import ProcessMonitor, wrapped_target

import logging
daiquiri.setup(level=logging.INFO)


class Cron:
    def __init__(self):
        """
        A Cron object runs many "tabs" of asynchronous tasks.
        """
        self._tab_list = []
        self.monitor = ProcessMonitor()

    def schedule(self, *tabs):
        """Register the given tabs and return self for call chaining."""
        tab_list = list(tabs)
        # Every tab shares this Cron's process monitor.
        for entry in tab_list:
            entry.monitor = self.monitor
        self._tab_list = tab_list
        return self
github yhyu13 / AlphaGOZero-python-tensorflow / Network.py View on Github external
import tensorflow as tf
import numpy as np
import os
import sys

import logging
import daiquiri

# DEBUG-level logging during network construction and training;
# daiquiri's default output writes to stderr.
daiquiri.setup(level=logging.DEBUG)
logger = daiquiri.getLogger(__name__)

from model.alphagozero_resnet_model import AlphaGoZeroResNet
from model.alphagozero_resnet_elu_model import AlphaGoZeroResNetELU
from model.alphagozero_resnet_full_model import AlphaGoZeroResNetFULL


class Network:

    """
    funcs:
        @ Build graph.
        @ Training
        @ Testing
        @ Evaluating
        usage: Working with multiple Graphs