How to use the celery.utils.log.get_task_logger function in celery

To help you get started, we’ve selected a few examples based on popular ways celery.utils.log.get_task_logger is used in public projects.

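Before the project examples below, here is a minimal sketch of the basic pattern (the app name, broker URL, and add task are illustrative only, not taken from any of the projects): get_task_logger(__name__) returns a logger that is a child of Celery's task logger, so messages logged inside a task pick up the worker's task log format, including the task name and id.

from celery import Celery
from celery.utils.log import get_task_logger

# Hypothetical app for illustration; substitute your own broker URL.
app = Celery('example', broker='redis://localhost:6379/0')

# Create the logger once at module level and reuse it in every task.
logger = get_task_logger(__name__)


@app.task
def add(x, y):
    # Emitted through the task logger, so the worker's task log format
    # (task name and id) is applied to this record automatically.
    logger.info('Adding %s and %s', x, y)
    return x + y

Each project example below follows the same pattern: the logger is created at module level with get_task_logger(__name__) and then used inside tasks in place of print or the root logger.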

github CodeTheChangeUBC / reBOOT / app / worker / historical_importer.py
import csv
from celery import task
from celery.utils.log import get_task_logger
from dateutil.parser import parse
from django.utils import timezone

from app.constants.item_map import ITEM_MAP
from app.enums import DonationStatusEnum, ItemStatusEnum
from app.models import Donor, Donation, Item, ItemDevice, ItemDeviceType
from app.worker.app_celery import set_complete, update_percent


logger = get_task_logger(__name__)


@task
def historical_importer(csvfile):
    '''
    Takes 10b format file path and imports into the database using the 10x
    format into the appropriate tables

    :param str csvfile: csvfile path
    '''
    item_bulk = []
    row_count, prev_percent = 0, 0

    try:
        read_file = csv.DictReader(csvfile, delimiter=',')
        row_total = sum(1 for line in csv.DictReader(csvfile))
github unicefuganda / eums / eums / elasticsearch / mappings / __init__.py
from django.conf import settings
import requests
from rest_framework.status import HTTP_200_OK
from celery.utils.log import get_task_logger

from eums.elasticsearch.mappings.node_mapping import DELIVERY_NODE_MAPPING

logger = get_task_logger(__name__)


def _create_index():
    url = settings.ELASTIC_SEARCH.HOST + settings.ELASTIC_SEARCH.INDEX
    response = requests.put(url)
    if response.status_code != HTTP_200_OK:
        logger.error("Index Creation Failed")


def setup_mappings():
    url = '%s/delivery_node/' % settings.ELASTIC_SEARCH.MAPPING
    try:
        _create_index()
        response = requests.post(url, json=DELIVERY_NODE_MAPPING)
        if response.status_code != HTTP_200_OK:
            logger.error("Mapping Set-up Failed")
github rapidpro / casepro / casepro / msgs / tasks.py
from datetime import timedelta

from dash.orgs.tasks import org_task
from smartmin.csv_imports.models import ImportTask

from django.conf import settings
from django.db import transaction
from django.utils import timezone

from celery import shared_task
from celery.task import task
from celery.utils.log import get_task_logger

from casepro.utils import parse_csv

from .models import FAQ, Label

logger = get_task_logger(__name__)


@org_task("message-pull", lock_timeout=12 * 60 * 60)
def pull_messages(org, since, until):
    """
    Pulls new unsolicited messages for an org
    """
    backend = org.get_backend()

    # if we're running for the first time, then we'll fetch back to 1 hour ago
    if not since:
        since = until - timedelta(hours=1)

    labels_created, labels_updated, labels_deleted, ignored = backend.pull_labels(org)

    msgs_created, msgs_updated, msgs_deleted, ignored = backend.pull_messages(org, since, until)
github CSCfi / pebbles / pouta_blueprints / tasks.py
(Variable.filtered_variables) are required. These are read from Flask config object, as these values
    cannot be modified during the runtime.
    """
    token = get_token()
    pbclient = PBClient(token, flask_config['INTERNAL_API_BASE_URL'], ssl_verify=False)

    return dict([(x['key'], x['value']) for x in pbclient.do_get('variables').json()])


def get_provisioning_queue(instance_id):
    queue_num = ((int(instance_id[-2:], 16) % flask_config['PROVISIONING_NUM_WORKERS']) + 1)
    logger.debug('selected queue %d/%d for %s' % (queue_num, flask_config['PROVISIONING_NUM_WORKERS'], instance_id))
    return 'provisioning_tasks-%d' % queue_num


logger = get_task_logger(__name__)
if flask_config['DEBUG']:
    logger.setLevel('DEBUG')


class TaskRouter(object):
    def route_for_task(self, task, args=None, kwargs=None):
        if task in (
                "pouta_blueprints.tasks.send_mails",
                "pouta_blueprints.tasks.periodic_update",
                "pouta_blueprints.tasks.send_mails",
                "pouta_blueprints.tasks.publish_plugins",
                "pouta_blueprints.tasks.housekeeping",
        ):
            return {'queue': 'system_tasks'}

        if task == "pouta_blueprints.tasks.update_user_connectivity":
github mvantellingen / localshop / src / localshop / apps / packages / tasks.py
import logging
import mimetypes
import os

import requests
from celery.utils.log import get_task_logger
from django.conf import settings
from django.core.files.uploadedfile import TemporaryUploadedFile
from django.utils.timezone import now

from localshop.apps.packages import forms, models, pypi
from localshop.apps.packages.utils import md5_hash_file
from localshop.celery import app
from localshop.utils import no_duplicates

logger = get_task_logger(__name__)


@app.task(ignore_result=True)
def refresh_repository_mirrors():
    qs = (
        models.Repository.objects
        .filter(enable_auto_mirroring=True)
        .values_list('pk', flat=True))

    for pk in qs:
        refresh_repository(pk)


@app.task(ignore_result=True)
def refresh_repository(repository_pk):
    repository = models.Repository.objects.get(pk=repository_pk)
github zenodo / zenodo / zenodo / modules / github / tasks.py
import six

from celery.utils.log import get_task_logger

from invenio.modules.webhooks.models import Event
from invenio.modules.oauth2server.models import Token as ProviderToken
from invenio.modules.oauthclient.client import oauth
#from invenio.ext.email import send_email
#from invenio.config import CFG_SITE_ADMIN_EMAIL
#from invenio.ext.template import render_template_to_string
#from invenio.modules.accounts.models import User


from .helpers import get_account, get_api
from .upload import upload
from .utils import submitted_deposition, get_zenodo_json, is_valid_sender, \
    get_contributors, init_api, revoke_token, remove_hook, get_owner


logger = get_task_logger(__name__)


@celery.task(ignore_result=True)
def disconnect_github(remote_app, access_token, extra_data):
    """ Uninstall webhooks. """
    # Note at this point the remote account and all associated data have
    # already been deleted. The celery task is passed the access_token and
    # extra_data to make some last cleanup and afterwards delete itself
    # remotely.
    remote = oauth.remote_apps[remote_app]

    try:
        gh = init_api(access_token)

        # Remove all installed hooks.
        for full_name, repo in six.iteritems(extra_data["repos"]):
github l3p-cv / lost / backend / lost / logic / pipeline / cron.py
def celery_exec_script(pipe_element_id):
    try:
        # Collect context information for celery task
        logger = get_task_logger(__name__)
        lostconfig = LOSTConfig()
        dbm = DBMan(lostconfig)
        pipe_e = dbm.get_pipe_element(pipe_e_id=pipe_element_id)
        worker = CurrentWorker(dbm, lostconfig)
        if not worker.enough_resources(pipe_e.script):
            logger.warning('Not enough resources! Rejected {} (PipeElement ID {})'.format(pipe_e.script.path, pipe_e.idx))
            return
        pipe_e.state = state.PipeElement.IN_PROGRESS
        dbm.save_obj(pipe_e)
        file_man = FileMan(lostconfig)
        pipe = pipe_e.pipe

        cmd = gen_run_cmd("pudb3", pipe_e, lostconfig)
        debug_script_path = file_man.get_instance_path(pipe_e)
        debug_script_path = os.path.join(debug_script_path, 'debug.sh')
        with open(debug_script_path, 'w') as sfile:
github GrafeasGroup / tor / tor / role_moderator / tasks.py
format_bot_response as _,
    message_link,
    post_comment,
    responses as bot_msg,
)
from tor.task_base import Task, InvalidUser

from celery.utils.log import get_task_logger
from celery import current_app as app, signature
from praw.models import Comment

import re
import textwrap


log = get_task_logger(__name__)


MOD_SUPPORT_PHRASES = [
    re.compile("fuck", re.IGNORECASE),
    re.compile("unclaim", re.IGNORECASE),
    re.compile("undo", re.IGNORECASE),
    re.compile("(?:good|bad) bot", re.IGNORECASE),
]


@app.task(bind=True, ignore_result=True, base=Task)
def check_inbox(self):
    """
    Checks all unread messages in the inbox, routing the responses to other queues. This
    effectively transfers tasks from Reddit's inbox to our internal task queuing system,
    reducing the required API calls.
github lavalamp- / ws-backend-community / tasknode / tasks / scanning / services / inspection / web / base.py
from celery.utils.log import get_task_logger

from ......app import websight_app
from .....base import ServiceTask, WebServiceTask, DatabaseTask, NetworkServiceTask
from .virtualhost import discover_virtual_hosts_for_web_service
from wselasticsearch.ops import get_supported_ssl_version_for_service, update_web_service_scan_latest, \
    update_web_service_scan_not_latest, get_virtual_hosts_from_network_service_scan
from .crawling import crawl_web_service
from .imaging import screenshot_web_service
from .analysis import create_report_for_web_service_scan
from .fingerprinting import enumerate_user_agent_fingerprints_for_web_service
from wselasticsearch.ops import get_all_domains_for_ip_address
from lib.sqlalchemy.ops import get_latest_web_service_scan_uuid, check_web_service_scanning_status, \
    update_web_service_scanning_status as update_web_service_scanning_status_op, get_all_web_flags_for_organization
from lib import ConfigManager
from wselasticsearch.flags import DataFlagger

logger = get_task_logger(__name__)
config = ConfigManager.instance()


#USED
@websight_app.task(bind=True, base=NetworkServiceTask)
def inspect_http_service(
        self,
        org_uuid=None,
        network_service_scan_uuid=None,
        network_service_uuid=None,
        order_uuid=None,
):
    """
    Inspect the HTTP service running on the given network service on behalf of the given
    organization and network service scan.
    :param org_uuid: The UUID of the organization to inspect the HTTP service on behalf of.
github MacroConnections / DIVE-backend / dive / tasks / transformation / reduce.py
import os
import pandas as pd
from flask import current_app

from dive.db import db_access
from dive.data.access import get_data
from dive.task_core import celery, task_app
from dive.tasks.pipelines import ingestion_pipeline
from dive.tasks.ingestion.upload import save_dataset
from dive.tasks.transformation.utilities import get_transformed_file_name

from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)


def reduce_dataset(project_id, dataset_id, column_ids_to_keep, new_dataset_name_prefix):
    df = get_data(project_id=project_id, dataset_id=dataset_id)

    with task_app.app_context():
        project = db_access.get_project(project_id)
        original_dataset = db_access.get_dataset(project_id, dataset_id)

    preloaded_project = project.get('preloaded', False)
    if preloaded_project:
        project_dir = os.path.join(current_app.config['PRELOADED_DIR'], project['directory'])
    else:
        project_dir = os.path.join(current_app.config['UPLOAD_DIR'], str(project_id))

    original_dataset_title = original_dataset['title']