How to use the celery.chain function in celery

To help you get started, we've selected a few celery.chain examples based on popular ways it is used in public projects.

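celery.chain links task signatures so that each task runs after the previous one finishes and receives its return value as the first positional argument of the next signature. Before the real-world examples below, here is a minimal sketch; the demo app, the broker URL and the add task are assumptions made purely for illustration.

from celery import Celery, chain

# Hypothetical app and task, for illustration only.
app = Celery('demo', broker='redis://localhost:6379/0')

@app.task
def add(x, y):
    return x + y

# add(2, 2) -> add(4, 4) -> add(8, 8): each task's return value becomes
# the first argument of the next signature in the chain.
workflow = chain(add.s(2, 2), add.s(4), add.s(8))
result = workflow.apply_async()    # or workflow.delay()
print(result.get())                # 16, once a worker has executed the chain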

github EventKit / eventkit-cloud / eventkit_cloud / tasks / debug_tasks.py
def test_chain(self, subtask_queuename=None):
    """ Returns the AsyncResult of a chain of 4 test_task queued on @subtask_queuename.
    """
    assert subtask_queuename is not None

    logger.info('TestChain subtask_queuename: {}'.format(subtask_queuename))
    delivery_info = self.request.delivery_info
    logger.info('TestChain delivery_info: {}'.format(delivery_info))

    test_tasks = [test_task.s(0).set(queue=subtask_queuename)]
    test_tasks.extend([test_task.s().set(queue=subtask_queuename) for i in range(3)])
    tc = celery.chain(test_tasks)
    res = tc.delay()

    return res
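Two details of this example are worth noting: chain() accepts either individual signatures or a single iterable of signatures, and .set(queue=...) routes each subtask to a specific queue. The sketch below restates the pattern with a hypothetical fetch task, urls list and 'io' queue name.

from celery import chain

# Build the signature list dynamically, then hand the whole list to chain();
# delay() returns the AsyncResult of the last task in the chain.
signatures = [fetch.s(url).set(queue='io') for url in urls]
workflow = chain(signatures)
async_result = workflow.delay()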
github mozilla / addons-server / apps / devhub / utils.py
            self.prev_file = self.find_previous_version(addon_data['version'])
            if self.prev_file:
                # Group both tasks so the results can be merged when
                # the jobs complete.
                validate = group((validate,
                                  self.validate_file(self.prev_file)))

        # Fallback error handler to save a set of exception results, in case
        # anything unexpected happens during processing.
        on_error = save.subtask([amo.VALIDATOR_SKELETON_EXCEPTION, file_.pk],
                                {'annotate': False}, immutable=True)

        # When the validation jobs complete, pass the results to the
        # appropriate annotate/save task for the object type.
        self.task = chain(validate, save.subtask([file_.pk],
                                                 link_error=on_error))

        # Create a cache key for the task, so multiple requests to
        # validate the same object do not result in duplicate tasks.
        opts = file_._meta
        self.cache_key = 'validation-task:{0}.{1}:{2}:{3}'.format(
            opts.app_label, opts.object_name, file_.pk, listed)
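When a group is chained into a following task, as above, Celery upgrades the pair to a chord, so the save task receives the collected group results once every validation job has finished; link_error attaches a fallback signature that runs if any of them fails. Below is a hedged sketch of the same shape, with hypothetical validate_new, validate_previous, save_results and record_failure tasks.

from celery import chain, group

# Immutable (.si) error handler: it ignores the failing task's result and
# only records that something went wrong for this object.
on_error = record_failure.si(object_id)

workflow = chain(
    group(validate_new.s(object_id), validate_previous.s(previous_id)),
    save_results.s(object_id).set(link_error=on_error),
)
workflow.apply_async()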
github pilosus / PilosusBot / PilosusBot / tasks.py
def celery_chain(parsed_update):
    """
    Celery chain of tasks, where each task in the chain is executed after the previous one is done.

    :param parsed_update: dict
    :return: dict (with 'status_code' and 'status' keys)
    """
    chain_result = chain(assess_message_score.s(parsed_update),
                         select_db_sentiment.s(),
                         send_message_to_chat.s()).apply_async()
    return chain_result
github ceos-seo / data_cube_ui / apps / spectral_indices / tasks.py
def run(task_id=None):
    """Responsible for launching task processing using celery asynchronous processes

    Chains the parsing of parameters, validation, chunking, and the start of data processing.
    """
    return chain(parse_parameters_from_task.s(task_id=task_id),
                 validate_parameters.s(task_id=task_id),
                 perform_task_chunking.s(task_id=task_id),
                 start_chunk_processing.s(task_id=task_id))()
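Two things this example relies on are easy to miss: every link is parameterised with the same task_id keyword argument, yet each task after the first still receives the previous task's return value as its first positional argument, and calling the finished chain object, as in chain(...)(), applies it asynchronously just like apply_async(). A small sketch of the same shape, using hypothetical parse, validate and process tasks:

from celery import chain

workflow = chain(
    parse.s(task_id=task_id),
    validate.s(task_id=task_id),   # also receives parse()'s return value
    process.s(task_id=task_id),    # also receives validate()'s return value
)
result = workflow()    # equivalent to workflow.apply_async()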
github DDMAL / Rodan / rodan / helpers / workflow.py
    # this is the workflow.
    for page in pages:
        workflow_chain = []
        for workflow_job in workflow_jobs:
            is_interactive = False if workflow_job.job_type == 0 else True
            runjob = RunJob(workflow_run=workflow_run,
                            workflow_job=workflow_job,
                            job_settings=workflow_job.job_settings,  # copy the most recent settings from the workflow job (these may be modified if the job is interactive)
                            needs_input=is_interactive,      # by default this is set to be True if the job is interactive
                            page=page)
            runjob.save()

            rodan_task = registry.tasks[str(workflow_job.job_name)]
            workflow_chain.append((rodan_task, str(runjob.uuid)))
        first_job = workflow_chain[0]
        res = chain([first_job[0].si(None, first_job[1])] + [job[0].s(job[1]) for job in workflow_chain[1:]])
        res.apply_async()
        return_objects.append(res)

    # finally, update the run_num with the most recent run
    if not testing:
        workflow.runs = run_num
        workflow.save()

    return return_objects
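The important detail here is the mix of .si() and .s(): the first run job uses an immutable signature, so it is not handed a previous result that does not exist, while every later job uses a regular signature and therefore receives the output of the preceding job. A hedged sketch of the same construction, with a hypothetical run_job task and list of run-job ids:

from celery import chain

# First link: immutable signature, started with an explicit None input.
# Remaining links: regular signatures that receive the previous job's result.
workflow = chain(
    [run_job.si(None, runjob_ids[0])] +
    [run_job.s(runjob_id) for runjob_id in runjob_ids[1:]]
)
workflow.apply_async()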
github mozilla / build-relengapi / relengapi / blueprints / slaveloan / task_groups.py
def group(*args, **kwargs):
    """Celery fails to chain two disparate groups together

    e.g. it starts groupB before groupA has finished in:
    groupA = group(...)
    groupB = group(...)
    all_tasks = chain(groupA, groupB)

    By chaining a group with a `do nothing` task, we can achieve the desired effect.
    This function replaces our use of group for simplicity
    c.f. http://stackoverflow.com/questions/15123772/
    """
    return chain(group_(*args, **kwargs),
                 tasks.dummy_task.si())
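Because each group returned by this wrapper is followed by a no-op task, Celery treats the pair as a chord that only completes once every task in the group has finished, so two such groups can safely be chained one after the other. A sketch of how the wrapper might be used, assuming hypothetical prepare and provision tasks and a machines list:

from celery import chain

# Both "groups" are really chain(group(...), dummy_task.si()), so groupB
# does not start until every task in groupA has completed.
groupA = group(prepare.si(machine) for machine in machines)
groupB = group(provision.si(machine) for machine in machines)
all_tasks = chain(groupA, groupB)
all_tasks.apply_async()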
github pomo-mondreganto / ForcAD / backend / celery_tasks / tasks.py
def run_checker(team_json, task_json, round):
    """Run check, put and get"""
    chained = chain(
        check_action.s(team_json, task_json, round),
        put_action.s(team_json, task_json, round),
        get_action.s(team_json, task_json, round),
    )

    chained.apply_async()
github google / timesketch / timesketch / lib / tasks.py
    index_task = index_task_class.s(
        file_path, timeline_name, index_name, file_extension)

    if only_index:
        return index_task

    if sketch_id:
        sketch_analyzer_chain = build_sketch_analysis_pipeline(
            sketch_id, searchindex.id, user_id=None)

    # If there are no analyzers just run the indexer.
    if not index_analyzer_chain and not sketch_analyzer_chain:
        return index_task

    if sketch_analyzer_chain:
        if not index_analyzer_chain:
            return chain(
                index_task, run_sketch_init.s(), sketch_analyzer_chain)
        return chain(
            index_task, index_analyzer_chain, run_sketch_init.s(),
            sketch_analyzer_chain)

    if current_app.config.get('ENABLE_EMAIL_NOTIFICATIONS'):
        return chain(
            index_task,
            index_analyzer_chain,
            run_email_result_task.s()
        )

    return chain(index_task, index_analyzer_chain)
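A chain is itself a signature, so the sub-chains built earlier (index_analyzer_chain, sketch_analyzer_chain) can be dropped into a larger chain, returned to the caller, and applied later; branches that do not exist are simply left out, as above. A minimal sketch of that kind of composition, assuming hypothetical index, analyze_a, analyze_b and notify tasks:

from celery import chain

index_task = index.s(file_path)
analyzer_chain = chain(analyze_a.s(), analyze_b.s())

# Chains nest and flatten: the result is a single pipeline that the caller
# can hold on to and apply whenever it is ready.
pipeline = chain(index_task, analyzer_chain, notify.s())
pipeline.apply_async()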
github jumpserver / jumpserver / apps / ops / tasks.py
def add_m(x):
    from celery import chain
    a = range(x)
    b = [a[i:i + 10] for i in range(0, len(a), 10)]
    s = list()
    s.append(add.s(b[0], b[1]))
    for i in b[1:]:
        s.append(add.s(i))
    res = chain(*tuple(s))()
    return res
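The idea above is to sum a large range in chunks of ten while threading the running total through the chain. A cleaner hedged sketch of the same pattern, assuming a hypothetical sum_chunk task that adds one chunk to the running total:

from celery import chain

# Hypothetical task, shown for context:
# @app.task
# def sum_chunk(total, chunk):
#     return total + sum(chunk)

numbers = list(range(x))    # x: the upper bound, as in add_m(x) above
chunks = [numbers[i:i + 10] for i in range(0, len(numbers), 10)]

workflow = chain(sum_chunk.s(0, chunks[0]),                 # seed the total with 0
                 *[sum_chunk.s(chunk) for chunk in chunks[1:]])
result = workflow()    # AsyncResult whose .get() is sum(range(x))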
github ESGF / esgf-compute-wps / compute / wps / backends / cdat.py
        axes_sig = '-'.join(axes.values)
        temp_paths = []
        process_paths = []
        process_chains = []

        for paths, task in ingress:
            temp_paths.extend(paths)

            filename = '{}-{:08}-{}.nc'.format(op.name, index, axes_sig)

            index += 1

            process_paths.append(os.path.join(settings.WPS_INGRESS_PATH,
                                              filename))
            
            process_chains.append(celery.chain(task, process.s(paths, op,
                                                               var.var_name,
                                                               base_units,
                                                               chunk_axis,
                                                               axes.values,
                                                               process_paths[-1],
                                                               job_id=job.id).set(
                                                                   **helpers.DEFAULT_QUEUE)))

        job.steps_inc_total((len(process_chains)*2)+1)

        return {
            'temp_paths': temp_paths,
            'cache': cache,
            'chunk_axis': chunk_axis,
            'process_paths': process_paths,
            'process_chains': process_chains,