How to use the rq.get_current_job function in rq

To help you get started, we’ve selected a few rq examples based on popular ways it is used in public projects.

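The thread running through all of the examples below is the same: a function that has been enqueued with rq calls get_current_job() from inside the worker to get hold of its own Job instance, usually to record progress or other bookkeeping in job.meta. As a minimal sketch of that pattern (the function name, payload and Redis connection here are illustrative, not taken from the examples):

from redis import Redis
from rq import Queue, get_current_job

def count_words(text):
    # Inside a worker, get_current_job() returns the Job currently being
    # executed; outside of a job context it returns None.
    job = get_current_job()
    job.meta['progress'] = 0
    job.save_meta()  # persist only the meta dict back to Redis

    result = len(text.split())

    job.meta['progress'] = 100
    job.save_meta()
    return result

# Producer side: enqueue the function as usual; an rq worker picks it up.
queue = Queue(connection=Redis())
queue.enqueue(count_words, 'hello rq world')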

github rq / rq / tests / fixtures.py
def modify_self_and_error(meta):
    j = get_current_job()
    j.meta.update(meta)
    j.save()
    return 1 / 0  # deliberately fail after persisting the updated meta
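
This fixture persists its metadata with j.save(), which writes the whole job back to Redis, before failing on purpose, presumably so tests can check that the updated meta survives the failure. When only the meta dict needs updating, rq also provides Job.save_meta(), which several of the later examples use; a rough equivalent of the fixture's bookkeeping would be:

j = get_current_job()
j.meta.update(meta)
j.save_meta()  # writes only job.meta, not the rest of the job
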
github Antergos / antbs / antbs / database / build.py
        else:
            self.version_str = self._pkg_obj.version_str

        pkg_link = '<a href="/package/{0}">{0}</a>'.format(self._pkg_obj.pkgname)

        tpl = 'Build <a href="/build/{0}">{0}</a> for {1} <strong>{2}</strong> started.'

        tlmsg = tpl.format(self.bnum, pkg_link, self.version_str)

        get_timeline_object(msg=tlmsg, tl_type=3, ret=False)

        self._pkg_obj.builds.append(self.bnum)
        status.now_building.append(self.bnum)

        with Connection(self.db):
            current_job = get_current_job()
            current_job.meta['building_num'] = self.bnum
            current_job.save()
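
The with Connection(self.db): block pushes a Redis client onto rq's connection stack, so that get_current_job() and the subsequent save() can resolve a connection without being handed one explicitly. A stripped-down sketch of the same pattern, with an illustrative connection URL (more recent rq releases deprecate the Connection context manager in favour of passing connections explicitly, as the django_task example further down does):

from redis import Redis
from rq import Connection, get_current_job

def record_build(bnum):
    # Runs inside an rq worker for an enqueued build.
    with Connection(Redis.from_url('redis://localhost:6379/0')):
        current_job = get_current_job()
        current_job.meta['building_num'] = bnum
        current_job.save()
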
github opencv / cvat / cvat / apps / auto_segmentation / views.py
def create_thread(tid, labels_mapping, user):
    try:
        # If the detected object's accuracy is bigger than the threshold, it will be returned
        TRESHOLD = 0.5
        # Init rq job
        job = rq.get_current_job()
        job.meta['progress'] = 0
        job.save_meta()
        # Get job indexes and segment length
        db_task = TaskModel.objects.get(pk=tid)
        # Get image list
        image_list = make_image_list(db_task.get_data_dirname())

        # Run auto segmentation by tf
        result = None
        slogger.glob.info("auto segmentation with tensorflow framework for task {}".format(tid))
        result = run_tensorflow_auto_segmentation(image_list, labels_mapping, TRESHOLD)

        if result is None:
            slogger.glob.info('auto segmentation for task {} canceled by user'.format(tid))
            return
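
Writing job.meta['progress'] and calling job.save_meta() only pays off if something reads the value back; on the producer side the same job can be looked up by id and its meta re-read. A hedged sketch of that read side (the connection and helper name are placeholders, not part of cvat):

from redis import Redis
from rq.job import Job

def read_progress(job_id, connection=None):
    # Job.fetch loads the job (including meta) from Redis; on a long-lived
    # Job object, job.refresh() re-reads it to pick up later meta updates.
    job = Job.fetch(job_id, connection=connection or Redis())
    return job.meta.get('progress', 0)
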
github richstoner / tornado-application-framework / app / rqtasks.py
def processDropboxImage(files):

    job = get_current_job()

    job.meta['handled_by'] = socket.gethostname()
    job.meta['state'] = 'start'
    job.save()

    print('Current job: %s' % job.id)
    #print job.meta

    for file in files:

        import uuid
        url_to_grab = file['link']
        image_path = '/vagrant/app/static/uploads/%s%s' % (uuid.uuid4(), os.path.splitext(file['link'])[1])

        urllib.request.urlretrieve(url_to_grab, image_path)
        job.meta['state'] = 'download complete'
github produvia / kryptos / core / kryptos / strategy / strategy.py
            extra_results = self.get_extra_results(context, results)
        except Exception:
            self.log.error("Failed to get extra results")

        for i in self._ml_models:
            i.analyze(self.name, self.state.DATA_FREQ, extra_results)

        # need to catch all exceptions because algo will end either way
        # except Exception as e:
        #     self.log.error("Error during shutdown/analyze()")
        #     self.log.error(str(e))

        try:
            url = outputs.save_analysis_to_storage(self, results)
            if self.in_job:
                job = get_current_job()

                job.meta["analysis_url"] = url
                job.save_meta()
                self.notify(f"You can view your strategy's analysis at {url}")

        except Exception as e:
            self.log.error("Failed to upload strat analysis to storage", exc_info=True)
            raise e

        self.state.dump_to_context(context)
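
The self.in_job flag guards the block so that get_current_job() is only consulted when the strategy actually runs inside a worker. Because get_current_job() returns None outside of a job context, an equivalent check can be derived from it directly; a sketch, not the kryptos implementation:

from rq import get_current_job

def running_in_rq_worker():
    # get_current_job() returns None when no job is being executed.
    return get_current_job() is not None
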
github morlandi / django-task / django_task / job.py
    def run(job_class, task_class, task_id):

        from django_task.job import job_trace
        from rq import get_current_job
        from django_task.app_settings import REDIS_URL

        job_trace('job.run() enter')
        task = None
        result = 'SUCCESS'
        failure_reason = ''

        try:

            # this raises a "Could not resolve a Redis connection" exception in sync mode
            #job = get_current_job()
            job = get_current_job(connection=redis.Redis.from_url(REDIS_URL))

            # Retrieve task obj and set as Started
            task = task_class.get_task_from_id(task_id)
            task.set_status(status='STARTED', job_id=job.get_id())

            # Execute job passing by task
            job_class.execute(job, task)

        except Exception as e:
            job_trace('ERROR: %s' % str(e))
            job_trace(traceback.format_exc())

            if task:
                task.log(logging.ERROR, str(e))
                task.log(logging.ERROR, traceback.format_exc())
            result = 'FAILURE'
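
Passing connection=redis.Redis.from_url(REDIS_URL) works around the commented-out call above: when the task runs synchronously there is no worker-managed connection for rq to fall back on, so get_current_job() cannot resolve one by itself. Handing it a client explicitly fixes that, at the cost of building a new client on every call; a sketch of reusing a module-level client instead (the URL is a placeholder, django_task reads it from its app settings):

import redis
from rq import get_current_job

REDIS_URL = 'redis://localhost:6379/0'
_redis_client = redis.Redis.from_url(REDIS_URL)

def current_job():
    return get_current_job(connection=_redis_client)
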
github ucfopen / quiz-extensions / views.py
    :param extension_dict: A dictionary that includes the percent of
        time and a list of canvas user ids.

        Example:
        {
            'percent': '300',
            'user_ids': [
                '0123456',
                '1234567',
                '9867543',
                '5555555'
            ]
        }
    :type extension_dict: dict
    """
    job = get_current_job()

    update_job(job, 0, "Starting...", "started")

    with app.app_context():
        if not extension_dict:
            update_job(job, 0, "Invalid Request", "failed", error=True)
            logger.warning("Invalid Request: {}".format(extension_dict))
            return job.meta

        try:
            course_json = get_course(course_id)
        except requests.exceptions.HTTPError:
            update_job(job, 0, "Course not found.", "failed", error=True)
            logger.exception("Unable to find course #{}".format(course_id))
            return job.meta
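
update_job here is a project-specific helper whose body is not shown in the snippet; judging from how it is called, it folds a percentage, a message and a state string into job.meta for the web layer to poll. A hypothetical reconstruction that matches only the call signature seen above:

def update_job(job, percent, status_msg, state, error=False):
    # Hypothetical sketch: the real quiz-extensions helper may differ.
    if job is None:  # e.g. when running outside an rq worker
        return
    job.meta.update(percent=percent, status_msg=status_msg, state=state, error=error)
    job.save_meta()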