How to use the rq.registry.StartedJobRegistry class in rq

To help you get started, we’ve selected a few rq examples based on popular ways rq.registry.StartedJobRegistry is used in public projects.
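
Before diving into the project examples below, here is a minimal sketch of the pattern most of them share: build a StartedJobRegistry for a queue and read the IDs of the jobs currently being executed. The queue name ('default') and the local Redis connection are illustrative assumptions, not taken from any of the projects.

from redis import Redis
from rq import Queue
from rq.job import Job
from rq.registry import StartedJobRegistry

# Assumed local Redis instance and queue name, for illustration only
redis_conn = Redis(host='localhost', port=6379)
queue = Queue('default', connection=redis_conn)

# The registry tracks jobs that workers have picked up but not yet finished
registry = StartedJobRegistry(queue.name, connection=redis_conn)
print('Jobs in progress: {}'.format(len(registry)))

for job_id in registry.get_job_ids():
    job = Job.fetch(job_id, connection=redis_conn)
    print(job_id, job.func_name)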


github rq / rq / tests / test_worker.py
def test_statistics(self):
        """Successful and failed job counts are saved properly"""
        queue = Queue()
        job = queue.enqueue(div_by_zero)
        worker = Worker([queue])
        worker.register_birth()

        self.assertEqual(worker.failed_job_count, 0)
        self.assertEqual(worker.successful_job_count, 0)
        self.assertEqual(worker.total_working_time, 0)

        registry = StartedJobRegistry(connection=worker.connection)
        job.started_at = utcnow()
        job.ended_at = job.started_at + timedelta(seconds=0.75)
        worker.handle_job_failure(job)
        worker.handle_job_success(job, queue, registry)

        worker.refresh()
        self.assertEqual(worker.failed_job_count, 1)
        self.assertEqual(worker.successful_job_count, 1)
        self.assertEqual(worker.total_working_time, 1.5)  # 1.5 seconds

        worker.handle_job_failure(job)
        worker.handle_job_success(job, queue, registry)

        worker.refresh()
        self.assertEqual(worker.failed_job_count, 2)
        self.assertEqual(worker.successful_job_count, 2)
github f0cker / crackq / utils / rq_queryqueue.py
import rq
import sys

from rq import use_connection, Queue
from redis import Redis

if len(sys.argv) < 2:
    print('Usage: ./{} <queue>'.format(sys.argv[0]))
    sys.exit(1)

redis_con = Redis('redis', 6379)
redis_q = Queue(sys.argv[1], connection=redis_con)

base = rq.registry.BaseRegistry(sys.argv[1],
                                connection=redis_con)
started = rq.registry.StartedJobRegistry(sys.argv[1],
                                         connection=redis_con)
failed = rq.registry.FailedJobRegistry(sys.argv[1],
                                       connection=redis_con)
comp = rq.registry.FinishedJobRegistry(sys.argv[1],
                                       connection=redis_con)
comp_list = comp.get_job_ids()
cur_list = started.get_job_ids()
failed_list = failed.get_job_ids()
queue = redis_q.job_ids

print('Complete: {}'.format(comp_list))
print('Failed: {}'.format(failed_list))
print('Current: {}'.format(cur_list))
print('Queued: {}'.format(queue))
github DistriNet / tranco-list / job_handler.py
def current_jobs(self):
        """ Track currently active and queued jobs """
        registry = StartedJobRegistry(queue=self.generate_queue)
        jobs = registry.get_job_ids() + self.generate_queue.job_ids  # started + queued

        return jobs
github rq / rq / rq / worker.py
def perform_job(self, job, queue, heartbeat_ttl=None):
        """Performs the actual work of a job.  Will/should only be called
        inside the work horse's process.
        """
        self.prepare_job_execution(job, heartbeat_ttl)
        push_connection(self.connection)

        started_job_registry = StartedJobRegistry(job.origin,
                                                  self.connection,
                                                  job_class=self.job_class)

        try:
            job.started_at = utcnow()
            timeout = job.timeout or self.queue_class.DEFAULT_TIMEOUT
            with self.death_penalty_class(timeout, JobTimeoutException, job_id=job.id):
                rv = job.perform()

            job.ended_at = utcnow()

            # Pickle the result in the same try-except block since we need
            # to use the same exc handling when pickling fails
            job._result = rv
            self.handle_job_success(job=job,
                                    queue=queue,
                                    started_job_registry=started_job_registry)
github rq / rq / rq / worker.py
"""Performs misc bookkeeping like updating states prior to
        job execution.
        """
        if job.timeout == -1:
            timeout = -1
        else:
            timeout = job.timeout or 180

        if heartbeat_ttl is None:
            heartbeat_ttl = self.job_monitoring_interval + 5

        with self.connection.pipeline() as pipeline:
            self.set_state(WorkerStatus.BUSY, pipeline=pipeline)
            self.set_current_job_id(job.id, pipeline=pipeline)
            self.heartbeat(heartbeat_ttl, pipeline=pipeline)
            registry = StartedJobRegistry(job.origin, self.connection,
                                          job_class=self.job_class)
            registry.add(job, timeout, pipeline=pipeline)
            job.set_status(JobStatus.STARTED, pipeline=pipeline)
            pipeline.hset(job.key, 'started_at', utcformat(utcnow()))
            pipeline.execute()

        msg = 'Processing {0} from {1} since {2}'
        self.procline(msg.format(job.func_name, job.origin, time.time()))
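
For a rough, standalone illustration of the registry add/remove calls used above (outside of a real worker), here is a sketch; the queue name, Redis location, and the enqueued function are assumptions made for the example.

from redis import Redis
from rq import Queue
from rq.registry import StartedJobRegistry

def say_hello():
    return 'hello'

# Assumed local Redis instance, queue name, and job function, for illustration only
redis_conn = Redis(host='localhost', port=6379)
queue = Queue('default', connection=redis_conn)
job = queue.enqueue(say_hello)

registry = StartedJobRegistry(queue.name, connection=redis_conn)
with redis_conn.pipeline() as pipeline:
    # Keep the registry entry alive for 120 seconds, mirroring the
    # worker's use of the job timeout as the TTL
    registry.add(job, 120, pipeline=pipeline)
    pipeline.execute()

print(registry.get_job_ids())  # the job id now appears as 'started'
registry.remove(job)           # and is dropped again here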
github rq / django-rq / django_rq / views.py
def started_jobs(request, queue_index):
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)

    registry = StartedJobRegistry(queue.name, queue.connection)

    items_per_page = 100
    num_jobs = len(registry)
    page = int(request.GET.get('page', 1))
    jobs = []

    if num_jobs > 0:
        last_page = int(ceil(num_jobs / items_per_page))
        page_range = range(1, last_page + 1)
        offset = items_per_page * (page - 1)
        job_ids = registry.get_job_ids(offset, offset + items_per_page - 1)

        for job_id in job_ids:
            try:
                jobs.append(Job.fetch(job_id, connection=queue.connection))
            except NoSuchJobError:
github cloud-custodian / cloud-custodian / tools / c7n_salactus / c7n_salactus / cli.py
def _repr(q):
        return "running:%d pending:%d finished:%d" % (
            StartedJobRegistry(q.name, conn).count,
            q.count,
            FinishedJobRegistry(q.name, conn).count)
    for q in Queue.all(conn):
github f0cker / crackq / crackq / run_hashcat.py
"""
        Callback function to take action on the hashcat cracked signal.
        Action is to write the latest cracked hashes.
        """
        logger.debug('Callback Triggered: Cracked')
        status_dict = self.status(sender)
        logger.debug('Hashcat status: {}'.format(status_dict))
        mail_server = CRACK_CONF['notify']['mail_server']
        mail_port = CRACK_CONF['notify']['mail_port']
        email_src = CRACK_CONF['notify']['src']
        inactive_time = CRACK_CONF['notify']['inactive_time']
        tls = CRACK_CONF['notify']['tls']
        rconf = CRACK_CONF['redis']
        redis_con = Redis(rconf['host'], rconf['port'])
        redis_q = Queue(connection=redis_con)
        started = rq.registry.StartedJobRegistry('default',
                                                 connection=redis_con)
        session = started.get_job_ids()[0]
        job = redis_q.fetch_job(session)
        if 'notify' in job.meta.keys():
            if job.meta['notify']:
                if 'email' in job.meta.keys():
                    user_email = job.meta['email']
                    try:
                        now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                        now = datetime.strptime(now,
                                                '%Y-%m-%d %H:%M:%S')
                        last = datetime.strptime(job.meta['last_seen'],
                                                 '%Y-%m-%d %H:%M:%S')
                        inactive_time = timedelta(minutes=int(inactive_time))
                        activity = now - last
                        if (activity > inactive_time
github mhfowler / open-source-feeds / osf_scraper_api / osf_scraper_api / utilities / rq_helper.py
def get_running_rq_jobs(queue_name):
    redis_conn = get_redis_connection()
    registry = StartedJobRegistry(queue_name, connection=redis_conn)
    running_job_ids = registry.get_job_ids()  # IDs of jobs that are currently executing
    jobs = []
    for job_id in running_job_ids:
        try:
            job = Job.fetch(job_id, connection=redis_conn)
            jobs.append(job)
        except:
            continue
    return jobs
github f0cker / crackq / crackq / run_hashcat.py
"""
        Callback function to take action on the hashcat finished signal.
        Action is currently to write the result file (previously this reset
        hashcat).
        """
        logger.debug('Callback Triggered: Cracking Finished')
        status_dict = self.status(sender)
        mail_server = CRACK_CONF['notify']['mail_server']
        mail_port = CRACK_CONF['notify']['mail_port']
        email_src = CRACK_CONF['notify']['src']
        inactive_time = CRACK_CONF['notify']['inactive_time']
        tls = CRACK_CONF['notify']['tls']
        rconf = CRACK_CONF['redis']
        redis_con = Redis(rconf['host'], rconf['port'])
        redis_q = Queue(connection=redis_con)
        started = rq.registry.StartedJobRegistry('default',
                                                 connection=redis_con)
        #try:
        #    session = started.get_job_ids()[0]
        #except KeyError:
            #logger.debug('Problem getting stopped session for notify')
        session = sender.session
        logger.debug('Sending notification')
        job = redis_q.fetch_job(session)
        if 'notify' in job.meta.keys():
            if job.meta['notify']:
                if 'email' in job.meta.keys():
                    user_email = job.meta['email']
                    try:
                        now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                        now = datetime.strptime(now,
                                                '%Y-%m-%d %H:%M:%S')