How to use the rq.Queue class in rq

To help you get started, we’ve selected a few rq examples, drawn from popular ways the library is used in public projects.
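
Most of the examples below follow the same basic pattern: bind a Queue to a Redis connection, enqueue a function call, and let a worker process it. Here is a minimal sketch of that pattern, assuming a local Redis server; count_words is a hypothetical job function, not part of rq:

from redis import Redis
from rq import Queue, Worker

def count_words(text):      # hypothetical job function
    return len(text.split())

redis_conn = Redis()                  # localhost:6379 by default
q = Queue(connection=redis_conn)      # the 'default' queue

# enqueue() returns a Job; its result is set once a worker has run it.
job = q.enqueue(count_words, 'hello rq world')

# Drain the queue and exit (burst mode), as several tests below do.
Worker([q], connection=redis_conn).work(burst=True)
print(job.result)                     # -> 3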

github getsentry / sentry-python / tests / integrations / rq / test_rq.py
def test_transport_shutdown(sentry_init, capture_events_forksafe):
    sentry_init(integrations=[RqIntegration()])

    events = capture_events_forksafe()

    queue = rq.Queue(connection=FakeStrictRedis())
    worker = rq.Worker([queue], connection=queue.connection)

    queue.enqueue(crashing_job, foo=42)
    worker.work(burst=True)

    event = events.read_event()
    events.read_flush()

    (exception,) = event["exception"]["values"]
    assert exception["type"] == "ZeroDivisionError"
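
The FakeStrictRedis object above comes from the fakeredis package, which emulates Redis in memory, so the test needs no running server. Outside of worker tests like this one, the same trick pairs well with the is_async=False flag available on recent rq Queues, which makes enqueue() run the job inline. A minimal sketch, with a hypothetical job function:

from fakeredis import FakeStrictRedis
from rq import Queue

def double(x):      # hypothetical job function
    return x * 2

# is_async=False executes the job immediately in-process; no Worker needed.
q = Queue(is_async=False, connection=FakeStrictRedis())
job = q.enqueue(double, 21)
assert job.result == 42
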
github rq / rq / tests / test_queue.py
def test_all_custom_job(self):
    class CustomJob(Job):
        pass

    q = Queue('all-queue')
    q.enqueue(say_hello)
    queues = Queue.all(job_class=CustomJob)
    self.assertEqual(len(queues), 1)
    self.assertIs(queues[0].job_class, CustomJob)
github rq / rq / tests / test_scheduler.py
def test_enqueue_scheduled_jobs(self):
    """Scheduler can enqueue scheduled jobs"""
    queue = Queue(connection=self.testconn)
    registry = ScheduledJobRegistry(queue=queue)
    job = Job.create('myfunc', connection=self.testconn)
    job.save()
    registry.schedule(job, datetime(2019, 1, 1, tzinfo=utc))
    scheduler = RQScheduler([queue], connection=self.testconn)
    scheduler.acquire_locks()
    scheduler.enqueue_scheduled_jobs()
    self.assertEqual(len(queue), 1)

    # After the due job is enqueued, the registry should be empty
    self.assertEqual(len(registry), 0)

    # Jobs scheduled in the far future should not be affected
    registry.schedule(job, datetime(2100, 1, 1, tzinfo=utc))
    scheduler.enqueue_scheduled_jobs()
    self.assertEqual(len(queue), 1)
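
The test above drives RQScheduler directly; in application code the same behaviour is usually reached through Queue.enqueue_at or Queue.enqueue_in (available since rq 1.2), with a worker typically started via `rq worker --with-scheduler` to pick up due jobs. A minimal sketch, where send_report is a hypothetical job function:

from datetime import datetime, timedelta, timezone
from redis import Redis
from rq import Queue

def send_report():      # hypothetical job function
    pass

q = Queue(connection=Redis())

# Run at an absolute time; the job sits in the queue's
# ScheduledJobRegistry until due, as the test above shows.
q.enqueue_at(datetime(2030, 1, 1, tzinfo=timezone.utc), send_report)

# Or run after a relative delay.
q.enqueue_in(timedelta(minutes=10), send_report)
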
github rq / rq / tests / test_queue.py
def test_enqueue_job_with_invalid_dependency(self):
    """Enqueuing a job fails if the dependency does not exist at all."""
    parent_job = Job.create(func=say_hello)
    # Without save(), the job is not visible to others

    q = Queue()
    with self.assertRaises(NoSuchJobError):
        q.enqueue_call(say_hello, depends_on=parent_job)

    with self.assertRaises(NoSuchJobError):
        q.enqueue_call(say_hello, depends_on=parent_job.id)

    self.assertEqual(q.job_ids, [])
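
This failure mode is the flip side of a working dependency chain: once the parent job has actually been persisted (which enqueue() does implicitly), depends_on holds the child in a deferred state until the parent finishes. A minimal sketch with hypothetical job functions:

from redis import Redis
from rq import Queue

def fetch_data():       # hypothetical job functions for illustration
    return [1, 2, 3]

def process_data():
    pass

q = Queue(connection=Redis())

# enqueue() saves the parent job, so it is a valid dependency target.
parent = q.enqueue(fetch_data)

# The child stays deferred until `parent` finishes successfully.
child = q.enqueue(process_data, depends_on=parent)
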
github mgk / rq-retry / tests / test_retry_worker.py
def test_failed_job_max_tries_2__retry_once_then_move_to_dlq():
    q = Queue()
    q2 = Queue('not_used')
    failed_q = get_failed_queue()
    dlq = Queue('dead_letter_queue')

    w = Worker([q])

    # Here the RetryWorker is not listening on an active queue: it will not
    # run any jobs, only try to requeue failed jobs.
    rw = RetryWorker([q2], retry_config=dict(
        max_tries=2, maint_interval=0, delays=[]))

    # run job that will fail
    job = q.enqueue(error_fun)
    w.work(burst=True)
    assert q.count == 0
    assert get_failed_queue().count == 1

    # run retry worker
    rw.work(burst=True)
github dracos / split-ticket / web.py
def split_ajax(fr, to, day, time, time_ret):
    bottle.response.set_header('Cache-Control', 'max-age=0')
    context = context_init(fr, to, day, time, time_ret)

    q = Queue(connection=R)
    job = q.fetch_job(get_job_id(context))

    done = job and (job.is_finished or job.is_failed)
    include_me = 1 if job else 0
    busy_workers = len([ w for w in Worker.all(connection=R) if w.get_state() == 'busy' ]) - include_me
    busy_workers += q.count
    return {
        'done': done,
        'refresh': max(1, busy_workers),
        'queue_size': max(0, busy_workers),
    }
github Parallels / rq-dashboard / rq_dashboard / web.py
def empty_queue(queue_name):
    q = Queue(queue_name)
    q.empty()
    return dict(status='OK')
github f5devcentral / aws-deployments / src / f5_aws / job_manager.py
def __init__(self):
	self.has_redis = True
	try:
		self.redis = Redis()

		# simple test that we can connect
		self.redis.keys()

		# connection okay, set up rq
		self.job_queue = rq.Queue(connection=self.redis)
	except ConnectionError:
		# Redis isn't running
		self.has_redis = False
		print('WARNING: Redis connection failed. Status not available for service catalog.')
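
A note on the connectivity probe above: redis-py connects lazily, so Redis() alone never touches the network; the first command issued does. keys() works as a probe but scans the whole keyspace; ping() is the cheaper, idiomatic health check. A minimal sketch of the same guard, assuming a default localhost Redis:

import redis
import rq

try:
    conn = redis.Redis()
    conn.ping()  # cheap round-trip; raises ConnectionError if Redis is down
    job_queue = rq.Queue(connection=conn)
    has_redis = True
except redis.exceptions.ConnectionError:
    # Redis isn't running; degrade gracefully as the example above does
    has_redis = False
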
github kentsay / falcon-api-redis / worker.py
import os
import redis
from app import config
from rq import Worker, Queue, Connection


"""RQ workers listen to document_write and document_delete queue."""
listen = ['document_write', 'document_delete']
redis_url = os.getenv('REDISTOGO_URL', 'redis://' + config.REDIS_URL + ':' + config.REDIS_PORT)

conn = redis.from_url(redis_url)

if __name__ == '__main__':
    with Connection(conn):
        worker = Worker(list(map(Queue, listen)))
        worker.work()
github Doreenruirui / okralact / app / __init__.py
from flask import Flask
import rq
from redis import Redis
from app.lib.job import Job
from app.config import Config


app = Flask(__name__)
app.config.from_object(Config)
app.redis = Redis.from_url('redis://')
app.task_queue = rq.Queue('ocr-tasks', connection=app.redis, default_timeout=43200)
app.task_queue.empty()
app.eval_queue = rq.Queue('ocr-evals', connection=app.redis, default_timeout=43200)
app.eval_queue.empty()
app.valid_queue = rq.Queue('ocr-tasks-valids', connection=app.redis, default_timeout=43200)
app.valid_queue.empty()

app.job_id2file = {}
app.job_file2id = {}
app.job_id2err = {}

app.eval_id2file = {}
app.eval_file2id = {}
app.eval_id2err = {}

app.valid_id2file = {}
app.valid_file2id = {}
app.valid_id2err = {}
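
Queues configured on the Flask app object like this are then used from view functions or CLI commands; enqueueing is just a method call on the stored Queue. A minimal sketch, where run_ocr is a hypothetical task function:

from app import app

def run_ocr(filename):      # hypothetical task function
    ...

@app.route('/submit/<filename>')
def submit(filename):
    # The queue's default_timeout (43200 s = 12 h) applies to this job.
    job = app.task_queue.enqueue(run_ocr, filename)
    app.job_id2file[job.get_id()] = filename
    return job.get_id()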