How to use the kombu.Consumer class in kombu

To help you get started, we’ve selected a few kombu examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github celery / kombu / t / unit / transport / test_redis.py View on Github external
def test_publish__consume(self):
        """Publish a message and verify it is delivered to a registered consumer callback.

        NOTE(review): this snippet is truncated by the page scrape — the body of the
        final ``with pytest.raises(socket.timeout):`` block (and any cleanup) is missing.
        """
        connection = Connection(transport=Transport)
        channel = connection.channel()
        producer = Producer(channel, self.exchange, routing_key='test_Redis')
        consumer = Consumer(channel, queues=[self.queue])

        producer.publish({'hello2': 'world2'})
        _received = []

        def callback(message_data, message):
            # Record the decoded payload and acknowledge so the broker drops it.
            _received.append(message_data)
            message.ack()

        consumer.register_callback(callback)
        consumer.consume()

        # The channel must be registered with the transport's polling cycle.
        assert channel in channel.connection.cycle._channels
        try:
            connection.drain_events(timeout=1)
            assert _received
            # Draining again with nothing pending should time out (body truncated).
            with pytest.raises(socket.timeout):
github celery / kombu / t / unit / test_messaging.py View on Github external
def test_manual_declare(self):
        """With auto_declare=False nothing is declared until declare() is called,
        and declare() itself must never start consuming."""
        chan = self.connection.channel()
        original_queue = Queue('qname', self.exchange, 'rkey')
        cons = Consumer(chan, original_queue, auto_declare=False)

        # The consumer binds *copies* of the queue/exchange; originals stay unbound.
        bound = cons.queues[0]
        assert bound is not original_queue
        assert bound.is_bound
        assert bound.exchange.is_bound
        assert bound.exchange is not self.exchange

        # Before declare(): no declaration or consume calls hit the channel.
        for meth in ('exchange_declare', 'queue_declare', 'basic_consume'):
            assert meth not in chan

        cons.declare()

        # After declare(): entities are declared and bound...
        for meth in ('exchange_declare', 'queue_declare', 'queue_bind'):
            assert meth in chan
        # ...but consuming has not been started.
        assert 'basic_consume' not in chan
github FORTH-ICS-INSPIRE / artemis / backend / core / database.py View on Github external
# NOTE(review): fragment — the opening of the producer.publish(...) call is cut off
# by the page scrape; these are its remaining keyword arguments.
reply_to=callback_queue.name,
                correlation_id=self.correlation_id,
                retry=True,
                declare=[
                    # Request queue: non-durable, prioritized; consumers attach with x-priority 4.
                    Queue(
                        "config-request-queue",
                        durable=False,
                        max_priority=4,
                        consumer_arguments={"x-priority": 4},
                    ),
                    callback_queue,
                ],
                priority=4,
                serializer="ujson",
            )
            # Consume replies on the callback queue; only ujson payloads are accepted.
            with Consumer(
                self.connection,
                on_message=self.handle_config_request_reply,
                queues=[callback_queue],
                accept=["ujson"],
            ):
                # Block until the reply handler populates self.rules.
                while self.rules is None:
                    self.connection.drain_events()
github FORTH-ICS-INSPIRE / artemis / backend / core / monitor.py View on Github external
def config_request_rpc(self):
            """RPC-style configuration request.

            Publishes an empty message to 'config_request_queue' and blocks,
            draining events, until the reply handler has populated both
            self.rules and self.monitors.
            """
            self.correlation_id = uuid()
            # Uniquely named, non-durable reply queue with elevated consumer priority.
            reply_queue = Queue(
                uuid(),
                durable=False,
                max_priority=2,
                consumer_arguments={'x-priority': 2},
            )

            request_queue = Queue('config_request_queue', durable=False, max_priority=2)
            self.producer.publish(
                '',
                exchange='',
                routing_key='config_request_queue',
                reply_to=reply_queue.name,
                correlation_id=self.correlation_id,
                retry=True,
                declare=[reply_queue, request_queue],
                priority=2,
            )

            reply_consumer = Consumer(
                self.connection,
                on_message=self.handle_config_request_reply,
                queues=[reply_queue],
                no_ack=True,
            )
            with reply_consumer:
                # handle_config_request_reply sets both attributes on success.
                while self.rules is None and self.monitors is None:
                    self.connection.drain_events()
github viatoriche / microservices / microservices / queues / service.py View on Github external
:param autoack: if True message.ack() after callback
        :type handler: callable object
        :param name: name of queue
        :type name: str
        """

        # NOTE(review): fragment — the enclosing `def` line (and the parameters
        # prefetch_count / prefetch_size it presumably declares) is cut off above.
        if self.with_pool:
            # With a worker pool, optionally cap prefetch at the worker count.
            if self.workers_override_prefetch:
                prefetch_count = self.workers
            rule = Rule(name, handler, self.logger, autoack=autoack,
                        deferred_callbacks=self.deferred_callbacks,
                        pool=self.pool, **kwargs)
        else:
            rule = Rule(name, handler, self.logger, autoack=autoack, **kwargs)
        self.connect()
        # One consumer per rule; auto_declare ensures the queue exists.
        consumer = Consumer(self.connection, queues=[Queue(rule.name)],
                            callbacks=[rule.callback], auto_declare=True)
        consumer.qos(prefetch_count=prefetch_count, prefetch_size=prefetch_size)
        self.consumers.append(consumer)
        self.logger.debug('Rule "%s" added!', rule.name)
github BrighterCommand / Brighter / Brighter / brightmntr / brightmntr / worker.py View on Github external
def _read_message(body, message):
            # Consumer callback: log, print the monitoring event to stdout, then ack.
            self._logger.debug("Monitoring event received at: %s headers: %s payload: %s", datetime.utcnow().isoformat(), message.headers, message.payload)
            now = datetime.utcnow().isoformat()
            activity = body
            print("{time}: {event}".format(time=now, event=activity))
            message.ack()

        # read the next batch number of monitoring messages from the control bus
        # evaluate for color coding (error is red)
        # print to stdout
        # NOTE(review): fragment — the enclosing method's `def` line is cut off
        # above; names like self._running and _drain are defined elsewhere.

        connection = BrokerConnection(hostname=self._amqp_uri)
        with connections[connection].acquire(block=True) as conn:
            self._logger.debug('Got connection: %s', conn.as_uri())
            with Consumer(conn, [self._monitoring_queue], callbacks=[_read_message], accept=['json', 'text/plain']) as consumer:
                self._running.set()
                # Wrap draining in conn.ensure so transient errors are retried.
                ensure_kwargs = self.RETRY_OPTIONS.copy()
                ensure_kwargs['errback'] = _drain_errors
                lines = 0
                updates = 0
                while self._running.is_set():
                    # page size number before we sleep
                    safe_drain = conn.ensure(consumer, _drain, **ensure_kwargs)
                    safe_drain(conn, DRAIN_EVENTS_TIMEOUT)
                    lines += 1
                    if lines == self.page_size:
                        # Stop once past the update limit; otherwise pause and reset the page.
                        if self.limit != -1 and updates > self.limit:
                            self._running.clear()
                        else:
                            sleep(self.delay_between_refreshes)
                            lines = 0
github FORTH-ICS-INSPIRE / artemis / backend / core / postgresql_db.py View on Github external
def config_request_rpc(self):
            """Request the current configuration over RPC and wait for the reply
            on a uniquely named temporary queue."""
            self.correlation_id = uuid()
            # High-priority, non-durable reply queue with a random (uuid) name.
            callback_queue = Queue(
                uuid(),
                durable=False,
                max_priority=2,
                consumer_arguments={'x-priority': 2},
            )

            publish_kwargs = dict(
                exchange='',
                routing_key='config_request_queue',
                reply_to=callback_queue.name,
                correlation_id=self.correlation_id,
                retry=True,
                declare=[
                    callback_queue,
                    Queue('config_request_queue', durable=False, max_priority=2),
                ],
                priority=2,
            )
            self.producer.publish('', **publish_kwargs)

            with Consumer(self.connection,
                          on_message=self.handle_config_request_reply,
                          queues=[callback_queue],
                          no_ack=True):
                # Block until the reply handler fills in self.rules.
                while self.rules is None:
                    self.connection.drain_events()
github DataIntegrationAlliance / data_integration_celery / tasks / utils / check.py View on Github external
# NOTE(review): fragment — the enclosing `with Connection(...)` / function header
# is cut off above this producer section.
with conn.channel() as channel:
            producer = Producer(channel)
            producer.publish({'hello': 'world'},
                             retry=True,
                             exchange=task_queue.exchange,
                             routing_key=task_queue.routing_key,
                             declare=[task_queue])

    def get_message(body, message):
        # Consumer callback: print the payload; ack is skipped (no_ack below).
        print("receive message: %s" % body)
        # message.ack()

    # Consumer side.
    with Connection(url) as conn:
        with conn.channel() as channel:
            # NOTE(review): `queues` is usually a *list* of queues — confirm a bare
            # Queue works here; also no drain_events loop follows consume(), so
            # callbacks may never fire in this truncated snippet.
            consumer = Consumer(channel, queues=task_queue, callbacks=[get_message, ], prefetch_count=10)
            consumer.consume(no_ack=True)
github FORTH-ICS-INSPIRE / artemis / backend / core / mitigation.py View on Github external
# NOTE(review): fragment — the opening of the producer.publish(...) call is cut
# off by the page scrape; these are its remaining keyword arguments.
reply_to=callback_queue.name,
                correlation_id=self.correlation_id,
                retry=True,
                declare=[
                    # Request queue: non-durable, prioritized; consumers use x-priority 4.
                    Queue(
                        "config-request-queue",
                        durable=False,
                        max_priority=4,
                        consumer_arguments={"x-priority": 4},
                    ),
                    callback_queue,
                ],
                priority=4,
                serializer="ujson",
            )
            # Consume the reply on the callback queue, accepting only ujson payloads.
            with Consumer(
                self.connection,
                on_message=self.handle_config_request_reply,
                queues=[callback_queue],
                accept=["ujson"],
            ):
                # Block until the reply handler populates self.rules.
                while self.rules is None:
                    self.connection.drain_events()
github BrighterCommand / Brighter / Brightside / arame / gateway.py View on Github external
def purge(self, timeout: int = 5) -> None:
        """Purge all pending messages from the gateway's queue.

        Acquires a pooled connection and calls Consumer.purge(), retrying
        transient channel errors via conn.ensure with the class RETRY_OPTIONS.

        NOTE(review): `timeout` is currently unused in this body — confirm
        whether it should be threaded into the retry options.
        """

        def _purge_errors(exc, interval):
            # errback for conn.ensure: log and let kombu retry after `interval`.
            self._logger.error('Purging error: %s, will retry triggering in %s seconds', exc, interval, exc_info=True)

        def _purge_messages(cnsmr: BrightsideConsumer):
            cnsmr.purge()
            self._message = None

        connection = BrokerConnection(hostname=self._amqp_uri)
        with connections[connection].acquire(block=True) as conn:
            self._logger.debug('Got connection: %s', conn.as_uri())
            # BUG FIX: kombu.Consumer's first positional argument is the
            # channel/connection; the original passed the queue list there,
            # leaving the consumer without a channel.
            # NOTE(review): _purge_messages(consumer) does not match the
            # (body, message) signature of a message callback, but purge()
            # never drains events, so the registered callback is never invoked.
            with Consumer(conn, [self._queue], callbacks=[_purge_messages]) as consumer:
                ensure_kwargs = self.RETRY_OPTIONS.copy()
                ensure_kwargs['errback'] = _purge_errors
                safe_purge = conn.ensure(consumer, _purge_messages, **ensure_kwargs)
                safe_purge(consumer)