# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_publish_to_rabbit(rabbit_manager, rabbit_config, mock_container):
    """ Binding and starting a ``Publisher`` should declare its exchange,
    queue and binding in the broker.
    """
    virtual_host = rabbit_config['vhost']

    container = mock_container
    container.service_name = "service"
    container.config = rabbit_config

    ctx_data = {'language': 'en', 'customheader': 'customvalue'}
    svc = Mock()
    worker_ctx = WorkerContext(
        container, svc, DummyProvider('method'), data=ctx_data
    )

    pub = Publisher(
        exchange=foobar_ex, declare=[foobar_queue]
    ).bind(container, "publish")
    pub.setup()
    pub.start()

    # the queue, exchange and binding should now exist in rabbit
    declared_exchanges = rabbit_manager.get_exchanges(virtual_host)
    declared_queues = rabbit_manager.get_queues(virtual_host)
    queue_bindings = rabbit_manager.get_queue_bindings(
        virtual_host, foobar_queue.name
    )

    assert "foobar_ex" in [exch['name'] for exch in declared_exchanges]
def test_consume_from_rabbit(rabbit_manager, rabbit_config, mock_container):
    """ Set up a mock container suitable for hosting a ``Consumer`` and
    bind one to it.
    """
    vhost = rabbit_config['vhost']

    # configure the mock container with everything a Consumer reads
    container = mock_container
    container.shared_extensions = {}
    container.worker_ctx_cls = WorkerContext
    container.service_name = "service"
    container.config = rabbit_config
    container.max_workers = 10
    container.accept = ['application/data']

    def spawn_managed_thread(method, identifier=None):
        # run managed threads as plain eventlet greenthreads
        return eventlet.spawn(method)

    container.spawn_managed_thread = spawn_managed_thread

    worker_ctx = WorkerContext(container, None, DummyProvider())

    consumer = Consumer(
        queue=foobar_queue,
        requeue_on_error=False,
    ).bind(container, "publish")
def test_dispatch_to_rabbit(rabbit_manager, rabbit_config, mock_container):
    """ Starting an ``EventDispatcher`` should declare the service events
    exchange but no queues; a capture queue is then added manually.
    """
    virtual_host = rabbit_config['vhost']

    container = mock_container
    container.shared_extensions = {}
    container.service_name = "srcservice"
    container.config = rabbit_config

    svc = Mock()
    worker_ctx = WorkerContext(container, svc, DummyProvider())

    dispatcher = EventDispatcher().bind(container, 'dispatch')
    dispatcher.setup()
    dispatcher.start()

    # the events exchange is declared, but no queues exist yet
    declared_exchanges = rabbit_manager.get_exchanges(virtual_host)
    declared_queues = rabbit_manager.get_queues(virtual_host)
    exchange_names = [exch['name'] for exch in declared_exchanges]
    assert "srcservice.events" in exchange_names
    assert declared_queues == []

    # manually add a queue to capture the events
    rabbit_manager.create_queue(virtual_host, "event-sink", auto_delete=True)
    rabbit_manager.create_queue_binding(
        virtual_host, "srcservice.events", "event-sink",
        routing_key="eventtype")
def test_headers(self, mock_container, producer):
    """ Headers can be provided at instantiation time, and are merged with
    Nameko headers.
    """
    mock_container.config = {'AMQP_URI': 'memory://localhost'}
    mock_container.shared_extensions = {}
    mock_container.service_name = "service-name"

    # a real worker context is used so that nameko headers are generated
    svc = Mock()
    entrypoint = Mock(method_name="method")
    worker_ctx = WorkerContext(
        mock_container, svc, entrypoint, data={'context': 'data'}
    )

    nameko_headers = {
        'nameko.context': 'data',
        'nameko.call_id_stack': ['service-name.method.0'],
    }

    custom_headers = {'foo': Mock()}
    rpc_proxy = RpcProxy(
        "service-name", headers=custom_headers
    ).bind(mock_container, "service_rpc")

    rpc_proxy.setup()
    rpc_proxy.rpc_reply_listener.setup()
def test_without_origin(self, mock_container):
    """ A worker context built from empty context data has no origin
    call id.
    """
    mock_container.service_name = "foo"

    ctx = WorkerContext(
        mock_container, Mock(), DummyProvider("bar"), data={}
    )
    assert ctx.origin_call_id is None
# Verifies that a Publisher bound to an exchange declares it on setup and
# that publishing attaches the nameko call-id-stack header.
# NOTE(review): indentation has been stripped from this chunk and the block
# is cut off mid-statement at the end (the ``headers`` dict is never closed).
def test_publish_to_exchange(
patch_maybe_declare, mock_channel, mock_producer, mock_container
):
container = mock_container
container.config = {'AMQP_URI': 'memory://'}
container.service_name = "srcservice"
service = Mock()
worker_ctx = WorkerContext(container, service, DummyProvider("publish"))
publisher = Publisher(exchange=foobar_ex).bind(container, "publish")
# test declarations: setup() should declare exactly the bound exchange
publisher.setup()
assert patch_maybe_declare.call_args_list == [
call(foobar_ex, mock_channel)
]
# test publish: the dependency injected into the worker forwards kwargs
msg = "msg"
service.publish = publisher.get_dependency(worker_ctx)
service.publish(msg, publish_kwarg="value")
# expected nameko headers for this worker context
headers = {
'nameko.call_id_stack': ['srcservice.publish.0']
def test_parallel_executor_injection():
    """ The example service exposes exactly one injection: a
    ``ParallelProvider`` named "parallel".
    """
    config = Mock()
    container = ServiceContainer(ExampleService, WorkerContext, config)

    injections = container.injections
    assert len(injections) == 1

    injection = injections[0]
    assert injection.name == "parallel"
    assert isinstance(injection, ParallelProvider)
# Fragment of a call-id-stack test: the enclosing ``def`` (and the
# ``container``/``service`` locals) are outside this chunk.
# A fresh context gets a call id of the form <service>.<method>.<counter>.
context = WorkerContext(container, service, DummyProvider("bar"))
assert context.call_id == 'baz.bar.0'
assert context.call_id_stack == ['baz.bar.0']
# Build stack: passing the previous stack as context data appends the new
# call id to it rather than starting over.
context = WorkerContext(
container, service, DummyProvider("foo"),
data={'call_id_stack': context.call_id_stack}
)
assert context.call_id == 'baz.foo.1'
assert context.call_id_stack == ['baz.bar.0', 'baz.foo.1']
# Long stack: an inherited 10-element stack is preserved in full, with the
# new call id appended at the end.
many_ids = [str(i) for i in range(10)]
context = WorkerContext(
container, service, DummyProvider("long"),
data={'call_id_stack': many_ids}
)
expected = many_ids + ['baz.long.2']
assert context.call_id_stack == expected
@rpc
def get_context_data(self, name):
# Expose a context value over RPC; delegates to ``get_context_value``,
# which is defined on the service elsewhere in the file.
return self.get_context_value(name)
@rpc
def sleep(self, seconds=0):
# Cooperatively sleep (via eventlet) for ``seconds``, then echo it back.
eventlet.sleep(seconds)
return seconds
class ExampleError(Exception):
    """Example exception used by the test services."""
# WorkerContext subclass declaring "custom_header" as an additional context
# key — presumably so that header is propagated between calls; confirm
# against WorkerContext's handling of ``context_keys``.
class CustomWorkerContext(WorkerContext):
context_keys = ("custom_header",)
def test_proxy(container_factory, rabbit_config):
    """ A ``ServiceRpcProxy`` can call a running service, and the same
    proxy instance can be reused for a second call.
    """
    container = container_factory(FooService, rabbit_config)
    container.start()

    with ServiceRpcProxy('foobar', rabbit_config) as proxy:
        assert proxy.spam(ham='eggs') == 'eggs'
        # second call through the same proxy exercises re-use
        assert proxy.spam(ham='eggs') == 'eggs'
def test_proxy_manual_start_stop(container_factory, rabbit_config):
# NOTE(review): this test is truncated here — the remainder of its body is
# outside this chunk.
container = container_factory(FooService, rabbit_config)
``context_data`` is used to initialize a ``WorkerContext``.
``handle_result`` is an optional function which may be passed
in by the entrypoint. It is called with the result returned
or error raised by the service method. If provided it must return a
value for ``result`` and ``exc_info`` to propagate to dependencies;
these may be different to those returned by the service method.
"""
# Refuse to spawn new work once a kill is in progress.
# NOTE(review): callers are presumably expected to handle
# ContainerBeingKilled — confirm at the entrypoints.
if self._being_killed:
_log.info("Worker spawn prevented due to being killed")
raise ContainerBeingKilled()
# each worker gets a fresh service instance and its own context
service = self.service_cls()
worker_ctx = WorkerContext(
self, service, entrypoint, args, kwargs, data=context_data
)
_log.debug('spawning %s', worker_ctx)
# run the worker in the container's pool and track the greenthread so it
# can be found again (e.g. for kill/cleanup) via _worker_threads
gt = self._worker_pool.spawn(
self._run_worker, worker_ctx, handle_result
)
gt.link(self._handle_worker_thread_exited, worker_ctx)
self._worker_threads[worker_ctx] = gt
return worker_ctx