def handle_signal(loop, term_ev):
    # First signal requests a graceful shutdown; a second one force-exits.
    if term_ev.is_set():
        log.warning('Forced shutdown!')
        sys.exit(1)
    else:
        term_ev.set()
        loop.stop()
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
loop = asyncio.get_event_loop()
log.info('Sorna Manager {}'.format(__version__))
server = loop.run_until_complete(
    aiozmq.create_zmq_stream(
        zmq.REP,
        bind='tcp://*:{0}'.format(config.manager_port),
        loop=loop))
my_ip = loop.run_until_complete(get_instance_ip())
log.info(_f('serving on tcp://{0}:{1}', my_ip, config.manager_port))
log.info(_f('using redis on tcp://{0}:{1}', *config.redis_addr))
kernel_ip_override = config.kernel_ip_override
registry = InstanceRegistry(config.redis_addr, loop=loop)
loop.run_until_complete(registry.init())
term_ev = asyncio.Event()
term_barrier = AsyncBarrier(3)
loop.add_signal_handler(signal.SIGINT, handle_signal, loop, term_ev)
loop.add_signal_handler(signal.SIGTERM, handle_signal, loop, term_ev)
asyncio.ensure_future(handle_api(loop, term_ev, term_barrier, server, registry))
asyncio.ensure_future(handle_notifications(loop, term_ev, term_barrier, registry))
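# A minimal client sketch for the REP endpoint bound above, assuming the
# manager speaks a request/reply protocol over multipart frames. The b'ping'
# payload is hypothetical; the real Sorna/Backend.AI message format differs.
async def query_manager(port):
    req = await aiozmq.create_zmq_stream(
        zmq.REQ, connect='tcp://127.0.0.1:{0}'.format(port))
    req.write([b'ping'])       # one request...
    reply = await req.read()   # ...one matching reply (REQ/REP lockstep)
    req.close()
    return reply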
async def connect_streams(kernel):
    # stream_key and aiozmq_sock come from the enclosing scope; aiozmq_sock
    # is assumed to be an alias of aiozmq.create_zmq_stream.
    kernel_ip = urlparse(kernel.agent_addr).hostname
    stdin_addr = f'tcp://{kernel_ip}:{kernel.stdin_port}'
    log.debug(f'stream_pty({stream_key}): stdin: {stdin_addr}')
    stdin_sock = await aiozmq_sock(zmq.PUB, connect=stdin_addr)
    stdin_sock.transport.setsockopt(zmq.LINGER, 100)
    stdout_addr = f'tcp://{kernel_ip}:{kernel.stdout_port}'
    log.debug(f'stream_pty({stream_key}): stdout: {stdout_addr}')
    stdout_sock = await aiozmq_sock(zmq.SUB, connect=stdout_addr)
    stdout_sock.transport.setsockopt(zmq.LINGER, 100)
    stdout_sock.transport.subscribe(b'')  # subscribe to all published frames
    return stdin_sock, stdout_sock
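# Hypothetical usage sketch for the streams returned above: forward PTY
# output frames to some sink. 'sink' is a placeholder callable; the real
# code relays these frames to a websocket connection.
async def pump_stdout(stdout_sock, sink):
    while True:
        try:
            frames = await stdout_sock.read()
        except aiozmq.ZmqStreamClosed:
            break
        for frame in frames:
            await sink(frame)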
self.started_at = time.monotonic()
scale = await self.scale(1)
if scale.to_dict()['spec']['replicas'] == 0:
    log.error('Scaling failed! Response body: {0}', scale)
    raise ScaleFailedError
if scale.to_dict()['status']['replicas'] == 0:
    # The deployment was accepted but no pod is running yet; poll until up.
    while not await self.is_scaled():
        await asyncio.sleep(0.5)
self.input_stream = await aiozmq.create_zmq_stream(
    zmq.PUSH, connect=f'tcp://{self.cluster_ip}:{self.repl_in_port}')
self.input_stream.transport.setsockopt(zmq.LINGER, 50)
self.output_stream = await aiozmq.create_zmq_stream(
    zmq.PULL, connect=f'tcp://{self.cluster_ip}:{self.repl_out_port}')
self.output_stream.transport.setsockopt(zmq.LINGER, 50)
log.debug('Connected to {0} repl ({1})', self.deployment_name, self.cluster_ip)
self.read_task = asyncio.ensure_future(self.read_output())
if self.exec_timeout > 0:
    self.watchdog_task = asyncio.ensure_future(self.watchdog())
else:
    self.watchdog_task = None
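# A minimal sketch of what read_output() might look like, assuming the REPL
# container pushes result frames onto the PULL socket opened above. The
# handle_frames() dispatcher is hypothetical, not taken from the source.
async def read_output(self):
    while True:
        try:
            frames = await self.output_stream.read()
        except aiozmq.ZmqStreamClosed:
            break
        self.handle_frames(frames)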
async def subscribe(self, remote_host, remote_cpu_idx):
    # Each monitored CPU publishes on its own port (54000 + cpu index).
    self.remote_addr = 'tcp://{}:{}'.format(remote_host, 54000 + remote_cpu_idx)
    self._conn = await aiozmq.create_zmq_stream(zmq.SUB, loop=self._loop,
                                                connect=self.remote_addr)
    self._conn.transport.setsockopt(zmq.SUBSCRIBE, b'')
    while True:
        try:
            recv_data = await self._conn.read()
        except asyncio.CancelledError:
            self._conn.close()
            break
        # Messages carry three frames: cpu index, elapsed seconds, histogram.
        cpu_idx = int(recv_data[0].decode())
        elapsed_sec = int(recv_data[1].decode())
        # TODO: convert to sparse DataFrame
        histogram = recv_data[2].decode().splitlines()
        self.records.append((cpu_idx, elapsed_sec, histogram))
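# Hypothetical publisher counterpart: a PUB socket emitting the three frames
# (cpu index, elapsed seconds, newline-separated histogram) that subscribe()
# consumes above. Bucket contents here are placeholders.
async def publish_stats(cpu_idx, base_port=54000):
    pub = await aiozmq.create_zmq_stream(
        zmq.PUB, bind='tcp://*:{}'.format(base_port + cpu_idx))
    elapsed_sec = 0
    while True:
        buckets = ['0 12', '1 7']  # placeholder histogram lines
        pub.write([str(cpu_idx).encode(),
                   str(elapsed_sec).encode(),
                   '\n'.join(buckets).encode()])
        await asyncio.sleep(1)
        elapsed_sec += 1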
await self.update_status('starting')
if 'vfolder-pv' in self.config.keys():
    await self.create_vfolder_pv()
if self.config['registry']['type'] == 'ecr':
    await self.get_aws_credentials()
self.redis_stat_pool = await aioredis.create_redis_pool(
    self.config['redis']['addr'].as_sockaddr(),
    password=(self.config['redis']['password']
              if self.config['redis']['password'] else None),
    timeout=3.0,
    encoding='utf8',
    db=0)  # REDIS_STAT_DB in backend.ai-manager
self.event_sock = await aiozmq.create_zmq_stream(
    zmq.PUSH, connect=f"tcp://{self.config['event']['addr']}")
self.event_sock.transport.setsockopt(zmq.LINGER, 50)
await self.scan_images(None)
# TODO: Create k8s-compatible stat collector
await self.check_krunner_pv_status()
# Send the first heartbeat.
self.hb_timer = aiotools.create_timer(self.heartbeat, 3.0)
self.clean_timer = aiotools.create_timer(self.clean_old_kernels, 10.0)
self.scan_images_timer = aiotools.create_timer(self.scan_images, 60.0)
# Start serving requests.
rpc_addr = self.config['agent']['rpc-listen-addr']
agent_addr = f"tcp://{rpc_addr}"
self.rpc_server = await aiozmq.rpc.serve_rpc(self, bind=agent_addr)
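# How a peer would invoke this RPC server via the aiozmq.rpc API. The
# 'ping' method name is illustrative only; the callable methods are those
# exposed on the handler object with the @aiozmq.rpc.method decorator.
async def call_agent(agent_addr):
    client = await aiozmq.rpc.connect_rpc(connect=agent_addr)
    try:
        return await client.call.ping()
    finally:
        client.close()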
@asyncio.coroutine
def main_router(addr, name=None):
    """Asynchronous client routine to process read files.

    :param addr: server address.
    :param name: a name for the client.
    :returns: None; sends results back to the server.
    """
    if name is None:
        name = uuid.uuid4()
    logging.info("Starting client {} connected to {}".format(name, addr))
    router = yield from aiozmq.create_zmq_stream(zmq.ROUTER)
    yield from router.transport.connect(addr)
    event_loop = asyncio.get_event_loop()
    # PoolExecutor is assumed to alias a concurrent.futures executor class.
    executor = PoolExecutor(max_workers=1)
    event_loop.set_default_executor(executor)
    while True:
        try:
            data = yield from router.read()
        except aiozmq.ZmqStreamClosed:
            logging.info('Client {} got stream closed'.format(name))
            break
        if data[1] == b'FINISHED':
            logging.info('Client {} got finished signal'.format(name))