if sqlalchemy is not None:
    # Set up gevent compatibility in psycopg2
    import psycogreen.gevent
    psycogreen.gevent.patch_psycopg()

if manhole is not None:
    # Enable manhole for debugging. Use oneshot mode
    # for gevent compatibility
    manhole.cry = lambda message: log.info("MANHOLE: %s" % message)
    manhole.install(oneshot_on=signal.SIGUSR1)

m = Manager()
m.start()

complete = gevent.event.Event()

def shutdown():
    log.info("Signal handler: stopping")
    complete.set()

gevent.signal(signal.SIGTERM, shutdown)
gevent.signal(signal.SIGINT, shutdown)

while not complete.is_set():
    complete.wait(timeout=1)
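
# Editor's note: on recent gevent releases, calling gevent.signal() as a
# function is deprecated in favor of gevent.signal_handler(). A minimal,
# self-contained sketch of the same shutdown pattern under that API
# (assuming gevent >= 1.5; the names here are illustrative, not from the
# example above):
import signal

import gevent
import gevent.event

stop = gevent.event.Event()
gevent.signal_handler(signal.SIGTERM, stop.set)
gevent.signal_handler(signal.SIGINT, stop.set)
stop.wait()  # blocks the main greenlet until one of the signals arrives
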
def _get(self):
    if not len(self._data):
        if not self._event:
            self._event = gevent.event.Event()
        self._event.wait()
        return self._get()
    return self._data.pop(0)
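
# Editor's sketch: the producer side that pairs with a blocking _get like the
# one above is not shown. A hypothetical _put (the name and the event-swapping
# scheme are assumptions, not the original implementation) would append an
# item and set the event so a blocked reader retries:
def _put(self, item):
    self._data.append(item)
    if self._event:
        self._event.set()                    # wake any reader blocked in _get
        self._event = gevent.event.Event()   # fresh event for the next wait
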
def start_transfers(idx, curr_asset, num_transfers):
    curr_app = apps[idx]

    asset_manager = curr_app.raiden.get_manager_by_asset_address(curr_asset)
    all_paths = asset_manager.channelgraph.get_paths_of_length(
        source=curr_app.raiden.address,
        num_hops=2,
    )
    path = all_paths[0]
    target = path[-1]

    finished = gevent.event.Event()

    def _transfer():
        api = curr_app.raiden.api
        for i in range(num_transfers):
            async_result = api.transfer_async(curr_asset, amount, target)
            async_result.wait()
        finished.set()

    gevent.spawn(_transfer)
    return finished
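
# Editor's sketch: start_transfers returns an Event instead of the spawned
# greenlet, so a caller can fan out several batches and block until all of
# them signal completion. A hypothetical driver (the assets list and transfer
# counts are assumptions):
finished_events = [
    start_transfers(idx, asset, 10)
    for idx, asset in enumerate(assets)
]
gevent.wait(finished_events)  # gevent.wait accepts Events as well as greenlets
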
def start(self, timeout):
    """Start the backend and wait until it is initialized."""
    self._startup_pipe = os.pipe()
    pid = os.fork()
    if pid == 0:
        # child
        self.main()
        os._exit(0)

    # parent
    self.backend = pid
    self.listener.close()
    os.close(self._startup_pipe[1])
    self._sigchld_event = Event()
    event = get_hub().loop.signal(signal.SIGCHLD)
    event.start(self._signal_handler, signal.SIGCHLD)
    self.logger.debug('forked backend process, pid = %s', pid)
    self.logger.debug('waiting for it to initialize')
    status = self._get_startup_status(timeout)
    self.logger.debug('initialization status = %s', status)
    if not status:
        self.logger.debug('backend failed to start up after %d seconds', timeout)
        self.error_name = 'Timeout'
        self.error_message = 'A timeout occurred'
        self.error_detail = 'Backend failed to start up after %d seconds' % timeout
    elif status and status.startswith('ERROR:'):
        p1 = status.find(':')
        p2 = status.find(':', p1+1)
        p3 = status.find(':', p2+1)
        self.error_name = status[p1+1:p2]
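
# Editor's sketch: the _signal_handler registered on the SIGCHLD watcher above
# is not part of the excerpt. Given the surrounding code, a minimal version
# would simply set the Event so anything waiting on the backend learns that
# the child exited (a guess at the intent, not the original implementation):
def _signal_handler(self, signum):
    if signum == signal.SIGCHLD:
        self._sigchld_event.set()
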
self.jobs = OrderedDict()
self.verbose = verbose

# Keeps track of resource usage
self.resources = {
    # TODO: break this into CPU cores, memory usage, IO usage, etc.
    'parse_folder_task_pool': [Resource()],
    'create_db_task_pool': [Resource(max_value=2)],
    'analyze_db_task_pool': [Resource(max_value=4)],
    'inference_task_pool': [Resource(max_value=4)],
    'gpus': [Resource(identifier=index)
             for index in gpu_list.split(',')] if gpu_list else [],
}

self.running = False
self.shutdown = gevent.event.Event()
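
# Editor's sketch: a shutdown Event like the one above is normally consumed by
# the scheduler's main loop, which keeps dispatching work until the flag is
# set. A minimal, hypothetical loop (run() and the one-second tick are
# assumptions, not from the original class):
def run(self):
    self.running = True
    while not self.shutdown.is_set():
        # ... dispatch jobs against self.resources here ...
        self.shutdown.wait(timeout=1)  # sleep cooperatively, wake early on shutdown
    self.running = False
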
def __init__(self):
    super(ProcessMonitorThread, self).__init__()

    self._complete = gevent.event.Event()
    self._socket = None

def __init__(self, client, config):
    self.client = client
    self.config = config

    self.ready = Event()
    self.guilds_waiting_sync = 0

    self.me = None
    self.dms = HashMap()
    self.guilds = HashMap()
    self.channels = HashMap(weakref.WeakValueDictionary())
    self.users = HashMap(weakref.WeakValueDictionary())
    self.voice_clients = HashMap(weakref.WeakValueDictionary())
    self.voice_states = HashMap(weakref.WeakValueDictionary())

    # If message tracking is enabled, listen to those events
    if self.config.track_messages:
        self.messages = DefaultHashMap(lambda: deque(maxlen=self.config.track_messages_size))
        self.EVENTS += ['MessageDelete', 'MessageDeleteBulk']

    # The bound listener objects

def __init__(self, config, msg_reader, datastore_writer, hosts_ipset):
    super(DatastoreReader, self).__init__()
    self._config = config
    self.hosts_ipset = hosts_ipset
    self._msg_reader = msg_reader
    self._datastore_writer = datastore_writer
    # Whether we've been in sync with etcd at some point.
    self._been_in_sync = False
    # Keep track of the config loaded from etcd so we can spot if it
    # changes.
    self.last_global_config = None
    self.last_host_config = None
    # Events triggered by the DatastoreAPI Actor to tell us to load the
    # config and start polling. These are one-way flags.
    self.load_config = Event()
    self.begin_polling = Event()
    # Event that we trigger once the config is loaded.
    self.configured = Event()
    # Polling state initialized at poll start time.
    self.splitter = None
    # Next-hop IP addresses of our hosts, if populated in etcd.
    self.ipv4_by_hostname = {}
    # Forces a resync after the current poll if set. Safe to set from
    # another thread. Automatically reset to False after the resync is
    # triggered.
    self.resync_requested = False
    # True if we've been shut down.
    self.killed = False
    # Stats.
    self.read_count = 0
    self.ip_upd_count = 0

def __init__(self):
    self._data = []
    self._event = gevent.event.Event()
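
# Editor's sketch: a self-contained, runnable illustration of the core
# gevent.event.Event API (set, is_set, wait with a timeout) that all of the
# excerpts above rely on. Everything here is illustrative and not taken from
# any of the quoted projects:
import gevent
import gevent.event

ready = gevent.event.Event()

def worker():
    gevent.sleep(0.1)  # simulate some work
    ready.set()        # signal any waiters

gevent.spawn(worker)
print(ready.is_set())           # False: the worker has not run yet
print(ready.wait(timeout=1.0))  # True once the worker calls set()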