# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# Optionally register the Bokeh dashboard as a worker service.
# ``dashboard``, ``dashboard_prefix``, ``dashboard_address`` and
# ``services`` are expected to be in scope in the surrounding code.
if dashboard:
    try:
        from distributed.dashboard import BokehWorker
    except ImportError:
        # Bokeh (or the dashboard extra) is not installed; run without it.
        pass
    else:
        if dashboard_prefix:
            bokeh_service = (BokehWorker, {"prefix": dashboard_prefix})
        else:
            bokeh_service = BokehWorker
        services[("dashboard", dashboard_address)] = bokeh_service
# Parse the ``resources`` option string (e.g. "GPU=2,MEM=4e9") into a
# dict mapping resource name -> float amount; ``None`` when not given.
if resources:
    pairs = resources.replace(",", " ").split()
    resources = {name: float(amount)
                 for name, amount in (pair.split("=") for pair in pairs)}
else:
    resources = None

loop = IOLoop.current()
preload_argv = kwargs.get("preload_argv", [])
# NOTE(review): the caller-supplied ``kwargs`` is deliberately replaced
# wholesale here (only ``preload_argv`` is salvaged above).
kwargs = {"worker_port": None, "listen_address": None}
t = Nanny

# A scheduler must be reachable one way or another before starting.
if not scheduler and not scheduler_file and "scheduler-address" not in config:
    raise ValueError(
        "Need to provide scheduler address like\n"
        "dask-worker SCHEDULER_ADDRESS:8786"
    )
# NOTE(review): truncated fragment -- the body of this ``if`` is missing
# from this excerpt; presumably it rejected specifying both ``interface``
# and ``host``.  Confirm against the full source before relying on it.
if interface and host:
# Fragment of a client-side "graph -> futures" routine (start and end are
# outside this excerpt).  It normalizes task keys, inlines futures the
# client already holds, and submits the graph to the scheduler.
flatkeys = list(map(tokey, keys))
futures = {key: Future(key, self) for key in keyset}
# Graph entries whose value is already a Future (and are not themselves
# requested keys) are inlined below.
values = {k for k, v in dsk.items() if isinstance(v, Future)
and k not in keyset}
if values:
dsk = dask.optimize.inline(dsk, keys=values)
# unpack_remotedata: v[0] is the rewritten task, v[1] the set of extra
# keys it references.
d = {k: unpack_remotedata(v) for k, v in dsk.items()}
extra_keys = set.union(*[v[1] for v in d.values()]) if d else set()
dsk2 = str_graph({k: v[0] for k, v in d.items()}, extra_keys)
# Drop identity entries (key mapping to itself).
dsk3 = {k: v for k, v in dsk2.items() if k is not v}
if restrictions:
# Restriction keys/values are stringified for the wire protocol.
restrictions = keymap(tokey, restrictions)
restrictions = valmap(list, restrictions)
if loose_restrictions is not None:
loose_restrictions = list(map(tokey, loose_restrictions))
dependencies = {tokey(k): set(map(tokey, v[1])) for k, v in d.items()}
for s in dependencies.values():
for v in s:
# A dependency no longer tracked by the client means its future was
# cancelled -- fail fast rather than submit a broken graph.
if v not in self.futures:
raise CancelledError(v)
for k, v in dsk3.items():
dependencies[k] |= set(_deps(dsk3, v))
# NOTE(review): the message dict below is cut off mid-literal in this
# excerpt; see the later, complete copies of this fragment.
self._send_to_scheduler({'op': 'update-graph',
'tasks': valmap(dumps_task, dsk3),
def merge_prototypes(config):
    """Apply ``full_config(config)`` to every value of *config* via ``t.valmap``."""
    expand = full_config(config)
    return t.valmap(expand, config)
def md(template, *args, **kwargs):
    """Wrap ``str.format`` with naive Markdown escaping of args and kwargs."""
    # One-pass backslash-escape of the Markdown control characters.
    table = str.maketrans({c: '\\' + c for c in '*#_~`>'})

    def escape(s):
        return s.translate(table)

    return template.format(*map(escape, args), **toolz.valmap(escape, kwargs))
# Fragment of a scheduler "rebalance" coroutine (start and end of the
# method are outside this excerpt).  ``msgs`` is a list of
# (sender, recipient, key) moves to perform.
to_recipients = defaultdict(lambda: defaultdict(list))
to_senders = defaultdict(list)
for sender, recipient, key in msgs:
to_recipients[recipient][key].append(sender)
to_senders[sender].append(key)
# Ask every recipient worker to gather its assigned keys from the senders.
result = yield {r: self.rpc(addr=r).gather(who_has=v)
for r, v in to_recipients.items()}
for r, v in to_recipients.items():
self.log_event(r, {'action': 'rebalance',
'who_has': v})
self.log_event('all', {'action': 'rebalance',
'total-keys': len(keys),
'senders': valmap(len, to_senders),
'recipients': valmap(len, to_recipients),
'moved_keys': len(msgs)})
# NOTE(review): likely bug below -- the inner comprehension iterates
# ``result`` (its keys, i.e. worker addresses) instead of
# ``result.values()``, so ``'keys' in r`` is a substring test and
# ``r['keys']`` would index a string.  Confirm against the full source.
if not all(r['status'] == 'OK' for r in result.values()):
raise Return({'status': 'missing-data',
'keys': sum([r['keys'] for r in result
if 'keys' in r], [])})
# Bookkeeping: record the new replica and its byte size per recipient.
for sender, recipient, key in msgs:
self.who_has[key].add(recipient)
self.has_what[recipient].add(key)
self.worker_bytes[recipient] += self.nbytes.get(key,
DEFAULT_DATA_SIZE)
self.transition_log.append((key, 'memory', 'memory', {},
self._transition_counter, sender,
recipient))
# Fragment: tail of a client-side "graph -> futures" routine (its start is
# outside this excerpt).  Builds the dependency map and submits the graph.
if loose_restrictions is not None:
loose_restrictions = list(map(tokey, loose_restrictions))
# v[1] holds the extra keys each unpacked task references.
dependencies = {tokey(k): set(map(tokey, v[1])) for k, v in d.items()}
for s in dependencies.values():
for v in s:
# A dependency no longer tracked by the client means its future was
# cancelled -- fail fast rather than submit a broken graph.
if v not in self.futures:
raise CancelledError(v)
for k, v in dsk3.items():
dependencies[k] |= set(_deps(dsk3, v))
# Ship the serialized graph, dependencies and restrictions to the scheduler.
self._send_to_scheduler({'op': 'update-graph',
'tasks': valmap(dumps_task, dsk3),
'dependencies': valmap(list, dependencies),
'keys': list(flatkeys),
'restrictions': restrictions or {},
'loose_restrictions': loose_restrictions,
'client': self.id,
'priority': priority})
return futures
def _inputs_json(inputs):
    """Apply ``_transform`` to every value of ``inputs['kargs']`` and to each
    element of ``inputs['varargs']`` (stored under the ``'__varargs'`` key)."""
    out = {key: _transform(value) for key, value in inputs['kargs'].items()}
    out['__varargs'] = [_transform(arg) for arg in inputs['varargs']]
    return out
# Fragment: tail of a client-side "graph -> futures" routine (its start is
# outside this excerpt); duplicates an earlier fragment in this file.
if loose_restrictions is not None:
loose_restrictions = list(map(tokey, loose_restrictions))
# v[1] holds the extra keys each unpacked task references.
dependencies = {tokey(k): set(map(tokey, v[1])) for k, v in d.items()}
for s in dependencies.values():
for v in s:
# A dependency no longer tracked by the client means its future was
# cancelled -- fail fast rather than submit a broken graph.
if v not in self.futures:
raise CancelledError(v)
for k, v in dsk3.items():
dependencies[k] |= set(_deps(dsk3, v))
# Ship the serialized graph, dependencies and restrictions to the scheduler.
self._send_to_scheduler({'op': 'update-graph',
'tasks': valmap(dumps_task, dsk3),
'dependencies': valmap(list, dependencies),
'keys': list(flatkeys),
'restrictions': restrictions or {},
'loose_restrictions': loose_restrictions,
'client': self.id,
'priority': priority})
return futures
# Render profiling results as a Bokeh horizontal-bar timeline.
# ``results`` is an iterable of (key, task, start, end, id) tuples;
# output is written to ``file_path``.  NOTE(review): this definition is
# truncated in this excerpt (the ``data`` dict construction is cut off).
def visualize(results, palette='GnBu', file_path="profile.html",
tools="hover,save,reset,xwheel_zoom,xpan", **kwargs):
"""Wraps string.format with naive markdown escaping"""
output_file(file_path)
key, task, start, end, id = zip(*results)
# Group results by worker/thread id (field 4) and order ids by total
# elapsed time, largest first.
id_group = groupby(itemgetter(4), results)
diff = lambda v: v[3] - v[2]
f = lambda val: sum(map(diff, val))
total_id = [i[0] for i in reversed(sorted(valmap(f, id_group).items(), key=itemgetter(1)))]
name = map(label, task)
left = min(start)
right = max(end)
# NOTE(review): on Python 3 ``map(str, ...)`` is a lazy iterator; Bokeh's
# ``y_range`` may require a list -- confirm against the full source.
p = figure(title="Profile Results", y_range=map(str, range(len(total_id))),
x_range=[0, right - left],
tools=tools, **kwargs)
# Bar geometry: x is the bar midpoint relative to the earliest start.
data = {}
data['x'] = [(e - s)/2 + s - left for (s, e) in zip(start, end)]
data['y'] = [total_id.index(i) + 1 for i in id]
data['height'] = [1 for i in id]
data['width'] = [e - s for (s, e) in zip(start, end)]
def build_dc_product_map(metadata_json, products_json):
    """Build datacube metadata-type and product objects from raw JSON docs.

    Returns a tuple ``(metadata_types, products)``: dicts keyed like the
    corresponding inputs.  Raises ``ValueError`` when a product definition
    lacks a ``metadata_type`` or references an unknown one.
    """
    from datacube.model import metadata_from_doc, DatasetType

    metadata_types = {name: metadata_from_doc(doc)
                      for name, doc in metadata_json.items()}

    def make_product(name, doc):
        # Every product definition must name the metadata type it uses.
        mt_name = doc.get('metadata_type')
        if mt_name is None:
            raise ValueError('Missing metadata_type key in product definition')
        mt = metadata_types.get(mt_name)
        if mt is None:
            raise ValueError('No such metadata %s for product %s' % (mt_name, name))
        return DatasetType(mt, doc)

    products = {name: make_product(name, doc)
                for name, doc in products_json.items()}
    return metadata_types, products