async_exception: Exception
    Default is botocore.exceptions.ReadTimeoutError. This is the exception thrown by the async function when
    it times out waiting for a return; in our case, this counts as a success. The default is chosen to work
    with the podpac.managers.Lambda node.

Notes
------
In some cases, where the input and output coordinates of the source node are not the same (such as reduce nodes)
and fill_output is True, the user may need to specify 'output' as part of the eval call.
"""
source = NodeTrait().tag(attr=True)
chunks = tl.Dict().tag(attr=True)
fill_output = tl.Bool(True).tag(attr=True)
sleep_time = tl.Float(1).tag(attr=True)
no_worker_exception = tl.Type(botocore.exceptions.ClientError).tag(attr=True)
async_exception = tl.Type(botocore.exceptions.ReadTimeoutError).tag(attr=True)
def check_worker_available(self):
    return True

def eval_source(self, coordinates, coordinates_index, out, i, source=None):
    if source is None:
        source = self.source
        # Make a copy to prevent any possibility of memory corruption
        source = Node.from_definition(source.definition)

    success = False
    o = None
    while not success:
        if self.check_worker_available():
            try:
                o = source.eval(coordinates, out)
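For illustration only, a minimal sketch of how a wrapper node defining eval_source above might be configured and evaluated; the Parallel class name, my_source, coords, and preallocated_output are placeholders, not part of the snippet.

# Hypothetical configuration of the chunked/parallel wrapper shown above.
parallel = Parallel(              # placeholder name for the class defining eval_source
    source=my_source,             # any podpac Node (placeholder)
    chunks={"lat": 100},          # evaluate 100 'lat' rows per worker call
    fill_output=True,
    sleep_time=1.0,               # seconds between worker-availability checks
)

# Per the Notes above: when the source's input and output coordinates differ
# (e.g. reduce nodes) and fill_output is True, pass the output array explicitly.
result = parallel.eval(coords, output=preallocated_output)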
class InProcessKernelClient(KernelClient):
    """A client for an in-process kernel.

    This class implements the interface of
    `jupyter_client.clientabc.KernelClientABC` and allows
    (asynchronous) frontends to be used seamlessly with an in-process kernel.

    See `jupyter_client.client.KernelClient` for docstrings.
    """

    # The classes to use for the various channels.
    shell_channel_class = Type(InProcessChannel)
    iopub_channel_class = Type(InProcessChannel)
    stdin_channel_class = Type(InProcessChannel)
    control_channel_class = Type(InProcessChannel)
    hb_channel_class = Type(InProcessHBChannel)

    kernel = Instance('ipykernel.inprocess.ipkernel.InProcessKernel',
                      allow_none=True)

    #--------------------------------------------------------------------------
    # Channel management methods
    #--------------------------------------------------------------------------

    @default('blocking_class')
    def _default_blocking_class(self):
        from .blocking import BlockingInProcessKernelClient
        return BlockingInProcessKernelClient

    def get_connection_info(self):
        d = super(InProcessKernelClient, self).get_connection_info()
        d['kernel'] = self.kernel
        return d
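A short usage sketch (not part of the snippet) of driving an in-process kernel through this client; it assumes the manager's default blocking client and the public ipykernel/jupyter_client API.

from ipykernel.inprocess import InProcessKernelManager

km = InProcessKernelManager()
km.start_kernel()

kc = km.client()                      # bound to km.kernel; blocking by default
kc.start_channels()

kc.execute("a = 1 + 1")
reply = kc.get_shell_msg(timeout=5)   # execute_reply arrives on the shell channel
print(reply["content"]["status"])     # expected to be 'ok'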
    Path to host's private SSH Key.
    If set to None, an ephemeral key is generated for this session
    """,
    config=True
)

debug = Bool(
    False,
    help="""
    Turn on debug logging
    """,
    config=True
)

authenticator_class = Type(
    GitHubAuthenticator,
    klass=Authenticator,
    config=True,
    help="""
    Class used to perform authentication.
    Should be a subclass of kubessh.authentication.Authenticator.
    """
)

default_namespace = Unicode(
    help="""
    Default namespace to spawn user shells to
    """,
    config=True
)
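As a sketch of how these config=True traits could be set from a traitlets config file; the application class name KubeSSH and the dotted authenticator path are assumptions, not taken from the snippet.

# Hypothetical kubessh_config.py
c.KubeSSH.debug = True
c.KubeSSH.default_namespace = "kubessh-shells"

# authenticator_class is a Type trait with klass=Authenticator, so it accepts
# either a subclass of kubessh.authentication.Authenticator or a dotted import
# string (path below is an assumption).
c.KubeSSH.authenticator_class = "kubessh.authentication.github.GitHubAuthenticator"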
from typing import Dict as Dictionary
from typing import List as Listing
from typing import Type, Optional, Callable, Iterator

from traitlets import HasTraits, Dict, List, This, default
from traitlets import Type as Class   # traitlets' Type trait, aliased so it does not shadow typing.Type

from yuuno.clip import Clip, T
class Registry(HasTraits):
    """
    Stores which Clip-Type is responsible for wrapping
    specific applications.
    """

    clip_types: Dictionary = Dict(value_trait=Class(klass=Clip), key_trait=Class())
    sub_registries: Listing['Registry'] = List(This())

    @default("clip_types")
    def _init_cliptypes(self) -> Dictionary[Type[Clip], Type[T]]:
        return {}

    @default("sub_registries")
    def _init_subregistries(self) -> Listing['Registry']:
        return []

    def all_types(self) -> Iterator[Type]:
        """
        A generator that returns all supported types.
        """
        yield from self.clip_types.keys()
        for registry in self.sub_registries:
            yield from registry.all_types()
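A small illustrative use of the Registry above; DummyFrames and DummyClip are placeholder classes invented for the example.

class DummyFrames:                 # stands in for an application's native clip type
    pass

class DummyClip(Clip):             # the Clip subclass responsible for wrapping it
    pass

root = Registry()
child = Registry(clip_types={DummyFrames: DummyClip})
root.sub_registries.append(child)

print(list(root.all_types()))      # DummyFrames is discovered via the sub-registry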
from traitlets import Type
from traitlets.config import LoggingConfigurable
from nbgrader.exchange import default, abc
class ExchangeFactory(LoggingConfigurable):
    exchange = Type(
        default.Exchange,
        klass=abc.Exchange,
        help="A plugin for exchange."
    ).tag(config=True)

    fetch_assignment = Type(
        default.ExchangeFetchAssignment,
        klass=abc.ExchangeFetchAssignment,
        help="A plugin for fetching assignments."
    ).tag(config=True)

    fetch_feedback = Type(
        default.ExchangeFetchFeedback,
        klass=abc.ExchangeFetchFeedback,
        help="A plugin for fetching feedback."
    ).tag(config=True)
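A hedged example of overriding one of these Type traits from an nbgrader config file; the custom class and module path are hypothetical.

# Hypothetical nbgrader_config.py override of the fetch_assignment plugin.
# MyFetchAssignment / mypackage are placeholders; any value must resolve to a
# subclass of abc.ExchangeFetchAssignment because of the klass= argument above.
c.ExchangeFactory.fetch_assignment = "mypackage.exchange.MyFetchAssignment"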
# javascript wants milliseconds
milliseconds = 1000 * interval
display(Javascript("yap_ipython.notebook.set_autosave_interval(%i)" % milliseconds),
        include=['application/javascript']
        )
if interval:
    print("Autosaving every %i seconds" % interval)
else:
    print("Autosave disabled")
class ZMQInteractiveShell(InteractiveShell):
    """A subclass of InteractiveShell for ZMQ."""

    displayhook_class = Type(ZMQShellDisplayHook)
    display_pub_class = Type(ZMQDisplayPublisher)
    data_pub_class = Type('yap_kernel.datapub.ZMQDataPublisher')
    kernel = Any()
    parent_header = Any()

    @default('banner1')
    def _default_banner1(self):
        return default_banner

    # Override the traitlet in the parent class, because there's no point using
    # readline for the kernel. Can be removed when the readline code is moved
    # to the terminal frontend.
    colors_force = CBool(True)
    readline_use = CBool(False)
    # autoindent has no meaning in a zmqshell, and attempting to enable it
    # will print a warning in the absence of readline.
    autoindent = CBool(False)
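Note that data_pub_class above is given as a dotted string rather than a class: traitlets' Type trait accepts either a class object or an import string and resolves the string to the class. A minimal self-contained illustration (the class names here are invented for the example):

from traitlets import HasTraits, Type

class Publisher:                   # example base class
    pass

class Shell(HasTraits):
    # Both forms are accepted by Type; the string is resolved to the class.
    display_pub_class = Type(Publisher)
    data_pub_class = Type("collections.OrderedDict")

s = Shell()
print(s.display_pub_class)         # <class '__main__.Publisher'>
print(s.data_pub_class)            # <class 'collections.OrderedDict'>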
if not self.is_alive():
    raise RuntimeError('Kernel died before replying to kernel_info')

# Check if current time is ready check time plus timeout
if time.time() > abs_timeout:
    raise RuntimeError("Kernel didn't respond in %d seconds" % timeout)

# Flush IOPub channel
while True:
    try:
        msg = self.iopub_channel.get_msg(block=True, timeout=0.2)
    except Empty:
        break
# The classes to use for the various channels
shell_channel_class = Type(ZMQSocketChannel)
iopub_channel_class = Type(ZMQSocketChannel)
stdin_channel_class = Type(ZMQSocketChannel)
hb_channel_class = Type(HBChannel)
control_channel_class = Type(ZMQSocketChannel)
def _recv_reply(self, msg_id, timeout=None, channel='shell'):
    """Receive and return the reply for a given request"""
    if timeout is not None:
        deadline = monotonic() + timeout
    while True:
        if timeout is not None:
            timeout = max(0, deadline - monotonic())
        try:
            if channel == 'control':
                reply = self.get_control_msg(timeout=timeout)
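A hypothetical round-trip using the public blocking API built on the helpers above; the connection file name and timeout values are placeholders.

from jupyter_client import BlockingKernelClient

kc = BlockingKernelClient(connection_file="kernel-1234.json")   # placeholder file
kc.load_connection_file()
kc.start_channels()
kc.wait_for_ready(timeout=10)         # kernel_info handshake plus IOPub flush

kc.execute("print('hi')")
reply = kc.get_shell_msg(timeout=5)   # the corresponding execute_reply
kc.stop_channels()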
----------
min: Date or None (default: None)
    if not None, min is the minimal value of the domain
max: Date (default: None)
    if not None, max is the maximal value of the domain
domain_class: type (default: Date)
    traitlet type used to validate values in the domain of the scale.
rtype: string (class-level attribute)
    This attribute should not be modified by the user.
    The range type of a date scale is numerical.
dtype: type (class-level attribute)
    the associated data type / domain type
"""
rtype = 'Number'
dtype = np.datetime64
domain_class = Type(Date)
min = Date(default_value=None, allow_none=True).tag(sync=True)
max = Date(default_value=None, allow_none=True).tag(sync=True)
_view_name = Unicode('DateScale').tag(sync=True)
_model_name = Unicode('DateScaleModel').tag(sync=True)


@register_scale('bqplot.OrdinalScale')
class OrdinalScale(Scale):
    """An ordinal scale.

    A mapping from a discrete set of values to a numerical range.

    Attributes
    ----------
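A brief usage sketch for the two scale classes above; the date bounds and domain values are arbitrary examples.

import datetime
from bqplot import DateScale, OrdinalScale

# Clamp the date domain explicitly via the min/max traits described above.
x_scale = DateScale(
    min=datetime.datetime(2020, 1, 1),
    max=datetime.datetime(2020, 12, 31),
)

# An ordinal scale maps a discrete set of values onto a numerical range.
y_scale = OrdinalScale(domain=["low", "medium", "high"])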
This method simply calls raw_input directly.
"""
msg_type = msg['header']['msg_type']
if msg_type == 'input_request':
    _raw_input = self.client.kernel._sys_raw_input
    prompt = msg['content']['prompt']
    print(prompt, end='', file=sys.__stdout__)
    sys.__stdout__.flush()
    self.client.input(_raw_input())
class BlockingInProcessKernelClient(InProcessKernelClient):
    # The classes to use for the various channels.
    shell_channel_class = Type(BlockingInProcessChannel)
    iopub_channel_class = Type(BlockingInProcessChannel)
    stdin_channel_class = Type(BlockingInProcessStdInChannel)

    def wait_for_ready(self):
        # Wait for kernel info reply on shell channel
        while True:
            msg = self.shell_channel.get_msg(block=True)
            if msg['msg_type'] == 'kernel_info_reply':
                self._handle_kernel_info_reply(msg)
                break

        # Flush IOPub channel
        while True:
            try:
                msg = self.iopub_channel.get_msg(block=True, timeout=0.2)
                print(msg['msg_type'])
            except Empty:
                break