Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
async def test_adaptive_scaling():
    """End-to-end check that adaptive mode requests and retires workers."""
    # XXX: we should be able to use `InProcessClusterManager` here, but due to
    # https://github.com/dask/distributed/issues/3251 this results in periodic
    # failures.
    cfg = Config()
    cfg.DaskGateway.backend_class = LocalTestingBackend
    cfg.ClusterConfig.adaptive_period = 0.25
    async with temp_gateway(config=cfg) as gateway_proc:
        async with gateway_proc.gateway_client() as gw:
            async with gw.new_cluster() as cluster:
                # Switch the cluster into adaptive scaling mode.
                await cluster.adapt()
                # Submitting work should cause a worker to be requested
                # automatically, and the task should complete.
                async with cluster.get_client(set_as_default=False) as client:
                    result = await client.submit(lambda v: v + 1, 1)
                    assert result == 2
                # With no pending work, adaptive mode should scale the
                # cluster back down to zero workers on its own.
                await wait_for_workers(cluster, exact=0)
# NOTE(review): fragment — the start of the enclosing test function is not
# visible here, and indentation was lost in this paste. These lines appear to
# finish an assertion that the `enable` subcommand created config files, then
# exercise the `disable` subcommand and verify it removed the extension.
created_files,
'enable should create files in {}'.format(dirs['conf']))
# a bit of a hack to allow initializing a new app instance
for klass in app_classes:
reset_app_class(klass)
# do disable
main_app(argv=['disable'] + argv)
# check the config directory
# Keep only the created files that live under conf_dir and still exist.
conf_enabled = [
path for path in created_files
if path.startswith(conf_dir) and os.path.exists(path)]
# For each surviving config file, confirm the configurator extension is
# no longer registered in either the legacy or the current settings key.
for path in conf_enabled:
with open(path, 'r') as f:
conf = Config(json.load(f))
nbapp = conf.get('NotebookApp', {})
if 'server_extensions' in nbapp:
nt.assert_not_in(
'jupyter_nbextensions_configurator',
nbapp.server_extensions,
'conf after disable should empty'
'server_extensions list in file {}'.format(path))
# `nbserver_extensions` maps extension name -> enabled flag; after a
# disable, no entry should map to a truthy value.
nbservext = nbapp.get('nbserver_extensions', {})
nt.assert_false(
{k: v for k, v in nbservext.items() if v},
'disable command should disable all '
'nbserver_extensions in file {}'.format(path))
reset_app_class(DisableJupyterNbextensionsConfiguratorApp)
async def test_slurm_backend():
    """Smoke-test a gateway backed by Slurm: start a cluster as user
    "alice" and fetch its database record.

    NOTE(review): likely truncated in this view — the body ends right after
    the `db_cluster` lookup with no assertions visible.
    """
    c = Config()
    # Absolute paths into the conda env baked into the Slurm test image.
    c.SlurmClusterConfig.scheduler_cmd = "/opt/miniconda/bin/dask-gateway-scheduler"
    c.SlurmClusterConfig.worker_cmd = "/opt/miniconda/bin/dask-gateway-worker"
    # Small resource requests keep the test lightweight.
    c.SlurmClusterConfig.scheduler_memory = "256M"
    c.SlurmClusterConfig.worker_memory = "256M"
    c.SlurmClusterConfig.scheduler_cores = 1
    c.SlurmClusterConfig.worker_cores = 1
    c.DaskGateway.backend_class = SlurmTestingBackend
    async with temp_gateway(config=c) as g:
        auth = BasicAuth(username="alice")
        async with g.gateway_client(auth=auth) as gateway:
            async with gateway.new_cluster() as cluster:
                # Cross-check the client-visible cluster against the
                # gateway backend's internal database record.
                db_cluster = g.gateway.backend.db.get_cluster(cluster.name)
async def test_idle_timeout(tmpdir):
    """Check that an idle cluster is shut down after `idle_timeout` seconds.

    NOTE(review): truncated in this view — the body ends just after
    initializing the `waited` counter; the polling loop that presumably
    follows is not visible.
    """
    config = Config()
    config.DaskGateway.cluster_manager_class = InProcessClusterManager
    config.DaskGateway.temp_dir = str(tmpdir)
    # A two-second idle window keeps the test fast.
    config.InProcessClusterManager.idle_timeout = 2
    async with temp_gateway(config=config) as gateway_proc:
        async with Gateway(
            address=gateway_proc.public_urls.connect_url,
            proxy_address=gateway_proc.gateway_urls.connect_url,
            asynchronous=True,
        ) as gateway:
            # Start a cluster
            cluster = await gateway.new_cluster()
            # Add some workers
            await cluster.scale(2)
            await wait_for_workers(cluster, atleast=1)
            waited = 0
# NOTE(review): fragment — the start of the enclosing function is not
# visible. These lines appear to finish a re.sub(...) call that rewrites
# attachment references in `text` into inline base64 `data:` URIs, then
# reset the shared attachment store and return the rewritten text.
.format(filename),
r'\1 data:{0};base64,{1}'.format(mime_type, data),
text, flags=re.MULTILINE)
# Empty the module-level attachment buffer in place so later calls start
# fresh (slice deletion keeps the same list object for other references).
del attachment_storage[:]
return text
# NOTE(review): fragment — the `def __init__(...)` line of this exporter is
# not visible. These lines stash constructor options on the instance and
# configure the nbconvert RST exporter base class with an in-memory template.
self._execute = execute
self._kernel_name = kernel_name
self._execute_arguments = execute_arguments
self._allow_errors = allow_errors
self._timeout = timeout
self._codecell_lexer = codecell_lexer
# Serve the RST template from memory instead of a file on disk.
loader = jinja2.DictLoader({'nbsphinx-rst.tpl': RST_TEMPLATE})
super(Exporter, self).__init__(
template_file='nbsphinx-rst.tpl', extra_loaders=[loader],
config=traitlets.config.Config({
'HighlightMagicsPreprocessor': {'enabled': True},
# Work around https://github.com/jupyter/nbconvert/issues/720:
'RegexRemovePreprocessor': {'enabled': False},
}),
# Jinja filters made available to the template above.
filters={
'convert_pandoc': convert_pandoc,
'markdown2rst': markdown2rst,
'get_empty_lines': _get_empty_lines,
'extract_toctree': _extract_toctree,
'save_attachments': save_attachments,
'replace_attachments': replace_attachments,
'get_output_type': _get_output_type,
'json_dumps': json.dumps,
})
def __init__(self, exec_lines=None):
    """Set up an embedded IPython shell for a Sphinx build.

    Parameters
    ----------
    exec_lines : list of str, optional
        Lines to execute at startup; defaults to an empty list.

    NOTE(review): possibly truncated in this view — `exec_lines` and `IP`
    are bound but not used in the visible portion.
    """
    self.cout = StringIO()
    if exec_lines is None:
        exec_lines = []
    # Create config object for IPython
    config = Config()
    # Keep command history in memory so doc builds leave no files behind.
    config.HistoryManager.hist_file = ':memory:'
    config.InteractiveShell.autocall = False
    config.InteractiveShell.autoindent = False
    config.InteractiveShell.colors = 'NoColor'
    # create a profile so instance history isn't saved
    tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
    profname = 'auto_profile_sphinx_build'
    pdir = os.path.join(tmp_profile_dir,profname)
    profile = ProfileDir.create_profile_dir(pdir)
    # Create and initialize global ipython, but don't start its mainloop.
    # This will persist across different EmbededSphinxShell instances.
    IP = InteractiveShell.instance(config=config, profile_dir=profile)
    # Registered at interpreter exit — presumably removes the temp profile
    # directory created above. TODO(review): confirm `cleanup` does that.
    atexit.register(self.cleanup)
def __init__(self, coursedir=None, **kwargs):
    """Build a Feedback converter, filling in HTMLExporter defaults.

    Only settings the user has not already configured are supplied: the
    feedback template file and the formgrader template search path.
    """
    super(Feedback, self).__init__(coursedir=coursedir, **kwargs)
    defaults = Config()
    exporter_cfg = self.config.HTMLExporter
    if 'template_file' not in exporter_cfg:
        defaults.HTMLExporter.template_file = 'feedback.tpl'
    if 'template_path' not in exporter_cfg:
        # Resolve the formgrader templates directory relative to this file.
        here = os.path.dirname(__file__)
        formgrader_templates = os.path.abspath(os.path.join(
            here, '..', 'server_extensions', 'formgrader', 'templates'))
        defaults.HTMLExporter.template_path = ['.', formgrader_templates]
    self.update_config(defaults)
def __init__(self, exec_lines=None):
    """Set up an embedded IPython shell for a Sphinx build.

    Parameters
    ----------
    exec_lines : list of str, optional
        Lines to execute at startup; defaults to an empty list.

    NOTE(review): possibly truncated in this view — `exec_lines` and `IP`
    are bound but not used in the visible portion. This block duplicates an
    earlier identical constructor in this paste.
    """
    self.cout = StringIO()
    if exec_lines is None:
        exec_lines = []
    # Create config object for IPython
    config = Config()
    # Keep command history in memory so doc builds leave no files behind.
    config.HistoryManager.hist_file = ':memory:'
    config.InteractiveShell.autocall = False
    config.InteractiveShell.autoindent = False
    config.InteractiveShell.colors = 'NoColor'
    # create a profile so instance history isn't saved
    tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
    profname = 'auto_profile_sphinx_build'
    pdir = os.path.join(tmp_profile_dir,profname)
    profile = ProfileDir.create_profile_dir(pdir)
    # Create and initialize global ipython, but don't start its mainloop.
    # This will persist across different EmbededSphinxShell instances.
    IP = InteractiveShell.instance(config=config, profile_dir=profile)
    # Registered at interpreter exit — presumably removes the temp profile
    # directory created above. TODO(review): confirm `cleanup` does that.
    atexit.register(self.cleanup)
def gen_exporter():
    """Build an nbconvert HTML exporter using the minimal "basic" template.

    Returns
    -------
    HTMLExporter
        Exporter configured to pull cell outputs into separate files via
        ExtractOutputPreprocessor.
    """
    config = TraitletsConfig()
    # Traitlets config sections are keyed by exact class name, and the
    # preprocessor string is a case-sensitive import path. The previous
    # all-lowercase spellings ("htmlexporter",
    # "nbconvert.preprocessors.extractoutputpreprocessor") were silently
    # ignored, so the preprocessor never ran.
    config.HTMLExporter.preprocessors = [
        "nbconvert.preprocessors.ExtractOutputPreprocessor"
    ]
    html_exporter = HTMLExporter(config=config)
    html_exporter.template_file = "basic"
    return html_exporter
# NOTE(review): fragment — the enclosing function's `def` line and tail are
# not visible, and indentation was lost in this paste. These lines filter a
# notebook dict down to the cells selected by `indices`, optionally drop
# markdown cells, re-parse the result with nbformat, and build an nbconvert
# config with execution disabled.
nb_source_dict.update(
{"cells": [nb_source_dict["cells"][idx] for idx in indices]})
if clear_markdown:
nb_source_dict.update(
{"cells": [cell for cell in nb_source_dict["cells"]
if cell['cell_type'] != "markdown"]})
# NOTE(review): this update re-assigns "cells" to its current value — it is
# a no-op and could be removed.
nb_source_dict.update({"cells": nb_source_dict["cells"]})
import json
# Round-trip through JSON so nbformat validates and normalizes the dict.
ipynb_source = json.dumps(nb_source_dict)
notebook = nbformat.reads(ipynb_source, as_version=4)
from traitlets.config import Config
c = Config()
# This is to prevent execution of arbitrary code from the notebook
c.ExecutePreprocessor.enabled = False
if clear_output:
c.ClearOutputPreprocessor.enabled = True
c.CSSHTMLHeaderPreprocessor.enabled = False
c.HighlightMagicsPreprocessor.enabled = False
import os
# Place the template in course template dir
import course
template_path = os.path.join(
os.path.dirname(course.__file__),
"templates", "course", "jinja2")