How to use the datalab.utils.commands.parse_config function in datalab

To help you get started, we've selected a few examples of datalab.utils.commands.parse_config, based on popular ways it is used in public projects.

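All of the examples below follow the same pattern: the body of a notebook cell (YAML or JSON) is handed to parse_config together with the notebook environment, and a plain dict comes back. Here is a minimal sketch of that call with made-up cell contents; the $name substitution shown is an assumption based on how the env argument is used in these examples, so check the datalab source for the exact rules.

import datalab.utils.commands

# Illustrative cell body (YAML). Treating $output_dir as a reference resolved
# against the environment is an assumption, not documented behavior.
cell = """
train_data_path: gs://my-bucket/train.csv
output_dir: $output_dir
"""
env = {'output_dir': 'gs://my-bucket/training-output'}

config = datalab.utils.commands.parse_config(cell, env)
# Expected shape of the result (assumption): a plain dict such as
# {'train_data_path': 'gs://my-bucket/train.csv',
#  'output_dir': 'gs://my-bucket/training-output'}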

From googledatalab/pydatalab: datalab/mlalpha/commands/_mlalpha.py
def _train(args, cell):
  """ Train a model. """
  if not cell:
    return _output_train_template()

  env = datalab.utils.commands.notebook_environment()
  config = datalab.utils.commands.parse_config(cell, env)
  if args['cloud']:
    datalab.utils.commands.validate_config_must_have(config,
        ['package_uris', 'python_module', 'scale_tier', 'region'])
    runner = datalab.mlalpha.CloudRunner(config)
    job_info = runner.run()
    job_short_name = job_info['jobId']
    html = '<p>Job "%s" was submitted successfully.<br>' % job_short_name
    html += 'Run "%%mlalpha jobs --name %s" to view the status of the job.</p>' % job_short_name
    log_url_query_strings = {
      'project': datalab.context.Context.default().project_id,
      'resource': 'ml.googleapis.com/job_id/' + job_short_name
    }
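    # Note: urllib.urlencode is the Python 2 location of this helper; Python 3
    # moved it to urllib.parse.urlencode.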
    log_url = 'https://console.developers.google.com/logs/viewer?' + \
        urllib.urlencode(log_url_query_strings)
    html += '<p>Click <a href="%s">here</a> to view cloud log. <br>' % log_url
    html += 'Start TensorBoard by running "%tensorboard start --logdir=&lt;YourLogDir&gt;".</p>'
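The validate_config_must_have call above tells you what a cloud training cell has to contain. An illustrative cell body with made-up values; only the key names and function calls are taken from the snippet.

import datalab.utils.commands

# Key names come from the validate_config_must_have call above; the values are
# invented for illustration.
cell = """
package_uris: gs://my-bucket/trainer-0.1.tar.gz
python_module: trainer.task
scale_tier: BASIC
region: us-central1
"""
env = datalab.utils.commands.notebook_environment()
config = datalab.utils.commands.parse_config(cell, env)
datalab.utils.commands.validate_config_must_have(
    config, ['package_uris', 'python_module', 'scale_tier', 'region'])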

From googledatalab/pydatalab: datalab/mlalpha/commands/_ml.py
%s
%s
[Description]:
%s

Cloud Run Command:

%s
%s
[Description]:
%s
""" % (command_local, args_local, docstring_local, command_cloud, args_cloud, docstring_cloud)
      return datalab.utils.commands.render_text(output, preformatted=True)

    env = datalab.utils.commands.notebook_environment()
    func_args = datalab.utils.commands.parse_config(cell, env)
    if args['cloud'] is True:
      return pr.run_func(cloud_func_name, func_args)
    else:
      return pr.run_func(local_func_name, func_args)
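Here the parsed cell becomes the argument dict for a function registered earlier in the file (pr, cloud_func_name and local_func_name are defined above the excerpt). A rough sketch of the idea, under the assumption that run_func ends up forwarding the dict as keyword arguments; the function and cell below are hypothetical.

import datalab.utils.commands

def train_locally(input_dir, num_epochs=1):
    # Hypothetical stand-in for the function run_func would dispatch to.
    print('training on %s for %d epochs' % (input_dir, num_epochs))

cell = """
input_dir: gs://my-bucket/preprocessed
num_epochs: 5
"""
func_args = datalab.utils.commands.parse_config(
    cell, datalab.utils.commands.notebook_environment())
train_locally(**func_args)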

From googledatalab/pydatalab: datalab/mlalpha/commands/_mlalpha.py
def _dataset(args, cell):
  if not cell:
    _output_dataset_template(args['name'])
    return
  env = datalab.utils.commands.notebook_environment()
  config = datalab.utils.commands.parse_config(cell, env)
  datalab.utils.commands.validate_config(config, ['source', 'featureset'],
      optional_keys=['format'])
  if config['featureset'] not in env:
    raise Exception('"%s" is not defined.' % config['featureset'])
  featureset_class = env[config['featureset']]
  format = config.get('format', 'csv')
  ds = datalab.mlalpha.DataSet(featureset_class(), config['source'], format=format)
  env[args['name']] = ds
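Note that the featureset value is only a name: the class itself has to exist in the notebook environment, which is why the code looks it up with env[config['featureset']]. An illustrative cell follows; MyFeatures is a hypothetical class defined in an earlier notebook cell.

import datalab.utils.commands

# Keys follow the validate_config call above; 'format' is optional and
# defaults to 'csv'. MyFeatures is hypothetical and must already be defined
# in the notebook environment.
cell = """
source: gs://my-bucket/train.csv
featureset: MyFeatures
format: csv
"""
env = datalab.utils.commands.notebook_environment()
config = datalab.utils.commands.parse_config(cell, env)
featureset_class = env[config['featureset']]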

From googledatalab/pydatalab: datalab/mlalpha/commands/_mlalpha.py
def _preprocess(args, cell):
  if not cell:
    _output_preprocess_template(args['cloud'])
    return

  env = datalab.utils.commands.notebook_environment()
  config = datalab.utils.commands.parse_config(cell, env)
  datalab.utils.commands.validate_config(config,
     ['train_data_path', 'data_format', 'output_dir', 'feature_set_class_name'],
     optional_keys=['eval_data_path'])
  datalab.utils.commands.validate_config_value(config['data_format'], ['CSV', 'JSON'])
  command = '%%mlalpha preprocess'
  if args['cloud']:
    command += ' --cloud'
  command += '\n' + cell
  _output_preprocess_code_template(command, args['cloud'], config['data_format'],
      config['train_data_path'], config['output_dir'], config['feature_set_class_name'],
      eval_data_path=config.get('eval_data_path', None))
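Again the cell shape falls out of the validation calls: four required keys, an optional eval_data_path, and a data_format restricted to CSV or JSON. Illustrative values:

import datalab.utils.commands

# Values are made up; the key names and the CSV/JSON restriction come from the
# validate_config / validate_config_value calls above.
cell = """
train_data_path: gs://my-bucket/train.csv
eval_data_path: gs://my-bucket/eval.csv
data_format: CSV
output_dir: gs://my-bucket/preprocessed
feature_set_class_name: MyFeatures
"""
config = datalab.utils.commands.parse_config(
    cell, datalab.utils.commands.notebook_environment())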

From googledatalab/pydatalab: datalab/bigquery/commands/_bigquery.py
Returns:
    A Query object.
  """
  sql_arg = args.get('query', None)
  if sql_arg is None:
    # Assume we have inline SQL in the cell
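    # basestring is a Python 2 built-in; under Python 3 this check would use str.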
    if not isinstance(cell, basestring):
      raise Exception('Expected a --query argument or inline SQL')
    return datalab.bigquery.Query(cell, values=env)

  item = datalab.utils.commands.get_notebook_item(sql_arg)
  if isinstance(item, datalab.bigquery.Query):  # Queries are already expanded.
    return item

  # Create an expanded BQ Query.
  config = datalab.utils.commands.parse_config(cell, env)
  item, env = datalab.data.SqlModule.get_sql_statement_with_environment(item, config)
  if cell:
    env.update(config)  # config is both a fallback and an override.
  return datalab.bigquery.Query(item, values=env)
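
In the BigQuery example, the parsed cell supplies values for the variables of the SQL statement named by --query, and those values are merged into the environment used to expand the query. A rough sketch with hypothetical variable names; only the parse_config call and the env.update line mirror the excerpt.

import datalab.utils.commands

# Hypothetical parameter values for a SQL statement defined elsewhere in the
# notebook; the names min_size and source_table are made up.
cell = """
min_size: 1024
source_table: my_dataset.my_table
"""
env = datalab.utils.commands.notebook_environment()
config = datalab.utils.commands.parse_config(cell, env)
env.update(config)  # as in the excerpt: config is both a fallback and an override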