How to use the notebook.connectors.base.QueryError class in notebook

To help you get started, we’ve selected a few notebook examples based on popular ways it is used in public projects.
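
All of the examples below follow the same pattern: connector code raises QueryError with a human-readable message (and sometimes a query handle), and the API/decorator layer catches it, reads its message, handle and extra attributes, and turns them into an error response. The sketch below is a minimal, self-contained illustration of that round trip; the QueryError class here is a hypothetical stand-in that only mirrors the attributes the excerpts rely on, not the real implementation in notebook/connectors/base.py.

class QueryError(Exception):
  # Hypothetical stand-in: the real notebook.connectors.base.QueryError may differ in detail.
  def __init__(self, message, handle=None):
    super().__init__(message)
    self.message = message
    self.handle = handle  # e.g. the handle of the failed statement
    self.extra = {}       # extra fields the caller merges into its response


def execute_statement(statement):
  # Connector layer: signal a failure by raising QueryError with a readable message.
  if not statement.strip():
    raise QueryError('No statement to execute')
  return {'status': 0, 'data': []}


def api_call(statement):
  # API layer: convert QueryError into an error payload, as decorators.py does below.
  response = {}
  try:
    response = execute_statement(statement)
  except QueryError as e:
    response['status'] = 1
    response['message'] = e.message
    if e.handle:
      response['handle'] = e.handle
    if e.extra:
      response.update(e.extra)
  return response


print(api_call('  '))  # -> {'status': 1, 'message': 'No statement to execute'}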


github cloudera / hue / desktop / libs / notebook / src / notebook / api.py View on Github external
          _snippet['result']['handle'] = response['handle']
          _snippet['result']['statements_count'] = response['handle'].get('statements_count', 1)
          _snippet['result']['statement_id'] = response['handle'].get('statement_id', 0)
          _snippet['result']['handle']['statement'] = response['handle'].get('statement', snippet['statement']).strip() # For non HS2, as non multi query yet
        else:
          _snippet['status'] = 'failed'

        if history: # If _historify failed, history will be None. If we get Atomic block exception, something underneath interpreter.execute() crashed and is not handled.
          history.update_data(notebook)
          history.save()

          response['history_id'] = history.id
          response['history_uuid'] = history.uuid
          if notebook['isSaved']: # Keep track of history of saved queries
            response['history_parent_uuid'] = history.dependencies.filter(type__startswith='query-').latest('last_modified').uuid
  except QueryError as ex: # We inject the history information from _historify() to the failed queries
    if response.get('history_id'):
      ex.extra['history_id'] = response['history_id']
    if response.get('history_uuid'):
      ex.extra['history_uuid'] = response['history_uuid']
    if response.get('history_parent_uuid'):
      ex.extra['history_parent_uuid'] = response['history_parent_uuid']
    raise ex

  # Inject and HTML escape results
  if result is not None:
    response['result'] = result
    response['result']['data'] = escape_rows(result['data'])

  response['status'] = 0

  return response
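
In this api.py excerpt, a failed execution is still historified: the history id/uuid (and the parent uuid for saved queries) already placed in the response are copied into ex.extra before re-raising, so the decorator shown further down can merge them into the error payload returned to the client.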
github cloudera / hue / apps / metastore / src / metastore / views.py View on Github external
            name=_('Load data in %s.%s') % (database, table.name),
            editor_type=source_type,
            statement=query_history.strip(),
            status='ready',
            database=database,
            on_success_url='assist.db.refresh',
            is_task=True,
            last_executed=last_executed
          )
          response = job.execute(request)
        else:
          url = reverse('beeswax:watch_query_history', kwargs={'query_history_id': query_history.id}) + '?on_success_url=' + on_success_url
          response['status'] = 0
          response['data'] = url
          response['query_history_id'] = query_history.id
      except QueryError, ex:
        response['status'] = 1
        response['data'] = _("Can't load the data: ") + ex.message
      except Exception, e:
        response['status'] = 1
        response['data'] = _("Can't load the data: ") + str(e)
  else:
    load_form = LoadDataForm(table)

  if response['status'] == -1:
    popup = render('popups/load_data.mako', request, {
           'table': table,
           'load_form': load_form,
           'database': database,
           'app_name': 'beeswax'
       }, force_template=True).content
    response['data'] = popup
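
Here the view catches QueryError separately from generic exceptions so the user sees the engine's own message ("Can't load the data: " + ex.message) with status 1, while status -1 means the load form is re-rendered as a popup. Note the except QueryError, ex form: this excerpt predates Hue's Python 3 migration and would be written except QueryError as ex today.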
github cloudera / hue / desktop / libs / notebook / src / notebook / connectors / pig_batch.py View on Github external
def check_status(self, notebook, snippet):
    job_id = snippet['result']['handle']['id']

    oozie_workflow = check_job_access_permission(self.request, job_id)
    logs, workflow_actions, is_really_done = self._get_log_output(oozie_workflow)
    results = self._get_results(logs)

    if is_really_done and not oozie_workflow.is_running():
      if oozie_workflow.status in ('KILLED', 'FAILED'):
        raise QueryError(_('The script failed to run and was stopped'))
      if results:
        status = 'available'
      else:
        status = 'running' # Tricky case when the logs are being moved by YARN at job completion
    elif oozie_workflow.is_running():
      status = 'running'
    else:
      status = 'failed'

    return {
        'status': status
    }
github cloudera / hue / desktop / libs / notebook / src / notebook / decorators.py View on Github external
      if e.message and isinstance(e.message, basestring):
        response['message'] = e.message
    except AuthenticationRequired as e:
      response['status'] = 401
      if e.message and isinstance(e.message, basestring):
        response['message'] = e.message
    except ValidationError as e:
      LOG.exception('Error validation %s' % f)
      response['status'] = -1
      response['message'] = e.message
    except OperationTimeout as e:
      response['status'] = -4
    except FilesystemException as e:
      response['status'] = 2
      response['message'] = e.message
    except QueryError as e:
      LOG.exception('Error running %s' % f.__name__)
      response['status'] = 1
      response['message'] = smart_unicode(e)
      if 'max_row_size' in response['message']:  # str.index() would raise ValueError when the marker is absent
        size = re.search(r"(\d+.?\d*) (.B)", response['message'])
        if size and size.group(1):
          response['help'] = {
            'setting': {
              'name': 'max_row_size',
              'value':str(int(_closest_power_of_2(_to_size_in_bytes(size.group(1), size.group(2)))))
            }
          }
      if e.handle:
        response['handle'] = e.handle
      if e.extra:
        response.update(e.extra)
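
Besides the generic message, the decorator tries to be actionable: when the error text mentions max_row_size it parses the reported size and suggests a new max_row_size value (rounded to the closest power of two) in response['help'], and any handle or extra fields carried by the QueryError are merged into the response.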
github cloudera / hue / desktop / libs / notebook / src / notebook / connectors / rdbms.py View on Github external
def decorator(*args, **kwargs):
    try:
      return func(*args, **kwargs)
    except Exception as e:
      message = force_unicode(e)
      if 'Invalid query handle' in message or 'Invalid OperationHandle' in message:
        raise QueryExpired(e)
      else:
        raise QueryError, message, sys.exc_info()[2]
  return decorator
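
raise QueryError, message, sys.exc_info()[2] is Python 2-only syntax for re-raising with the original traceback; under Python 3 it is a SyntaxError. A rough equivalent of the same decorator is sketched below. It assumes QueryError and QueryExpired can be imported from notebook.connectors.base, uses str(e) in place of force_unicode, and names the outer wrapper query_error_handler purely for illustration.

from notebook.connectors.base import QueryError, QueryExpired  # assumed importable, as in the excerpt's module


def query_error_handler(func):  # outer wrapper name is hypothetical; only the inner function is shown above
  def decorator(*args, **kwargs):
    try:
      return func(*args, **kwargs)
    except Exception as e:
      message = str(e)  # stands in for force_unicode(e)
      if 'Invalid query handle' in message or 'Invalid OperationHandle' in message:
        raise QueryExpired(e)
      # 'raise ... from e' chains the original exception and preserves its traceback,
      # replacing the Python 2 three-argument raise used above.
      raise QueryError(message) from e
  return decorator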
github cloudera / hue / desktop / libs / notebook / src / notebook / connectors / solr.py View on Github external
def execute(self, notebook, snippet):
    from search.conf import SOLR_URL

    api = NativeSolrApi(SOLR_URL.get(), self.user.username)

    collection = self.options.get('collection') or snippet.get('database')
    if not collection or collection == 'default':
      collection = api.collections2()[0]

    response = api.sql(collection, snippet['statement'])

    info = response['result-set']['docs'].pop(-1) # EOF, RESPONSE_TIME, EXCEPTION
    if info.get('EXCEPTION'):
      raise QueryError(info['EXCEPTION'])

    headers = []
    for row in response['result-set']['docs']:
      for col in list(row.keys()):
        if col not in headers:
          headers.append(col)

    data = [[doc.get(col) for col in headers] for doc in response['result-set']['docs']]
    has_result_set = bool(data)

    return {
      'sync': True,
      'has_result_set': has_result_set,
      'modified_row_count': 0,
      'result': {
        'has_more': False,
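
The Solr connector runs the statement through the SQL interface and pops the trailing metadata document of the result set (EOF, RESPONSE_TIME, EXCEPTION); if the engine reported an EXCEPTION, that text is raised directly as a QueryError so the UI shows the Solr error verbatim.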
github cloudera / hue / desktop / libs / notebook / src / notebook / connectors / spark_shell.py View on Github external
    api = get_spark_api(self.user)

    response = api.create_session(**props)

    status = api.get_session(response['id'])
    count = 0

    while status['state'] == 'starting' and count < 120:
      status = api.get_session(response['id'])
      count += 1
      time.sleep(1)

    if status['state'] != 'idle':
      info = '\n'.join(status['log']) if status['log'] else 'timeout'
      raise QueryError(_('The Spark session could not be created in the cluster: %s') % info)

    return {
        'type': lang,
        'id': response['id'],
        'properties': properties
    }
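
Session creation is synchronous from the caller's point of view: the code polls get_session about once per second for up to 120 attempts, and if the session never reaches the idle state it raises QueryError with the accumulated session log (or 'timeout') so the user sees the cluster-side reason.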
github cloudera / hue / desktop / libs / notebook / src / notebook / connectors / spark_shell.py View on Github external
          'meta': meta,
          'type': type
      }
    elif content['status'] == 'error':
      tb = content.get('traceback', None)

      if tb is None or not tb:
        msg = content.get('ename', 'unknown error')

        evalue = content.get('evalue')
        if evalue is not None:
          msg = '%s: %s' % (msg, evalue)
      else:
        msg = ''.join(tb)

      raise QueryError(msg)
github cloudera / hue / desktop / libs / notebook / src / notebook / connectors / altus_adb.py View on Github external
"value": "%7B%22id%22%3A%22dd5755a3-e8db-82d9-4f98-9f4fb5a99a06%22%2C%22type%22%3A%22impala%22%2C%22status%22%3A%22running%22%2C%22statementType%22%3A%22text%22%2C%22statement%22%3A%22SELECT+*+FROM+web_logs+LIMIT+100%3B%22%2C%22aceCursorPosition%22%3A%7B%22column%22%3A33%2C%22row%22%3A0%7D%2C%22statementPath%22%3A%22%22%2C%22associatedDocumentUuid%22%3Anull%2C%22properties%22%3A%7B%22files%22%3A%5B%5D%2C%22functions%22%3A%5B%5D%2C%22arguments%22%3A%5B%5D%2C%22settings%22%3A%5B%5D%7D%2C%22result%22%3A%7B%22id%22%3A%2206840534-9434-33b5-5eca-2cd08432ceb3%22%2C%22type%22%3A%22table%22%2C%22handle%22%3A%7B%22has_more_statements%22%3Afalse%2C%22statement_id%22%3A0%2C%22statements_count%22%3A1%2C%22previous_statement_hash%22%3A%22acb6478fcf28c31b5e76d49de7d77bbe46fe5e4f9436c16c0ca8ed5f%22%7D%7D%2C%22database%22%3A%22default%22%2C%22compute%22%3A%7B%22interface%22%3A%22impala%22%2C%22type%22%3A%22direct%22%2C%22namespace%22%3A%22default-romain%22%2C%22id%22%3A%22default-romain%22%2C%22name%22%3A%22default-romain%22%7D%2C%22wasBatchExecuted%22%3Afalse%7D"
                  }
                ]
              }
            }'''

    payload = payload.replace('SELECT+*+FROM+web_logs+LIMIT+100', urllib_quote_plus(query.replace('\n', ' ')))

    resp = self.api.submit_hue_query(self.cluster_crn, payload)

    if 'payload' in resp:
      resp_payload = json.loads(resp['payload'])
      if 'handle' in resp_payload:
        return resp_payload['handle']
      else:
        raise QueryError(resp_payload.get('message'))
    else:
      raise QueryError(resp.get('message'))
github cloudera / hue / desktop / libs / notebook / src / notebook / connectors / dataeng.py View on Github external
def check_status(self, notebook, snippet):
    response = {'status': 'running'}

    job_id = snippet['result']['handle']['id']

    handle = DataEng(self.user).list_jobs(job_ids=[job_id])
    job = handle['jobs'][0]

    if job['status'] in RUNNING_STATES:
      return response
    elif job['status'] in ('failed', 'terminated'):
      raise QueryError(_('Job was %s') % job['status'])
    else:
      response['status'] = 'available'

    return response
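
check_status here maps the remote job lifecycle onto the notebook's polling contract: still-running states return the 'running' response unchanged, 'failed' or 'terminated' becomes a raised QueryError (which the decorator converts into an error response), and anything else is reported as 'available'.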