How to use the opentuner.resultsdb.models.TuningRun class in opentuner

To help you get started, we've selected a few opentuner examples based on popular ways TuningRun is used in public projects.
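For orientation, here is a minimal sketch of creating a TuningRun record directly. It assumes opentuner's resultsdb.connect(dburi) returns an (engine, session factory) pair, as TuningRunMain uses it; the database path and run name are placeholders.

import uuid
from datetime import datetime

from opentuner import resultsdb

# Assumption: connect() opens (or creates) the results database and returns
# an engine plus a session factory.
engine, Session = resultsdb.connect('sqlite:///opentuner.db')  # placeholder path
session = Session()

# Create and persist a TuningRun row; uuid and name identify the run.
tuning_run = resultsdb.models.TuningRun(
    uuid=uuid.uuid4().hex,
    name='my-experiment',  # placeholder label
    start_date=datetime.now(),
)
session.add(tuning_run)
session.commit()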


github jansel / opentuner / opentuner / tuningrunmain.py (View on GitHub)
def init(self):
    if self.tuning_run is None:
      program_version = (self.measurement_interface
                         .db_program_version(self.session))
      self.session.flush()
      self.measurement_interface.prefix_hook(self.session)
      self.tuning_run = (
        resultsdb.models.TuningRun(
          uuid=uuid.uuid4().hex,
          name=self.args.label,
          args=self.args,
          start_date=datetime.now(),
          program_version=program_version,
          objective=self.objective_copy,
        ))
      self.session.add(self.tuning_run)

      driver_kwargs = {
        'args': self.args,
        'input_manager': self.input_manager,
        'manipulator': self.manipulator,
        'measurement_interface': self.measurement_interface,
        'objective': self.objective,
        'session': self.session,
        # ... (additional driver arguments follow in the full source)
      }
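The stats utilities below only look at runs whose state is 'COMPLETE', so a run is typically marked complete when the search finishes. Continuing the first sketch (end_date is an assumed DateTime column on TuningRun):

from datetime import datetime

# tuning_run and session are the objects from the first sketch above
tuning_run.state = 'COMPLETE'         # the stats queries below filter on this value
tuning_run.end_date = datetime.now()  # assumed DateTime column on TuningRun
session.commit()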
github jansel / opentuner / opentuner / utils / stats.py (View on GitHub)
def main(self):
    dir_label_runs = defaultdict(lambda: defaultdict(list))
    for session in self.dbs:
      q = (session.query(resultsdb.models.TuningRun)
          .filter_by(state='COMPLETE')
          .order_by('name'))

      if self.args.label:
        q = q.filter(TuningRun.name.in_(
          list(map(str.strip, self.args.label.split(',')))))

      for tr in q:
        d = run_dir(self.args.stats_dir, tr)
        d = os.path.normpath(d)
        dir_label_runs[d][run_label(tr)].append((tr, session))

    summary_report = defaultdict(lambda: defaultdict(list))
    for d, label_runs in list(dir_label_runs.items()):
      if not os.path.isdir(d):
        os.makedirs(d)
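The same COMPLETE-runs query works outside of stats.py as well. A minimal standalone sketch, assuming resultsdb.connect as before and a placeholder database path:

from opentuner import resultsdb

engine, Session = resultsdb.connect('sqlite:///opentuner.db')  # placeholder path
session = Session()

completed = (session.query(resultsdb.models.TuningRun)
             .filter_by(state='COMPLETE')
             .order_by(resultsdb.models.TuningRun.name))
for tr in completed:
    print(tr.name, tr.start_date)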
github jansel / opentuner / opentuner / utils / stats_matplotlib.py (View on GitHub)
def get_values(labels):
  """
  Arguments:
    labels: List of labels whose values are of interest
  Returns:
    A list of (mean, percentile) tuples, corresponding to the
    provided list of labels
  """
  dbs = get_dbs(os.getcwd())
  dir_label_runs = defaultdict(lambda: defaultdict(list))
  for db in dbs:
    q = (db.query(resultsdb.models.TuningRun)
            .filter_by(state='COMPLETE')
            .order_by('name'))
    if labels:
      q = q.filter(resultsdb.models.TuningRun.name.in_(labels))
    for tr in q:
      dir_label_runs[run_label(tr)][run_label(tr)].append((tr, db))
  all_run_ids = list()
  returned_values = {}
  for d, label_runs in list(dir_label_runs.items()):
    # `_` is the fn library's callable placeholder (from fn import _);
    # _[0].id maps each (tuning_run, db) pair to the run's id
    all_run_ids = list(map(_[0].id, itertools.chain(*list(label_runs.values()))))
    session = list(label_runs.values())[0][0][1]
    objective = list(label_runs.values())[0][0][0].objective

    q = (session.query(resultsdb.models.Result)
         .filter(resultsdb.models.Result.tuning_run_id.in_(all_run_ids))
         .filter(resultsdb.models.Result.time < float('inf'))
         .filter_by(was_new_best=True, state='OK'))
    total = q.count()
    q = objective.filter_acceptable(q)
    acceptable = q.count()
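If you just need the numbers, the function can be called directly. A sketch, assuming the module is importable as opentuner.utils.stats_matplotlib and that the labels exist in databases under the current directory:

from opentuner.utils import stats_matplotlib

# hypothetical run labels stored in TuningRun.name
values = stats_matplotlib.get_values(['run-a', 'run-b'])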
github jansel / opentuner / opentuner / resultsdb / models.py (View on GitHub)
def program(self):
    return self.program_version.program


class Result(Base):
  #set by MeasurementDriver:
  configuration_id = Column(ForeignKey(Configuration.id))
  configuration = relationship(Configuration)

  machine_id = Column(ForeignKey(Machine.id))
  machine = relationship(Machine, backref='results')

  input_id = Column(ForeignKey(Input.id))
  input = relationship(Input, backref='results')

  tuning_run_id = Column(ForeignKey(TuningRun.id), index=True)
  tuning_run = relationship(TuningRun, backref='results')

  collection_date = Column(DateTime, default=func.now())
  collection_cost = Column(Float)

  #set by MeasurementInterface:
  state = Column(Enum('OK', 'TIMEOUT', 'ERROR',
                      name='t_result_state'),
                 default='OK')
  time = Column(Float)
  accuracy = Column(Float)
  energy = Column(Float)
  size = Column(Float)
  confidence = Column(Float)
  #extra = Column(PickleType)

  #set by SearchDriver
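Because Result.tuning_run declares backref='results', every TuningRun object exposes a .results collection. A short sketch using a session set up as in the first example (the run name is a placeholder):

run = (session.query(resultsdb.models.TuningRun)
       .filter_by(name='my-experiment', state='COMPLETE')  # placeholder name
       .first())
if run is not None:
    # run.results comes from the backref on Result.tuning_run above
    for result in run.results:
        if result.state == 'OK':
            print(result.time, result.collection_date)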