How to use the twitter.common.log module in twitter

To help you get started, we’ve selected a few twitter.common.log examples based on popular ways the module is used in public projects.

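Every snippet below relies on the module-level helpers in twitter.common.log. As a minimal sketch of the setup they assume (the 'my_app' filebase is an arbitrary example; this assumes the twitter.common.log distribution and its log.init() entry point):

from twitter.common import log

# Initialize handlers once at startup; the argument is a log filebase.
log.init('my_app')

log.debug('fine-grained detail, usually suppressed by default')
log.info('routine operational message')
log.warning('something looks off')
log.error('something went wrong')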

github.com/apache/aurora: src/main/python/twitter/aurora/executor/common/status_checker.py
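A status aggregator from the Aurora executor: it polls a chain of StatusCheckers, tracing each probe with log.debug and recording the first non-None result with log.info before type-checking it.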
def status(self):
    if self._status is None:
      for status_checker in self._status_checkers:
        log.debug('Checking status from %s' % status_checker.__class__.__name__)
        status_checker_status = status_checker.status
        if status_checker_status is not None:
          log.info('%s reported %s' % (status_checker.__class__.__name__, status_checker_status))
          if not isinstance(status_checker_status, StatusResult):
            raise TypeError('StatusChecker returned something other than a StatusResult: got %s' %
                type(status_checker_status))
          self._status = status_checker_status
          break
    return self._status
github.com/apache/aurora: src/python/twitter/aurora/executor/common/sandbox.py
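DirectorySandbox.create logs each filesystem step (mkdir, chown, chmod) at debug level while it builds and locks down a task sandbox, so a failure can be pinned to a specific operation.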
def create(self):
    log.debug('DirectorySandbox: mkdir %s' % self.root)
    safe_mkdir(self.root)
    pwent = pwd.getpwnam(self._user)
    grent = grp.getgrgid(pwent.pw_gid)
    log.debug('DirectorySandbox: chown %s:%s %s' % (self._user, grent.gr_name, self.root))
    os.chown(self.root, pwent.pw_uid, pwent.pw_gid)
    log.debug('DirectorySandbox: chmod 700 %s' % self.root)
    os.chmod(self.root, 0o700)
github.com/wickman/rainman: src/rainman/scheduler.py
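A fragment of rainman's request scheduler loop: log.debug records both request throttling and each block requested from a randomly chosen peer.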
        # don't bother scheduling unless there are unchoked peers or peers
        # we have not told we are interested
        if len([peer for peer in owners if not peer.choked or not peer.interested]) == 0:
          continue
        if self._requests.outstanding > self.MAX_REQUESTS:
          log.debug('Hit max requests, waiting.')
          yield gen.Task(self._session.io_loop.add_timeout, self.INNER_YIELD)

        # subpiece nomenclature is "block"
        request_size = self.REQUEST_SIZE.as_(Data.BYTES)
        for block in self._session.piece_broker.iter_blocks(piece_index, request_size):
          if block not in self._session.piece_broker and block not in self._requests:
            random_peer = random.choice(owners)
            if random_peer.choked:
              if not random_peer.interested:
                log.debug('Want to request %s from %s but we are choked, setting interested.' % (
                  block, random_peer))
              random_peer.interested = True
              continue
            log.debug('Scheduler requesting %s from peer [%s].' % (block, random_peer))
            # XXX sync
            random_peer.send_request(block)
            self._requests.add(block, random_peer)

      yield gen.Task(self._session.io_loop.add_timeout, self.OUTER_YIELD)
github.com/apache/aurora: src/main/python/apache/aurora/admin/admin.py
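An Aurora admin command that logs a role's current quota, then the proposed new CPU, RAM, and disk values, at info level.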
def increase_quota(cluster, role, cpu_str, ram_str, disk_str):
  """usage: increase_quota cluster role cpu ram[unit] disk[unit]

  Increases the amount of production quota allocated to a user.
  """
  cpu = float(cpu_str)
  ram = parse_data(ram_str).as_(Data.MB)
  disk = parse_data(disk_str).as_(Data.MB)

  client = make_admin_client_with_options(cluster)
  resp = client.get_quota(role)
  quota = resp.result.getQuotaResult.quota
  resource_details = ResourceManager.resource_details_from_quota(quota)
  log.info('Current quota for %s:\n\t%s' % (
      role,
      '\n\t'.join('%s\t%s%s' % (
          r.resource_type.display_name,
          r.value,
          r.resource_type.display_unit) for r in resource_details)))

  new_cpu = ResourceType.CPUS.value_type(
    cpu + ResourceManager.quantity_of(resource_details, ResourceType.CPUS))
  new_ram = ResourceType.RAM_MB.value_type(
    ram + ResourceManager.quantity_of(resource_details, ResourceType.RAM_MB))
  new_disk = ResourceType.DISK_MB.value_type(
    disk + ResourceManager.quantity_of(resource_details, ResourceType.DISK_MB))

  log.info('Attempting to update quota for %s to\n\tCPU\t%s\n\tRAM\t%s MB\n\tDisk\t%s MB' %
           (role, new_cpu, new_ram, new_disk))
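The RAM and disk arithmetic above comes from twitter.common.quantity, whose Amount values convert between units with .as_(). A small illustration, assuming the Amount/Data API as used in these snippets:

from twitter.common.quantity import Amount, Data

# Amount pairs a number with a unit; .as_() converts to a plain number.
print(Amount(2, Data.GB).as_(Data.MB))  # 2048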
github.com/apache/aurora: src/main/python/apache/aurora/executor/http_lifecycle.py
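HTTP-based task teardown: each escalation endpoint call and its wait budget are logged at info level before the executor polls for the task to exit.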
def _terminate_http(self):
    http_signaler = HttpSignaler(self._lifecycle_port)

    for endpoint, wait_time in self._escalation_endpoints:
      handled, _ = http_signaler(endpoint, use_post_method=True)
      log.info('Killing task, calling %s and waiting %s, handled is %s' % (
          endpoint, str(wait_time), str(handled)))

      waited = Amount(0, Time.SECONDS)
      while handled:
        if self._runner.status is not None:
          return True
        if waited >= wait_time:
          break

        self._clock.sleep(self.WAIT_POLL_INTERVAL.as_(Time.SECONDS))
        waited += self.WAIT_POLL_INTERVAL
github.com/wickman/rainman: rainman/peer_driver.py
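rainman's peer driver logs every outbound wire command at debug level before writing it to the iostream.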
def send(self, command, *args):
    # TODO(wickman) Put this behind a verbosity flag.
    log.debug('Sending %s(%s) to %s' % (
        Command.to_string(command), ', '.join(map(str, args)), self))
    yield gen.Task(self.iostream.write, self._encode(command, *args))
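The TODO above points at the usual fix for chatty debug lines: raise the minimum log level rather than deleting the call. One way to do that, assuming the LogOptions API and its 'scheme:LEVEL' strings from twitter.common.log.options:

from twitter.common.log.options import LogOptions

# Hide log.debug output on stderr; 'google:' selects the glog-style scheme.
LogOptions.set_stderr_log_level('google:INFO')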
github.com/apache/aurora: src/python/twitter/mesos/bin/mesos_admin.py
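An older mesos_admin command that emits a small tab-separated report, one log.info line per active recovery task.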
def scheduler_print_recovery_tasks():
  """usage: scheduler_print_recovery_tasks --cluster=CLUSTER

  Prints all active tasks in a staged recovery.
  """
  options = app.get_options()
  resp = MesosClientAPI(options.cluster, options.verbosity).query_recovery(
      TaskQuery(statuses=ACTIVE_STATES))
  check_and_log_response(resp)
  log.info('Role\tJob\tShard\tStatus\tTask ID')
  for task in resp.tasks:
    assigned = task.assignedTask
    conf = assigned.task
    log.info('\t'.join((conf.owner.role,
                        conf.jobName,
                        str(conf.shardId),
                        ScheduleStatus._VALUES_TO_NAMES[task.status],
                        assigned.taskId)))
github.com/apache/aurora: src/main/python/apache/aurora/client/api/instance_watcher.py
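InstanceWatcher logs the instance set it is watching, then reports each instance that stays healthy for the full watch window.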
def watch(self, instance_ids, health_check=None):
    """Watches a set of instances and detects failures based on a delegated health check.

    Arguments:
    instance_ids -- set of instances to watch.

    Returns a set of instances that are considered failed.
    """
    log.info('Watching instances: %s' % instance_ids)
    instance_ids = set(instance_ids)
    health_check = health_check or StatusHealthCheck()

    instance_states = {}

    def finished_instances():
      return dict((s_id, s) for s_id, s in instance_states.items() if s.finished)

    def set_instance_healthy(instance_id, now):
      if instance_id not in instance_states:
        instance_states[instance_id] = Instance(now)
      instance = instance_states.get(instance_id)
      if now > (instance.birthday + self._watch_secs):
        log.info('Instance %s has been up and healthy for at least %d seconds' % (
          instance_id, self._watch_secs))
        instance.set_healthy(True)
github.com/apache/aurora: src/main/python/apache/aurora/client/api/instance_watcher.py
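The companion callback from the same watcher: it logs the unhealthy transition, distinguishing instances that failed after being healthy from those that never passed a health check.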
def set_instance_unhealthy(instance_id):
      log.info('Instance %s is unhealthy' % instance_id)
      if instance_id in instance_states:
        # An instance that was previously healthy and currently unhealthy has failed.
        instance_states[instance_id].set_healthy(False)
      else:
        # An instance never passed a health check (e.g.: failed before the first health check).
        instance_states[instance_id] = Instance(finished=True)
github.com/apache/aurora: src/python/twitter/mesos/client/client_wrapper.py
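A common two-tier pattern: the job name goes to log.info for operators, while the full configuration is kept at log.debug.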
def create_job(self, config):
    log.info('Creating job %s' % config.name())
    log.debug('Full configuration: %s' % config.job())
    return self._scheduler.createJob(config.job())