How to use the twitter.common.quantity.Time class in Python

To help you get started, we’ve selected a few twitter.common.quantity.Time examples based on popular ways it is used in public projects.
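Time is not called directly; it supplies unit constants (Time.SECONDS, Time.MINUTES, Time.HOURS, and so on) that pair with Amount to express durations, and as_() converts an Amount into a plain number in the requested unit. Here is a minimal sketch of that core API before the real-world examples below; the numeric result in the comment is inferred from how the snippets use it, not from library documentation:

import time

from twitter.common.quantity import Amount, Time

timeout = Amount(2, Time.MINUTES)         # a duration as an (amount, unit) pair
timeout_secs = timeout.as_(Time.SECONDS)  # convert at the call site; yields 120

time.sleep(timeout.as_(Time.SECONDS))     # APIs that want raw seconds get a plain number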

github apache/aurora: src/main/python/twitter/aurora/executor/thermos_task_runner.py
def _terminate_http(self):
    if 'health' not in self._ports:
      return

    http_signaler = HttpSignaler(self._ports['health'])

    # pass 1: ask the task to quit politely via /quitquitquit
    http_signaler.quitquitquit()
    self._clock.sleep(self.ESCALATION_WAIT.as_(Time.SECONDS))
    if self.status is not None:
      return True

    # pass 2: escalate to a hard abort via /abortabortabort
    http_signaler.abortabortabort()
    self._clock.sleep(self.ESCALATION_WAIT.as_(Time.SECONDS))
    if self.status is not None:
      return True
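
Two details in this snippet are worth copying: the wait is declared once as an Amount constant and converted with as_(Time.SECONDS) only at the sleep call, and the clock is injected rather than imported, so tests can substitute a fake. A stripped-down sketch of that pattern (Escalator and its members are illustrative, not part of Aurora):

import time

from twitter.common.quantity import Amount, Time

class Escalator(object):
  ESCALATION_WAIT = Amount(5, Time.SECONDS)

  def __init__(self, clock=time):
    self._clock = clock  # a test can pass a fake clock here

  def wait(self):
    # Convert to raw seconds only at the point of use.
    self._clock.sleep(self.ESCALATION_WAIT.as_(Time.SECONDS))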

github twitter-archive/commons: src/python/twitter/common/metrics/rate.py
  def __init__(self, name, gauge, window=Amount(1, Time.SECONDS), clock=time):
    """
      Create a gauge using name as a base for a _per_ sampling gauge.

        name: The base name of the gauge.
        gauge: The gauge to sample.
        window: The window over which the samples should be measured (default 1 second).
        clock: The time source to sample from (default: the time module).
    """
    self._clock = clock
    self._gauge = gauge
    self._samples = []
    self._window = window
    NamedGauge.__init__(self, '%s_per_%s%s' % (name, window.amount(), window.unit()))
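
The NamedGauge.__init__ call derives the gauge's name from the window, using the same Amount accessors window.amount() and window.unit() that appear above. Isolated, the naming logic looks like this; the 'secs' spelling in the comment is an assumption about how Time units render:

from twitter.common.quantity import Amount, Time

def rate_gauge_name(name, window=Amount(1, Time.SECONDS)):
  # e.g. rate_gauge_name('requests') -> 'requests_per_1secs' (assumed unit spelling)
  return '%s_per_%s%s' % (name, window.amount(), window.unit())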

github apache/aurora: src/main/python/apache/aurora/executor/common/announcer.py
    initial_interval = mesos_task.health_check_config().initial_interval_secs().get()
    interval = mesos_task.health_check_config().interval_secs().get()
    consecutive_failures = mesos_task.health_check_config().max_consecutive_failures().get()
    timeout_secs = initial_interval + (consecutive_failures * interval)

    return AnnouncerChecker(
      client, path, timeout_secs, endpoint, additional=additional, shard=assigned_task.instanceId,
      name=self.name)


class DefaultAnnouncerCheckerProvider(AnnouncerCheckerProvider):
  DEFAULT_RETRY_MAX_DELAY = Amount(5, Time.MINUTES)
  DEFAULT_RETRY_POLICY = KazooRetry(
      max_tries=None,
      ignore_expire=True,
      max_delay=DEFAULT_RETRY_MAX_DELAY.as_(Time.SECONDS),
  )

  def __init__(self, ensemble, root='/aurora', allow_custom_serverset_path=False,
               hostname=None, zk_auth=None):
    self._ensemble = ensemble
    self._root = root
    self._zk_auth = zk_auth
    super(DefaultAnnouncerCheckerProvider, self).__init__(allow_custom_serverset_path, hostname)

  def make_zk_client(self):
    if self._zk_auth is None:
      auth_data = None
      default_acl = None
    else:
      auth_data = [(a.scheme().get(), a.credential().get()) for a in self._zk_auth.auth()]
      default_acl = [to_acl(a) for a in self._zk_auth.acl()]
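
Note the pattern at class scope above: DEFAULT_RETRY_MAX_DELAY is declared as an Amount for readability, then converted to plain seconds with as_(Time.SECONDS) before being handed to KazooRetry, which knows nothing about twitter.common.quantity.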

github apache/aurora: src/main/python/apache/aurora/executor/aurora_executor.py
from .common.sandbox import DefaultSandboxProvider
from .common.status_checker import ChainedStatusChecker
from .common.task_info import assigned_task_from_mesos_task
from .common.task_runner import TaskError, TaskRunner, TaskRunnerProvider
from .executor_base import ExecutorBase
from .status_manager import StatusManager


def propagate_deadline(*args, **kw):
  return deadline(*args, daemon=True, propagate=True, **kw)


class AuroraExecutor(ExecutorBase, Observable):
  PERSISTENCE_WAIT = Amount(5, Time.SECONDS)
  SANDBOX_INITIALIZATION_TIMEOUT = Amount(10, Time.MINUTES)
  START_TIMEOUT = Amount(2, Time.MINUTES)
  STOP_WAIT = Amount(5, Time.SECONDS)

  def __init__(
      self,
      runner_provider,
      status_manager_class=StatusManager,
      sandbox_provider=DefaultSandboxProvider(),
      status_providers=(),
      clock=time,
      no_sandbox_create_user=False,
      sandbox_mount_point=None,
      stop_timeout_in_secs=120):

    ExecutorBase.__init__(self)
    if not isinstance(runner_provider, TaskRunnerProvider):
      raise TypeError('runner_provider must be a TaskRunnerProvider, got %s' %
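
As in the thermos example, every timeout here is a unit-bearing Amount class constant rather than a bare number; the one exception, the stop_timeout_in_secs=120 argument, encodes its unit in its name instead.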

github apache/aurora: src/python/twitter/aurora/client/quickrun.py
    Constraint,
    Job,
    Packer,
    Process,
    Resources,
    SequentialTask)
from twitter.aurora.config import AuroraConfig
from twitter.aurora.common_internal.packer_client import TwitterPacker

from gen.twitter.aurora.ttypes import ResponseCode, ScheduleStatus

from .job_monitor import JobMonitor


class Quickrun(object):
  QUERY_INTERVAL = Amount(30, Time.SECONDS)
  WAIT_STATES = frozenset([
      ScheduleStatus.PENDING,
      ScheduleStatus.ASSIGNED,
      ScheduleStatus.STARTING])
  ACTIVE_STATES = frozenset([
      ScheduleStatus.RUNNING])
  FINISHED_STATES = frozenset([
      ScheduleStatus.FAILED,
      ScheduleStatus.FINISHED,
      ScheduleStatus.KILLED])
  PACKAGE_NAME = '__quickrun'

  def __init__(self,
               cluster_name,
               command,
               name,

github apache/aurora: src/python/twitter/thermos/monitoring/disk.py
def run(self):
    """ Loop indefinitely, periodically processing watchdog/inotify events. """
    self._initialize()
    log.debug("Initialization complete. Moving to handling events.")
    while True:
      deadline = time.time() + self.COLLECTION_INTERVAL.as_(Time.SECONDS)
      if not self._queue.empty():
        self._to_process, self._queue = self._queue, Queue()
        self._process_events()
      time.sleep(max(0, deadline - time.time()))
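
Note that the deadline for the next iteration is computed before the work is done, so slow event processing shortens the subsequent sleep instead of stretching the interval. The same fixed-interval pattern, reduced to its core (poll_forever and handle_events are illustrative names):

import time

from twitter.common.quantity import Amount, Time

COLLECTION_INTERVAL = Amount(5, Time.SECONDS)

def poll_forever(handle_events):
  while True:
    deadline = time.time() + COLLECTION_INTERVAL.as_(Time.SECONDS)
    handle_events()  # do the per-iteration work
    time.sleep(max(0, deadline - time.time()))  # clamp to zero if the work overran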

github apache/aurora: src/main/python/apache/aurora/client/api/job_monitor.py
from gen.apache.aurora.constants import (
    LIVE_STATES,
    TERMINAL_STATES
)
from gen.apache.aurora.ttypes import (
    Identity,
    TaskQuery
)

from thrift.transport import TTransport


class JobMonitor(object):
  MIN_POLL_INTERVAL = Amount(10, Time.SECONDS)
  MAX_POLL_INTERVAL = Amount(2, Time.MINUTES)

  @classmethod
  def running_or_finished(cls, status):
    return status in (LIVE_STATES | TERMINAL_STATES)

  @classmethod
  def terminal(cls, status):
    return status in TERMINAL_STATES

  # TODO(ksweeney): Make this use the AuroraJobKey
  def __init__(self, client, role, env, jobname):
    self._client = client
    self._query = TaskQuery(owner=Identity(role=role), environment=env, jobName=jobname)
    self._initial_tasks = set()
    self._initial_tasks = set(task.assignedTask.taskId for task in self.iter_query())
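
MIN_POLL_INTERVAL and MAX_POLL_INTERVAL bound how aggressively the monitor polls the scheduler. The excerpt cuts off before the polling loop itself, but a typical clamped backoff over such constants might look like the following; the doubling policy is an assumption, not Aurora's actual code:

from twitter.common.quantity import Amount, Time

MIN_POLL_INTERVAL = Amount(10, Time.SECONDS)
MAX_POLL_INTERVAL = Amount(2, Time.MINUTES)

def next_poll_interval(current_secs):
  # Double on each poll, clamped to [MIN, MAX] expressed in seconds.
  lo = MIN_POLL_INTERVAL.as_(Time.SECONDS)
  hi = MAX_POLL_INTERVAL.as_(Time.SECONDS)
  return min(max(current_secs * 2, lo), hi)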

github wickman/rainman: src/rainman/scheduler.py
import datetime
import random
import time

from tornado import gen
from twitter.common import log
from twitter.common.quantity import Amount, Data, Time


class TimeDecayMap(object):
  """A time-decaying map that drops entries older than a timeout."""

  DEFAULT_WINDOW = Amount(15, Time.SECONDS)

  def __init__(self, window=DEFAULT_WINDOW, clock=time):
    self._window = window.as_(Time.SECONDS)
    self._clock = clock
    self._slices = {}  # slice => list(peer)
    self._outstanding = 0

  @property
  def outstanding(self):
    """The number of outstanding requests.  This number is only an estimate
       since request filtering takes place on __getitem__/__contains__"""
    return self._outstanding

  def add(self, slice_, peer_id):
    """Indicate that we have requested slice_ from peer_id"""
    if slice_ not in self._slices:
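
Unlike the earlier examples, TimeDecayMap converts its window to raw seconds once in __init__ and stores the bare number, since every lookup compares it against clock deltas. The snippet is truncated, but an expiry check continuing the class above would plausibly read (hypothetical method, not the actual rainman code):

  def _expired(self, requested_at):
    # Entries older than the decay window no longer count as outstanding.
    return self._clock.time() - requested_at > self._window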

github apache/aurora: src/main/python/apache/thermos/monitoring/resource.py
    if enable_mesos_disk_collector:
      self.disk_collector_class = MesosDiskCollector

  def provides(self, sandbox):
    return self.disk_collector_class(sandbox, settings=self.settings)


class TaskResourceMonitor(ResourceMonitorBase, ExceptionalThread):
  """ Lightweight thread to aggregate resource consumption for a task's constituent processes.
      Actual resource calculation is delegated to collectors; this class periodically polls the
      collectors and aggregates into a representation for the entire task. Also maintains a limited
      history of previous sample results.
  """

  PROCESS_COLLECTION_INTERVAL = Amount(20, Time.SECONDS)
  HISTORY_TIME = Amount(1, Time.HOURS)

  def __init__(
      self,
      task_id,
      task_monitor,
      disk_collector_provider=DiskCollectorProvider(),
      process_collection_interval=PROCESS_COLLECTION_INTERVAL,
      disk_collection_interval=DiskCollectorSettings.DISK_COLLECTION_INTERVAL,
      history_time=HISTORY_TIME,
      history_provider=HistoryProvider()):

    """
      task_monitor: TaskMonitor object specifying the task whose resources should be monitored
      sandbox: Directory for which to monitor disk utilisation
    """
    self._task_monitor = task_monitor  # exposes PIDs, sandbox

github apache/aurora: src/python/twitter/aurora/client/api/scheduler_client.py
def _connect(self):
    return self._connect_scheduler(self._host, self._port, with_ssl=self._ssl)

  @property
  def url(self):
    # TODO(wickman) This is broken -- make this tunable in MESOS-3005
    return 'http://%s:8081' % self._host


class SchedulerProxy(object):
  """
    This class is responsible for creating a reliable thrift client to the
    twitter scheduler.  Basically all the dirty work needed by the
    AuroraClientAPI.
  """
  CONNECT_MAXIMUM_WAIT = Amount(1, Time.MINUTES)
  RPC_RETRY_INTERVAL = Amount(5, Time.SECONDS)
  RPC_MAXIMUM_WAIT = Amount(10, Time.MINUTES)
  UNAUTHENTICATED_RPCS = frozenset([
    'populateJobConfig',
    'getTasksStatus',
    'getJobs',
    'getQuota',
    'getVersion',
  ])

  class Error(Exception): pass
  class TimeoutError(Error): pass
  class AuthenticationError(Error): pass
  class APIVersionError(Error): pass

  def __init__(self, cluster, verbose=False, session_key_factory=make_session_key):
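
The snippet ends before the retry machinery, but the three Amounts above describe it: connections are bounded by CONNECT_MAXIMUM_WAIT, and RPCs are retried every RPC_RETRY_INTERVAL until RPC_MAXIMUM_WAIT elapses. A self-contained sketch of such a loop under those assumptions (retry_rpc is illustrative, not the actual SchedulerProxy code):

import time

from twitter.common.quantity import Amount, Time

RPC_RETRY_INTERVAL = Amount(5, Time.SECONDS)
RPC_MAXIMUM_WAIT = Amount(10, Time.MINUTES)

def retry_rpc(call):
  # Bound total retry time, sleeping RPC_RETRY_INTERVAL between attempts.
  deadline = time.time() + RPC_MAXIMUM_WAIT.as_(Time.SECONDS)
  while time.time() < deadline:
    try:
      return call()
    except Exception:
      time.sleep(RPC_RETRY_INTERVAL.as_(Time.SECONDS))
  raise RuntimeError('scheduler RPC retries exhausted')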