How to use the amplify.agent.common.context.context function in amplify

To help you get started, we’ve selected a few amplify examples based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github nginxinc / nginx-amplify-agent / test / unit / ext / phpfpm / collectors / pool / metrics.py — view on GitHub (external)
def teardown_method(self, method):
        """
        Per-test cleanup hook.

        Resets the global context's object tank — presumably so objects
        registered by this test don't leak into the next one (TODO: confirm
        against the context implementation) — then defers to the parent
        class's teardown.
        """
        context._setup_object_tank()
        super(PHPFPMPoolMetricsCollectorTestCase, self).teardown_method(method)
github nginxinc / nginx-amplify-agent / test / unit / ext / mysql / objects.py View on Github external
def test_init_ipv4(self):
        """
        MySQLObject should be constructable when the app config specifies a
        TCP host/port instead of a unix socket, and should derive its
        connection_args from that config.
        """
        # Force the TCP path: remove the socket entry and set an IPv4
        # host/port in the shared app config fixture.
        del context.app_config['mysql']['unix_socket']
        context.app_config['mysql']['host'] = '10.10.10.10'
        context.app_config['mysql']['port'] = '3306'

        # Sanity-check the fixture: the port is stored as a string, not an int.
        assert_that(isinstance(context.app_config['mysql']['port'], str), equal_to(True))

        # Realistic mysqld command line, including --socket and --port flags.
        mysql_obj = MySQLObject(
            local_id=123,
            pid=2,
            cmd='/usr/sbin/mysqld --basedir=/usr '
                '--datadir=/var/lib/mysql --plugin-dir=/usr/lib/mysql/plugin '
                '--user=mysql --log-error=/var/log/mysql/error.log '
                '--pid-file=/var/run/mysqld/mysqld.pid --socket=/var/run/mysqld/mysqld.sock --port=3306',
            conf_path='/etc/mysql/my.cnf'
        )
        assert_that(mysql_obj, not_none())

        assert_that(mysql_obj.connection_args, equal_to(
github nginxinc / nginx-amplify-agent / amplify / ext / mysql / util.py View on Github external
:param bin_path: str bin path
    :return: tuple (str parsed version, str full version line),
        or None (implicitly) if the subprocess call fails
    """
    try:
        # Run the version command (e.g. "mysqld --version"); subp.call
        # returns the captured stdout lines as the first element.
        raw_stdout, _ = subp.call(VERSION_CMD % bin_path)

        # also trying to get the first line of output
        # here's the line that we are interested in::
        # mysqld  Ver 5.5.55-0ubuntu0.14.04.1 for debian-linux-gnu on x86_64 ((Ubuntu))
        raw_line = raw_stdout[0]
    except Exception as e:
        exc_name = e.__class__.__name__
        # this is being logged as debug only since we will rely on bin_path
        # collection error to tip off support as to what is going wrong with
        # version detection
        context.log.debug(
            'failed to get version info from "%s" due to %s' %
            (bin_path, exc_name)
        )
        context.log.debug('additional info:', exc_info=True)
        # NOTE: falls through and implicitly returns None on failure
    else:
        # Third whitespace-separated token is the raw version string.
        raw_version = raw_line.split()[2]  # 5.5.55-0ubuntu0.14.04.1

        # Keep only the leading dotted-numeric prefix: stop at the first
        # character that is neither a digit nor '.' (yields e.g. "5.5.55").
        version = []
        for char in raw_version:
            if char.isdigit() or char == '.':
                version.append(char)
            else:
                break

        return ''.join(version), raw_line
github nginxinc / nginx-amplify-agent / amplify / agent / pipelines / syslog.py View on Github external
def start(self):
        """
        Run the syslog listener loop on the current thread.

        Names the thread after this pipeline, registers a thread id with the
        global context, then drives asyncore repeatedly until self.running
        is cleared (presumably by a stop() method not visible here).
        """
        current_thread().name = self.name
        context.setup_thread_id()

        self.running = True

        while self.running:
            # Brief pause between asyncore passes to avoid a hot spin.
            self._wait(0.1)
            # This means that we don't increment every time a UDP message is handled, but rather every listen "period"
            context.inc_action_id()
            asyncore.loop(timeout=self.interval, count=10)
            # count is arbitrary since timeout is unreliable at breaking asyncore.loop
github nginxinc / nginx-amplify-agent / amplify / agent / common / util / container.py View on Github external
https://docs.docker.com/engine/reference/api/docker_remote_api/

    The problem is that this API is optional and does not have a standard location from within a container (or rather it
    can be configured to listen anywhere).  Instead, this check will parse the `/proc` filesystem trying to parse the
    docker ID from the output.  If we find an ID, we will assume that we are in a docker container.

    :return: Bool True if docker ID is found, False otherwise.
    """
    try:
        # Take the first docker cgroup line from /proc/self/cgroup and strip
        # everything before the container id with sed.
        stdout, _ = subp.call('cat /proc/self/cgroup | fgrep -e docker | head -n 1 | sed "s/.*docker\/\(.*\)/\\1/"')
        docker_id = stdout[0]
        # A real container id is 64 characters; the space check guards
        # against sed passing an unmatched line through unchanged.
        return len(docker_id) == 64 and ' ' not in docker_id

    except Exception as e:
        # Best-effort detection: any failure (no /proc, empty output,
        # subprocess error) is treated as "not in a container".
        context.log.error('failed to find docker id due to %s' % e.__class__.__name__)
        context.log.debug('additional info:', exc_info=True)
        return False
github nginxinc / nginx-amplify-agent / amplify / agent / managers / nginx.py View on Github external
def _find_all():
        """
        Tries to find all master processes

        :return: list of dict: nginx object definitions
        """
        # get ps info
        # 'nginx[:]' is the self-excluding grep trick: it matches "nginx:"
        # in process titles but not this grep's own command line.
        ps_cmd = "ps xao pid,ppid,command | grep 'nginx[:]'"
        try:
            ps, _ = subp.call(ps_cmd)
            context.log.debug('ps nginx output: %s' % ps)
        except:
            # NOTE(review): bare except also catches SystemExit/
            # KeyboardInterrupt; left as-is (documentation-only change).
            context.log.debug('failed to find running nginx via %s' % ps_cmd)
            context.log.debug('additional info:', exc_info=True)
            # Surface a user-visible event when a root object is available.
            if context.objects.root_object:
                context.objects.root_object.eventd.event(
                    level=INFO,
                    message='no nginx found'
                )
            return []

        # return an empty list if there are no master processes
        if not any('nginx: master process' in line for line in ps):
            context.log.debug('nginx masters amount is zero')
            return []

        # collect all info about processes
        masters = {}
        try:
            for line in ps:
                # parse ps response line:
github nginxinc / nginx-amplify-agent / amplify / agent / collectors / nginx / metrics.py View on Github external
nginx.http.conn.current = connections.active + connections.idle
        nginx.http.conn.idle = connections.idle
        nginx.http.request.count = requests.total  ## counter
        nginx.http.request.current = requests.current

        plus.http.ssl.handshakes = ssl.handshakes
        plus.http.ssl.failed = ssl.handshakes_failed
        plus.http.ssl.reuses = ssl.session_reuses

        also here we run plus metrics collection
        """
        # One timestamp for the whole collection pass, paired with the
        # status payload when cached below.
        stamp = int(time.time())

        # get plus status body
        try:
            status = context.http_client.get(self.object.plus_status_internal_url, timeout=1, log=False)

            # modify status to move stream data up a level
            if 'stream' in status:
                status['streams'] = status['stream'].get('server_zones', {})
                status['stream_upstreams'] = status['stream'].get('upstreams', {})

            # Add the status payload to plus_cache so it can be parsed by other collectors (plus objects)
            context.plus_cache.put(self.object.plus_status_internal_url, (status, stamp))
        except GreenletExit:
            # Never swallow greenlet shutdown signals.
            raise
        except:
            # Any other failure is logged and collection continues with no status.
            context.log.error('failed to check plus_status url %s' % self.object.plus_status_internal_url)
            context.log.debug('additional info', exc_info=True)
            status = None

        if not status:
github nginxinc / nginx-amplify-agent / amplify / agent / managers / bridge.py View on Github external
context.log.debug(
            'modified payload; current payload stats: '
            'meta - %s, metrics - %s, events - %s, configs - %s' % (
                len(self.payload['meta']),
                len(self.payload['metrics']),
                len(self.payload['events']),
                len(self.payload['configs'])
            )
        )

        # Send payload to backend.
        try:
            self.last_http_attempt = time.time()

            self._pre_process_payload()  # Convert deques to lists for encoding
            context.http_client.post('update/', data=self.payload)
            context.default_log.debug(self.payload)
            self._reset_payload()  # Clear payload after successful

            if self.first_run:
                self.first_run = False  # Set first_run to False after first successful send

            if self.http_delay:
                self.http_fail_count = 0
                self.http_delay = 0  # Reset HTTP delay on success
                context.log.debug('successful update, reset http delay')
        except Exception as e:
            self._post_process_payload()  # Convert lists to deques since send failed

            # A 503 means the backend is applying backpressure: record a
            # server-derived delay before the next attempt.
            if isinstance(e, HTTPError) and e.response.status_code == 503:
                backpressure_error = HTTP503Error(e)
                context.backpressure_time = int(time.time() + backpressure_error.delay)