# Secure your code as it's written. Use Snyk Code to scan source code in minutes — no build needed — and fix issues immediately.
def submit_and_assess_metric_payload(self, serie, attach_host_name=True):
    """
    Helper to assess the metric payload format.

    :param serie: a single metric dict (forwarded as keyword arguments to
        ``Metric.send`` and then wrapped in a list) or a list of metric dicts.
    :param attach_host_name: when True, each payload entry is expected to
        carry a ``host`` key equal to ``api._host_name``.

    NOTE(review): this copy is truncated — the points assertions hinted at
    by the trailing comment are missing, and the same helper is defined
    again later in this file (the later definition wins at import time).
    """
    now = time()  # NOTE(review): unused in the visible lines — confirm the truncated remainder needs it
    if isinstance(serie, dict):
        # deepcopy presumably shields the dict we assert against from
        # mutation inside Metric.send — TODO confirm
        Metric.send(attach_host_name=attach_host_name, **deepcopy(serie))
        serie = [serie]
    else:
        Metric.send(deepcopy(serie), attach_host_name=attach_host_name)
    payload = self.get_request_data()
    for i, metric in enumerate(payload['series']):
        if attach_host_name:
            self.assertEqual(set(metric.keys()), set(['metric', 'points', 'host']))
            self.assertEqual(metric['host'], api._host_name)
        else:
            self.assertEqual(set(metric.keys()), set(['metric', 'points']))
        self.assertEqual(metric['metric'], serie[i]['metric'])
    # points is a list of 1 point (assertions truncated in this copy)
def submit_and_assess_metric_payload(self, serie, attach_host_name=True):
    """
    Send *serie* through ``Metric.send`` and verify the captured payload.

    A dict is forwarded as keyword arguments and then wrapped in a list so
    the per-entry assertions below apply uniformly; a list is sent as-is.
    Each entry must expose exactly the expected keys, echo the submitted
    metric name, and carry its points as a single-element list.
    """
    now = time()
    if isinstance(serie, dict):
        Metric.send(attach_host_name=attach_host_name, **deepcopy(serie))
        serie = [serie]
    else:
        Metric.send(deepcopy(serie), attach_host_name=attach_host_name)

    payload = self.get_request_data()
    for index, entry in enumerate(payload['series']):
        expected_keys = {'metric', 'points'}
        if attach_host_name:
            expected_keys.add('host')
            self.assertEqual(set(entry.keys()), expected_keys)
            self.assertEqual(entry['host'], api._host_name)
        else:
            self.assertEqual(set(entry.keys()), expected_keys)
        self.assertEqual(entry['metric'], serie[index]['metric'])
        # 'points' must be a list holding exactly one point
        self.assertTrue(isinstance(entry['points'], list))
        self.assertEqual(len(entry['points']), 1)
def test_metrics(self):
    """
    Integration test: submit metrics whose points are a scalar, a list of
    (timestamp, value) pairs, and a single tuple, asserting each call
    reports status "ok".

    NOTE(review): this definition is truncated in this file — the final
    ``dog.Metric.send`` call is cut off mid-argument-list.
    """
    now = datetime.datetime.now()
    now_ts = int(time.mktime(now.timetuple()))
    # Timestamp-suffixed names keep each run's metrics/host distinct
    metric_name_single = "test.metric_single." + str(now_ts)
    metric_name_list = "test.metric_list." + str(now_ts)
    metric_name_tuple = "test.metric_tuple." + str(now_ts)
    host_name = "test.host." + str(now_ts)
    def retry_condition(r):
        # True while the queried series is still empty — presumably used
        # by a retry helper in the truncated remainder (verify)
        return not r["series"]
    # Send metrics with single and multi points, and with compression
    assert (
        dog.Metric.send(metric=metric_name_single, points=1, host=host_name)[
            "status"
        ]
        == "ok"
    )
    points = [(now_ts - 60, 1), (now_ts, 2)]
    assert (
        dog.Metric.send(metric=metric_name_list, points=points, host=host_name)[
            "status"
        ]
        == "ok"
    )
    points = (now_ts - 60, 1)
    assert (
        dog.Metric.send(
            metric=metric_name_tuple,
            points=points,
def send_metric(self, metric, value, tags=None, host=None):
    """
    Send a single metric value to Datadog under the ``ansible.`` prefix.

    :param metric: metric name; ``"ansible."`` is prepended before sending.
    :param value: point value forwarded to ``datadog.api.Metric.send``.
    :param tags: optional iterable of extra tags; ``self.default_tags`` is
        always appended. The caller's list is never mutated.
    :param host: optional host name, normalized via
        ``self._generate_datadog_hostname``.
    """
    # Copy before extending: the original extended the caller's list in
    # place, leaking the default tags back into the caller's data.
    all_tags = list(tags) if tags else []
    all_tags.extend(self.default_tags)
    host = self._generate_datadog_hostname(host)
    try:
        datadog.api.Metric.send(
            metric="ansible.{0}".format(metric),
            points=value,
            tags=all_tags,
            host=host,
        )
    except Exception as e:
        # Best-effort by design: an API error must not fail the Ansible run
        print('Couldn\'t send metric "{0}" to Datadog'.format(metric))
        print(e)
res = requests.get(url, params=params)
# Each quantity of interest comes back as a response header; turn every
# header into one metric entry tagged with the endpoint it was read from.
payload.extend(
    {
        'metric': quantity,
        'points': int(res.headers[quantity]),
        'tags': ["endpoint:%s" % endpoint],
    }
    for quantity in API_QUANTITIES
)
# Post metrics
datadog.api.Metric.send(payload)
from datadog import initialize, api
import os
import time

# SECURITY: the original committed literal API/app keys to source control.
# Hardcoded credentials must never live in code — read them from the
# environment instead, and rotate any keys that were already exposed.
options = {
    'api_key': os.environ.get('DATADOG_API_KEY'),
    'app_key': os.environ.get('DATADOG_APP_KEY'),
}
initialize(**options)

# Taking the last 24 hours
from_time = int(time.time()) - 60 * 60 * 24 * 1
result = api.Metric.list(from_time)
print(result)
# NOTE(review): this fragment references ``self`` and so belongs inside a
# method body that is not visible here.
# Sort tasks slowest-first; value[1][1] appears to be the elapsed-seconds
# field (see the '{0:.02f}s' formatting and the sum labelled "Total time"
# below) — confirm against the structure of self.stats.
results = sorted(self.stats.items(), key=lambda value: value[1][1], reverse=True)
# Datadog credentials come from the environment; reporting is skipped
# (not failed) when either key is absent.
datadog_api_key = os.getenv('DATADOG_API_KEY')
datadog_app_key = os.getenv('DATADOG_APP_KEY')
datadog_api_initialized = True
if datadog_api_key and datadog_app_key:
    datadog.initialize(api_key=datadog_api_key,
                       app_key=datadog_app_key)
else:
    datadog_api_initialized = False
# send the metric to datadog
if datadog_api_initialized:
    for name, points in results:
        datadog.api.Metric.send(
            metric="edx.ansible.{0}.task_duration".format(name.replace(" | ", ".").replace(" ", "-").lower()),
            # NOTE(review): date_happened=[0] looks wrong — the other call
            # sites in this file pass a Unix timestamp from time.mktime,
            # not a list; confirm the intended value.
            date_happened=[0],
            points=points[1],
        )
# Log the elapsed time of each task, dash-padded into aligned columns
# (name left-padded to 80, duration right-padded to 8).
for task_name, timing in results:
    left = '{0} '.format(task_name)
    right = ' {0:.02f}s'.format(timing[1])
    logger.info("{0:-<80}{1:->8}".format(left, right))
# NOTE(review): fragment of a method body — ``self`` and ``return`` are
# only valid inside the enclosing (not visible) function.
# Total time to run the complete playbook
total_seconds = sum([x[1][1] for x in self.stats.items()])
# Bail out early when Datadog reporting was never initialized.
if not self.datadog_api_initialized:
    return
# One metric entry per task, stamped with its start time and tagged with
# sanitized task and playbook names.
datadog_tasks_metrics = [
    {
        'metric': 'edx.ansible.task_duration',
        'date_happened': time.mktime(stamp.start.timetuple()),
        'points': stamp.duration.total_seconds(),
        'tags': [
            'task:{0}'.format(self.clean_tag_value(task_name)),
            'playbook:{0}'.format(self.clean_tag_value(playbook_name)),
        ],
    }
    for task_name, stamp in results.items()
]
try:
    # Ship the per-task batch first, then the overall playbook duration.
    datadog.api.Metric.send(datadog_tasks_metrics)
    datadog.api.Metric.send(
        metric="edx.ansible.playbook_duration",
        date_happened=time.mktime(playbook_timestamp.start.timetuple()),
        points=playbook_timestamp.duration.total_seconds(),
        tags=["playbook:{0}".format(self.clean_tag_value(playbook_name))],
    )
except Exception:
    # Metrics reporting must never break the play; log and carry on.
    LOGGER.exception("Failed to log timing data to datadog")