Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_init(self):
    """Metric construction requires both a descriptor and time series."""
    # A missing descriptor or missing time series list must be rejected.
    with self.assertRaises(ValueError):
        metric.Metric(Mock(), None)
    with self.assertRaises(ValueError):
        metric.Metric(None, Mock())

    # With both arguments present, construction succeeds and the
    # attributes reflect what was passed in.
    ts_mock = Mock(spec=time_series.TimeSeries)
    ts_mock.check_points_type.return_value = True
    descriptor_mock = Mock(spec=metric_descriptor.MetricDescriptor)
    descriptor_mock.type = (
        metric_descriptor.MetricDescriptorType.GAUGE_INT64)

    mm = metric.Metric(descriptor_mock, [ts_mock])
    self.assertEqual(mm.descriptor, descriptor_mock)
    self.assertEqual(mm.time_series, [ts_mock])
def test_view_data_to_metric(self):
    """Run the view-data conversion check once per aggregation kind."""
    # (aggregation class, expected value class, expected descriptor type)
    cases = (
        (aggregation.SumAggregation,
         value.ValueDouble,
         metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE),
        (aggregation.CountAggregation,
         value.ValueLong,
         metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64),
        (aggregation.DistributionAggregation,
         value.ValueDistribution,
         metric_descriptor.MetricDescriptorType.CUMULATIVE_DISTRIBUTION),
    )
    for agg_class, value_class, descriptor_type in cases:
        self.do_test_view_data_to_metric(
            agg_class, value_class, descriptor_type)
def __init__(self, name, description, unit, type_, label_keys):
    """Validate the arguments, then store them on private attributes.

    Raises ValueError when `type_` is not a recognized descriptor type,
    when `label_keys` is None, or when any individual key is None.
    """
    # Validation order matters only for which error the caller sees
    # first; it is preserved from the original contract.
    if type_ not in MetricDescriptorType:
        raise ValueError("Invalid type")
    if label_keys is None:
        raise ValueError("label_keys must not be None")
    for key in label_keys:
        if key is None:
            raise ValueError("label_keys must not contain null keys")

    self._name = name
    self._description = description
    self._unit = unit
    self._type = type_
    self._label_keys = label_keys
def __repr__(self):
    """Debug representation: class name plus name/description/unit/type."""
    # The type constant is rendered via the name of its value class.
    type_name = MetricDescriptorType.to_type_class(self.type).__name__
    return ('%s(name="%s", description="%s", unit=%s, type=%s)'
            % (type(self).__name__,
               self.name,
               self.description,
               self.unit,
               type_name))
)
elif (metric.descriptor.type ==
metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64):
sd_point.value.int64_value = int(point.value.value)
elif (metric.descriptor.type ==
metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE):
sd_point.value.double_value = float(point.value.value)
elif (metric.descriptor.type ==
metric_descriptor.MetricDescriptorType.GAUGE_INT64):
sd_point.value.int64_value = int(point.value.value)
elif (metric.descriptor.type ==
metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE):
sd_point.value.double_value = float(point.value.value)
# TODO: handle SUMMARY metrics, #567
else: # pragma: NO COVER
raise TypeError("Unsupported metric type: {}"
.format(metric.descriptor.type))
end = point.timestamp
if ts.start_timestamp is None:
start = end
else:
start = datetime.strptime(ts.start_timestamp, EPOCH_PATTERN)
timestamp_start = (start - EPOCH_DATETIME).total_seconds()
timestamp_end = (end - EPOCH_DATETIME).total_seconds()
def __contains__(cls, item):
    """Return True iff *item* is one of the declared descriptor types."""
    valid_types = (
        MetricDescriptorType.GAUGE_INT64,
        MetricDescriptorType.GAUGE_DOUBLE,
        MetricDescriptorType.GAUGE_DISTRIBUTION,
        MetricDescriptorType.CUMULATIVE_INT64,
        MetricDescriptorType.CUMULATIVE_DOUBLE,
        MetricDescriptorType.CUMULATIVE_DISTRIBUTION,
    )
    return item in valid_types
:type val: float
:param val: Value to add.
"""
if val > 0:
super(CumulativePointDouble, self).add(val)
class LongCumulativeMixin(object):
    """Mixin fixing the point and descriptor types for long-valued
    cumulative measures.
    """
    point_type = CumulativePointLong
    descriptor_type = metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64
class DoubleCumulativeMixin(object):
    """Mixin fixing the point and descriptor types for float-valued
    cumulative measures.
    """
    point_type = CumulativePointDouble
    descriptor_type = metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE
class LongCumulative(LongCumulativeMixin, gauge.Gauge):
    """Records cumulative int-valued measurements.

    Combines the type configuration from LongCumulativeMixin with the
    gauge.Gauge implementation.
    """
class DoubleCumulative(DoubleCumulativeMixin, gauge.Gauge):
    """Records cumulative float-valued measurements.

    Combines the type configuration from DoubleCumulativeMixin with the
    gauge.Gauge implementation.
    """
class DerivedLongCumulative(LongCumulativeMixin, gauge.DerivedGauge):
    """Records derived cumulative int-valued measurements.

    Combines the type configuration from LongCumulativeMixin with the
    gauge.DerivedGauge implementation.
    """
class DerivedDoubleCumulative(DoubleCumulativeMixin, gauge.DerivedGauge):
def __contains__(cls, item):
    """Return True when *item* is a recognized MetricDescriptorType value."""
    known_types = {
        MetricDescriptorType.GAUGE_INT64,
        MetricDescriptorType.GAUGE_DOUBLE,
        MetricDescriptorType.GAUGE_DISTRIBUTION,
        MetricDescriptorType.CUMULATIVE_INT64,
        MetricDescriptorType.CUMULATIVE_DOUBLE,
        MetricDescriptorType.CUMULATIVE_DISTRIBUTION,
    }
    return item in known_types
def get_metric_type(measure):
    """Get the MetricDescriptorType for the metric produced by this
    aggregation and measure.

    :param measure: The measure being aggregated; expected to be a
        ``measure_module.MeasureInt`` or ``measure_module.MeasureFloat``.

    :return: ``MetricDescriptorType.CUMULATIVE_INT64`` for int measures,
        ``MetricDescriptorType.CUMULATIVE_DOUBLE`` for float measures.

    :raises ValueError: If *measure* is neither a MeasureInt nor a
        MeasureFloat.
    """
    if isinstance(measure, measure_module.MeasureInt):
        return MetricDescriptorType.CUMULATIVE_INT64
    if isinstance(measure, measure_module.MeasureFloat):
        return MetricDescriptorType.CUMULATIVE_DOUBLE
    # Fix: the original bare `raise ValueError` carried no diagnostic;
    # include the offending type so callers can tell what was passed.
    raise ValueError(
        "unsupported measure type: {}".format(type(measure).__name__))
def export_metrics(self, metrics):
if metrics:
envelopes = []
for metric in metrics:
# No support for histogram aggregations
type_ = metric.descriptor.type
if type_ != MetricDescriptorType.CUMULATIVE_DISTRIBUTION:
md = metric.descriptor
# Each time series will be uniquely identified by its
# label values
for time_series in metric.time_series:
# Using stats, time_series should only have one point
# which contains the aggregated value
data_point = self.create_data_points(
time_series, md)[0]
# The timestamp is when the metric was recorded
time_stamp = time_series.points[0].timestamp
# Get the properties using label keys from metric and
# label values of the time series
properties = self.create_properties(time_series, md)
envelopes.append(self.create_envelope(data_point,
time_stamp,
properties))