# debug
print "sending message"
print '-' * 70
print message
# send!
sock.sendall(message)
time.sleep(2) # NB - allows file operations to complete
# check if data file was created
tagFile = os.path.join(temp_dir, "storage", "whisper", "folder", tag + ".wsp")
self.assertTrue(os.path.exists(tagFile))
print(whisper.fetch(tagFile, now - self.step*(num_data_points), now))
data_period_info, stored_data = whisper.fetch(tagFile, now - self.step*(num_data_points), now)
for whisper_data, sent_data in zip(reversed(stored_data), reversed(data)):
self.assertAlmostEqual(whisper_data, sent_data[1])
time.sleep(stime)
aggregated_data = aggregate(to_aggregate)
data.append(aggregated_data)
print(aggregated_data)
print('')
print('')
time.sleep(2) # NB - allows file operations to complete
tagFile = os.path.join(temp_dir, "storage", "whisper", "folder", tag + ".wsp")
self.assertTrue(os.path.exists(tagFile))
data_period_info, stored_data = whisper.fetch(tagFile, start-1, time.time())
print('Whisper data period : ' + str(data_period_info))
print('Whisper data : ' + str(stored_data))
print('Data expected: ' + str(data))
print(len(stored_data))
print(list(zip(stored_data, data)))
for whisper_data, sent_data in list(zip(stored_data, data))[:-1]: # compare all but the last point, which may not have been flushed yet
self.assertAlmostEqual(whisper_data, sent_data)
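# Note: `aggregate` and `to_aggregate` are not defined in this snippet. A
# minimal sketch of such a helper, assuming the aggregation under test is a
# simple per-interval average (the name and behaviour are assumptions, not
# taken from the original source):
def aggregate(points): return float(sum(points)) / len(points)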
archive_step = 1
whisper.create(wsp, [(archive_step, archive_len)])
# given more points than the db can hold
excess_len = 1
num_input_points = archive_len + excess_len
test_now = int(time.time())
input_start = test_now - num_input_points + archive_step
input_points = [(input_start + i, random.random() * 10)
for i in range(num_input_points)]
# when the db is updated with too many points
whisper.update_many(wsp, input_points, now=test_now)
# then only the most recent input points (those at the end) were written
actual_time_info = whisper.fetch(wsp, 0, now=test_now)[0]
self.assertEqual(actual_time_info,
(input_points[-archive_len][0],
input_points[-1][0] + archive_step, # untilInterval = newest + step
archive_step))
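# For reference, whisper.fetch() returns a pair
# ((fromInterval, untilInterval, step), values), which is the shape the
# assertion above relies on. A minimal sketch of that relationship, reusing
# the wsp file created above:
(from_interval, until_interval, step), values = whisper.fetch(wsp, 0, now=test_now)
# one value slot per step between the two interval boundaries
assert until_interval == from_interval + step * len(values)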
def test_heal_target_missing(self):
testdb = "test-%s" % self.db
try:
os.unlink(testdb)
except (IOError, OSError):
pass
self._removedb()
schema = [(1, 20)]
self._createdb(self.db, schema)
original_data = whisper.fetch(self.db, 0)
# Healing into a missing target should create it and copy the
# source data across
heal_metric(self.db, testdb)
data = whisper.fetch(testdb, 0)
self.assertEqual(original_data, data)
def test_update_single_archive(self):
"""
Update with a single leveled archive
"""
retention_schema = [(1, 20)]
data = self._update(schema=retention_schema)
# fetch the data
fetch = whisper.fetch(self.filename, 0) # all data
fetch_data = fetch[1]
for i, (timestamp, value) in enumerate(data):
# is value in the fetched data?
self.assertEqual(value, fetch_data[i])
# check TimestampNotCovered
with AssertRaisesException(
whisper.TimestampNotCovered(
'Timestamp not covered by any archives in this database.')):
# in the future
whisper.update(self.filename, 1.337, time.time() + 1)
with AssertRaisesException(
whisper.TimestampNotCovered(
'Timestamp not covered by any archives in this database.')):
# before the oldest retained point (assumed counterpart to the future check above)
whisper.update(self.filename, 1.337, time.time() - retention_schema[0][0] * retention_schema[0][1] - 1)
def test_heal_source_corrupt(self):
testdb = "/dev/null"
self._removedb()
schema = [(1, 20)]
self._createdb(self.db, schema)
original_data = whisper.fetch(self.db, 0)
# This should log complaints but exit successfully as it cannot
# read from the source /dev/null
heal_metric(testdb, self.db)
data = whisper.fetch(self.db, 0)
self.assertEqual(original_data, data)
pass
schema = [(1, 20)]
emptyData = []
self._createdb(self.db, schema)
self._createdb(testdb, schema, emptyData)
heal_metric(self.db, testdb)
original_data = whisper.fetch(self.db, 0)
filled_data = whisper.fetch(testdb, 0)
self.assertEqual(original_data, filled_data)
# Heal again, should still be equal
heal_metric(self.db, testdb)
filled_data = whisper.fetch(testdb, 0)
self.assertEqual(original_data, filled_data)
def test_file_diff_invalid(self):
testdb = "test-%s" % self.filename
self.addCleanup(self._remove, testdb)
whisper.create(testdb, [(120, 10)])
whisper.create(self.filename, self.retention)
# Merging 2 archives with different retentions should fail
with open(testdb, 'rb') as fh_1:
with open(self.filename, 'rb+') as fh_2:
with AssertRaisesException(
NotImplementedError(
'test-db.wsp and db.wsp archive configurations are '
'unalike. Resize the input before diffing')):
whisper.file_diff(fh_1, fh_2)
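# By contrast, two whisper files created with identical retentions can be
# diffed without error. A minimal sketch, assuming whisper.diff() (the
# path-based counterpart of file_diff) and hypothetical file names:
whisper.create('diff-a.wsp', [(60, 10)])
whisper.create('diff-b.wsp', [(60, 10)])
print(whisper.diff('diff-a.wsp', 'diff-b.wsp'))  # per-archive list of differing points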
def test_create_and_info(self):
"""
Create a db and use info() to validate
"""
# check that an invalid configuration is rejected
for retention in (0, []):
with AssertRaisesException(
whisper.InvalidConfiguration(
'You must specify at least one archive configuration!')):
whisper.create(self.filename, retention)
# create a new db with a valid configuration
whisper.create(self.filename, self.retention)
# Ensure another file can't be created when one exists already
with AssertRaisesException(
whisper.InvalidConfiguration(
'File {0} already exists!'.format(self.filename))):
whisper.create(self.filename, self.retention)
info = whisper.info(self.filename)
# check header information
self.assertEqual(info['maxRetention'],
max([a[0] * a[1] for a in self.retention]))
self.assertEqual(info['aggregationMethod'], 'average')
self.assertEqual(info['xFilesFactor'], 0.5)
# check archive information
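# The header values checked above ('average', 0.5) are whisper's defaults and
# can be overridden via whisper.create(..., xFilesFactor=..., aggregationMethod=...).
# A minimal sketch of the per-archive checks this comment introduces, assuming
# self.retention is a list of (secondsPerPoint, points) tuples as used elsewhere:
for archive, (seconds_per_point, points) in zip(info['archives'], self.retention):
self.assertEqual(archive['secondsPerPoint'], seconds_per_point)
self.assertEqual(archive['points'], points)
self.assertEqual(archive['retention'], seconds_per_point * points)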
def test_file_fetch_edge_cases(self):
"""
Test some of the edge cases in file_fetch() that should return
None or raise an exception
"""
whisper.create(self.filename, [(1, 60)])
with open(self.filename, 'rb') as fh:
msg = "Invalid time interval: from time '{0}' is after until time '{1}'"
until_time = 0
from_time = int(time.time()) + 100
with AssertRaisesException(
whisper.InvalidTimeInterval(msg.format(from_time, until_time))):
whisper.file_fetch(fh, fromTime=from_time, untilTime=until_time)
# fromTime > now aka metrics from the future
self.assertIsNone(
whisper.file_fetch(fh, fromTime=int(time.time()) + 100,
untilTime=int(time.time()) + 200),
)
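# A valid interval, by contrast, returns a ((fromInterval, untilInterval, step),
# values) pair; a short sketch reusing the open file handle above:
time_info, values = whisper.file_fetch(fh, fromTime=int(time.time()) - 30, untilTime=int(time.time()))
print(time_info, len(values))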