# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def add_data_values(self, data_values):
    """Append one CSV row per data value to the output file.

    Each row carries: the local timestamp, its UTC offset (formatted as
    ``<offset>:00``), the timestamp converted to UTC, the measured value,
    and the censor/quality code ids.
    """
    def make_row(data_value):
        local_time = data_value.value_datetime
        offset_hours = data_value.value_datetime_utc_offset
        # UTC timestamp is derived by removing the stored hour offset.
        utc_time = local_time - timedelta(hours=offset_hours)
        return (
            local_time.strftime(self.date_format),
            '{0}:00'.format(offset_hours),
            utc_time.strftime(self.date_format),
            data_value.data_value,
            data_value.censor_code_id,
            data_value.quality_code_id,
        )

    rows = [make_row(data_value) for data_value in data_values]
    with self.open_csv_file() as output_file:
        UnicodeWriter(output_file).writerows(rows)
def build_csv(self):
    """Create the CSV file: metadata preamble first, then the header row."""
    with self.create_csv_file() as out:
        # Metadata is written as raw text ahead of any CSV-formatted rows.
        out.write(self.generate_metadata())
        writer = UnicodeWriter(out)
        writer.writerow(self.headers)
def generate_csv_file(result_ids, request=None): # type: (list, any) -> (str, StringIO)
"""
Gathers time series data for the passed in result id's to generate a csv file for download
"""
# Fetch every sensor matching the requested result ids, prefetching the
# related rows the metadata/header/value generators read below.
sensors = SiteSensor.objects\
.prefetch_related('sensor_output', 'registration', 'last_measurement')\
.filter(result_id__in=result_ids)\
.order_by('pk')
# Fail loudly when nothing matched.
# NOTE(review): ', '.join(result_ids) assumes the ids are strings — TODO confirm callers.
if not sensors.count():
raise ValueError('The results were not found (result id(s): {}).'.format(', '.join(result_ids)))
# Metadata is written as raw text; headers and values go through the CSV writer.
csv_file = StringIO()
csv_writer = UnicodeWriter(csv_file)
csv_file.write(CSVDataApi.generate_metadata(sensors))
csv_writer.writerow(CSVDataApi.generate_csv_headers(sensors))
csv_writer.writerows(CSVDataApi.get_data_values(sensors))
sensor = sensors.first()
# `result_ids` may be a single scalar id rather than a list; len() raising
# TypeError is used to detect the scalar case.
try:
resultids_len = len(result_ids)
except TypeError:
resultids_len = 1
# Multiple results get a generic per-site filename; a single result embeds
# the variable code and result id.
if resultids_len > 1:
filename = "{}_TimeSeriesResults".format(sensor.registration.sampling_feature_code)
else:
filename = "{0}_{1}_{2}".format(sensor.registration.sampling_feature_code,
sensor.sensor_output.variable_code, sensor.result_id)
# NOTE(review): `filename` is unused in the visible lines — this chunk looks
# truncated (no return statement visible); confirm against the full file.
def __init__(self, csvfile, fieldnames, restval='',
extrasaction='raise', dialect='excel', encoding='utf-8',
errors='strict', *args, **kwds):
"""Dict-style CSV writer that encodes output via UnicodeWriter.

:param csvfile: writable file-like object the CSV rows go to
:param fieldnames: column names, passed through to csv.DictWriter
:param encoding: target text encoding for written rows
:param errors: encoding error policy (e.g. 'strict', 'replace')

NOTE(review): *args/**kwds are forwarded to BOTH csv.DictWriter and
UnicodeWriter below — confirm both constructors accept the same extras.
"""
# Remember the target encoding for later row encoding.
self.encoding = encoding
# Set up the stdlib DictWriter machinery (fieldname handling, writeheader).
csv.DictWriter.__init__(self, csvfile, fieldnames, restval,
extrasaction, dialect, *args, **kwds)
# Override the plain writer DictWriter.__init__ installed with an
# encoding-aware UnicodeWriter; row writes now go through it.
self.writer = UnicodeWriter(csvfile, dialect, encoding=encoding,
errors=errors, *args, **kwds)
self.encoding_errors = errors
# NOTE(review): fragment — the enclosing function signature and the `try:`
# matching the `except TypeError:` below are outside this chunk; the first
# two lines continue a TimeSeriesResult queryset started above.
.select_related('result__feature_action__sampling_feature', 'result__variable') \
.filter(pk__in=result_ids)
except TypeError:
# If exception is raised, `result_ids` is not an iterable,
# so filter using 'pk'
time_series_result = TimeSeriesResult.objects \
.prefetch_related('values') \
.prefetch_related('result__feature_action__action__people') \
.select_related('result__feature_action__sampling_feature', 'result__variable') \
.filter(pk=result_ids)
# Fail loudly when nothing matched.
# NOTE(review): ', '.join(result_ids) assumes string ids — TODO confirm.
if not time_series_result:
raise ValueError('Time Series Result(s) not found (result id(s): {}).'.format(', '.join(result_ids)))
# Metadata is written as raw text; headers and values go through the CSV writer.
csv_file = StringIO()
csv_writer = UnicodeWriter(csv_file)
csv_file.write(CSVDataApi.generate_metadata(time_series_result, request=request))
csv_writer.writerow(CSVDataApi.get_csv_headers(time_series_result))
csv_writer.writerows(CSVDataApi.get_data_values(time_series_result))
result = time_series_result.first().result
# `result_ids` may be a single scalar id; len() raising TypeError marks that case.
try:
resultids_len = len(result_ids)
except TypeError:
resultids_len = 1
# Multiple results get a generic filename; a single result embeds the
# variable code and result id.
if resultids_len > 1:
filename = "{}_TimeSeriesResults".format(result.feature_action.sampling_feature.sampling_feature_code)
else:
filename = "{0}_{1}_{2}".format(result.feature_action.sampling_feature.sampling_feature_code,
result.variable.variable_code, result.result_id)