# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def cdf_read(fn: Path, validate: bool = False):
    """Open the CDF file at *fn* and return a ``cdfread.CDF`` reader.

    Parameters
    ----------
    fn : Path
        Path to the CDF file on disk.
    validate : bool
        Forwarded to ``cdfread.CDF``; when True the reader validates the file.
    """
    # Coerce to str: on Python 3.5 cdfread.CDF does not accept pathlib.Path.
    path_text = str(fn)
    return cdfread.CDF(path_text, validate=validate)
def _get_attdata(self, adr_info, entry_num, num_entry, first_entry, to_np=True):
    """Walk the AEDR linked list looking for *entry_num* and return its data.

    Starting at *first_entry*, follows up to *num_entry* AEDR links. For the
    matching entry, returns a dict with keys 'Item_Size', 'Data_Type',
    'Num_Items' and 'Data'. Raises KeyError when no entry matches.
    """
    offset = first_entry
    for _ in range(0, num_entry):
        # Cheap read: just the entry number and the next-record link.
        if self.cdfversion == 3:
            found_num, next_offset = self._read_aedr_fast(offset)
        else:
            found_num, next_offset = self._read_aedr_fast2(offset)
        if found_num != entry_num:
            # Not this one — follow the link to the next AEDR.
            offset = next_offset
            continue
        # Full read of the matching entry.
        if self.cdfversion == 3:
            aedr_info = self._read_aedr(offset, to_np=to_np)
        else:
            aedr_info = self._read_aedr2(offset, to_np=to_np)
        dtype = aedr_info['data_type']
        result = {
            'Item_Size': CDF._type_size(dtype, aedr_info['num_elements']),
            'Data_Type': CDF._datatype_token(dtype),
            'Num_Items': aedr_info['num_elements'],
            'Data': aedr_info['entry'],
        }
        # CDF_CHAR / CDF_UCHAR: item count is the string count, and multiple
        # strings arrive joined by the '\N ' separator.
        if dtype == 51 or dtype == 52:
            result['Num_Items'] = aedr_info['num_strings']
            if aedr_info['num_strings'] > 1:
                result['Data'] = aedr_info['entry'].split('\\N ')
        # Non-numpy mode represents CDF_EPOCH16 (type 32) as a Python complex.
        if not to_np and dtype == 32:
            result['Data'] = complex(aedr_info['entry'][0],
                                     aedr_info['entry'][1])
        return result
    raise KeyError('The entry does not exist')
def getVersion():  # @NoSelf
    """
    Prints the code version and last modified date.
    """
    version_text = '.'.join(str(part) for part in
                            (CDF.version, CDF.release, CDF.increment))
    print('CDFread version:', version_text)
    print('Date: 2018/01/11')
var_block_data = self._read_vvr_block2(vvr_offs[vvr_num])
asize = len(var_block_data)
byte_stream[pos:pos+asize] = var_block_data
pos = pos + asize
startPos = (startrec - vvr_start[firstBlock]) * numBytes * numValues
stopOff = (vvr_end[lastBlock] - endrec) * numBytes * numValues
byte_stream = byte_stream[startPos:len(byte_stream)-stopOff]
else:
# with sparse records
if ('pad' in vdr_dict):
# use default pad value
filled_data = CDF._convert_np_data(vdr_dict['pad'],
vdr_dict['data_type'],
vdr_dict['num_elements'])
else:
filled_data = CDF._convert_np_data(
self._default_pad(vdr_dict['data_type'],
vdr_dict['num_elements']),
vdr_dict['data_type'],
vdr_dict['num_elements'])
cur_block = -1
rec_size = numBytes * numValues
for rec_num in range(startrec, (endrec+1)):
block, prev_block = CDF._find_block(vvr_start, vvr_end,
cur_block, rec_num)
if (block > -1):
record_off = rec_num - vvr_start[block]
if (cur_block != block):
if (self.cdfversion == 3):
var_block_data = self._read_vvr_block(vvr_offs[block])
else:
var_block_data = self._read_vvr_block2(vvr_offs[block])
if not self._md5_validation():
raise OSError('This file fails the md5 checksum.')
if not cdr_info['format']:
raise OSError('This package does not support multi-format CDF')
if cdr_info['encoding'] in (3, 14, 15):
raise OSError('This package does not support CDFs with this ' +
CDF._encoding_token(cdr_info['encoding']) +
' encoding')
# SET GLOBAL VARIABLES
self._post25 = cdr_info['post25']
self._version = cdr_info['version']
self._encoding = cdr_info['encoding']
self._majority = CDF._major_token(cdr_info['majority'])
self._copyright = cdr_info['copyright']
self._md5 = cdr_info['md5']
self._first_zvariable = gdr_info['first_zvariable']
self._first_rvariable = gdr_info['first_rvariable']
self._first_adr = gdr_info['first_adr']
self._num_zvariable = gdr_info['num_zvariables']
self._num_rvariable = gdr_info['num_rvariables']
self._rvariables_num_dims = gdr_info['rvariables_num_dims']
self._rvariables_dim_sizes = gdr_info['rvariables_dim_sizes']
self._num_att = gdr_info['num_attributes']
self._num_rdim = gdr_info['rvariables_num_dims']
self._rdim_sizes = gdr_info['rvariables_dim_sizes']
if (self.cdfversion == 3):
self._leap_second_updated = gdr_info['leapsecond_updated']
if self.compressed_file is not None:
def _convert_data(self, data, data_type, num_recs, num_values, num_elems):
'''
Converts data to the appropriate type using the struct.unpack method,
rather than using numpy.
'''
if (data_type == 51 or data_type == 52):
return [data[i:i+num_elems].decode('utf-8') for i in
range(0, num_recs*num_values*num_elems, num_elems)]
else:
tofrom = self._convert_option()
dt_string = self._convert_type(data_type)
form = tofrom + str(num_recs*num_values*num_elems) + dt_string
value_len = CDF._type_size(data_type, num_elems)
return list(struct.unpack_from(form,
data[0:num_recs*num_values*value_len]))
variable :
"""
vdr_info = self.varget(variable=variable, inq=True)
if vdr_info is None:
raise KeyError("Variable {} not found.".format(variable))
var = {}
var['Variable'] = vdr_info['name']
var['Num'] = vdr_info['variable_number']
var['Var_Type'] = CDF._variable_token(vdr_info['section_type'])
var['Data_Type'] = vdr_info['data_type']
var['Data_Type_Description'] = CDF._datatype_token(vdr_info['data_type'])
var['Num_Elements'] = vdr_info['num_elements']
var['Num_Dims'] = vdr_info['num_dims']
var['Dim_Sizes'] = vdr_info['dim_sizes']
var['Sparse'] = CDF._sparse_token(vdr_info['sparse'])
var['Last_Rec'] = vdr_info['max_records']
var['Rec_Vary'] = vdr_info['record_vary']
var['Dim_Vary'] = vdr_info['dim_vary']
if ('pad' in vdr_info):
var['Pad'] = vdr_info['pad']
var['Compress'] = vdr_info['compression_level']
if ('blocking_factor' in vdr_info):
var['Block_Factor'] = vdr_info['blocking_factor']
return var
def _get_attnames(self):
    """Return a list of single-entry dicts mapping each attribute name
    to its scope token, by walking the ADR linked list."""
    attributes = []
    next_adr = self._first_adr
    for _ in range(self._num_att):
        adr_info = self._read_adr(next_adr)
        scope = CDF._scope_token(int(adr_info['scope']))
        attributes.append({adr_info['name']: scope})
        next_adr = adr_info['next_adr_location']
    return attributes