# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def _load_group_data(self, group):
    """Yield the raw data-block bytes of *group* as (bytes, offset) fragments.

    NOTE(review): this fragment is truncated by a splice in the source --
    the tail of the ``else`` branch (and the rest of the generator body)
    is not visible here, so only the visible control flow is documented.
    """
    offset = 0
    if self.memory == 'full':
        # everything was loaded at open time -- hand back the stored block
        yield group['data_block']['data'], offset
    else:
        channel_group = group['channel_group']
        if group['data_location'] == v23c.LOCATION_ORIGINAL_FILE:
            # go to the first data block of the current data group
            stream = self._file
        else:
            stream = self._tempfile
        # go to the first data block of the current data group
        if group['sorted']:
            samples_size = channel_group['samples_byte_nr']
            if not samples_size:
                # zero-length records: nothing to read for this group
                yield b'', 0
            else:
                if self._read_fragment_size:
                    # clamp the configured fragment size down to a whole
                    # number of records
                    split_size = self._read_fragment_size // samples_size
                    split_size *= samples_size
                else:
                    channels_nr = len(group['channels'])
                    # NOTE(review): truncated here by a splice; the default
                    # split-size computation is missing from this view.
                    # NOTE(review): orphaned fragment -- it begins inside a
                    # Signal(...) call whose opening lines were lost at a
                    # splice. The use of v4c/v3c constants and Signal/mdf
                    # suggests it comes from a test-data generator, not from
                    # the surrounding MDF reader code.
                    comment='Byte array channel {}'.format(i),
                    raw=True,
                )
                sigs.append(sig)
            mdf.append(sigs, common_timebase=True)

            # value to text
            sigs = []
            ones = np.ones(cycles, dtype=np.uint64)
            conversion = {
                'raw': np.arange(255, dtype=np.float64),
                'phys': np.array([
                    'Value {}'.format(i).encode('ascii')
                    for i in range(255)
                ]),
                'conversion_type': v4c.CONVERSION_TYPE_TABX if version >= '4.00' else v3c.CONVERSION_TYPE_TABX,
                'links_nr': 260,
                'ref_param_nr': 255,
            }
            for i in range(255):
                # the same value feeds both key layouts: 'val_*'/'text_*'
                # (MDF v4) and 'param_val_*' (MDF v3)
                conversion['val_{}'.format(i)] = conversion['param_val_{}'.format(i)] = conversion['raw'][i]
                conversion['text_{}'.format(i)] = conversion['phys'][i]
            conversion['text_{}'.format(255)] = 'Default'
            for i in range(channels_count):
                sig = Signal(
                    ones * i,
                    t,
                    name='Channel_{}'.format(i),
                    unit='unit_{}'.format(i),
                    comment='Value to text channel {}'.format(i),
                    # NOTE(review): truncated -- the remaining Signal(...)
                    # arguments and the rest of the loop are not visible.
def _get_not_byte_aligned_data(self, data, group, ch_nr):
    """Extract a channel whose bits are not byte aligned from *data*.

    NOTE(review): the body below is spliced -- after the Channel(...) call
    it jumps into value-conversion code (``t``, ``conversion``, ``raw`` and
    ``vals`` are never defined in this fragment), so only the preamble can
    be trusted as belonging to this method.
    """
    # Motorola data types are big endian and need byte-order handling
    big_endian_types = (
        v23c.DATA_TYPE_UNSIGNED_MOTOROLA,
        v23c.DATA_TYPE_FLOAT_MOTOROLA,
        v23c.DATA_TYPE_DOUBLE_MOTOROLA,
        v23c.DATA_TYPE_SIGNED_MOTOROLA,
    )
    record_size = group['channel_group']['samples_byte_nr']
    if self.memory != 'minimum':
        # channel objects are kept in memory
        channel = group['channels'][ch_nr]
    else:
        # 'minimum' memory mode stores only addresses; re-read the block
        if group['data_location'] == v23c.LOCATION_ORIGINAL_FILE:
            stream=self._file
        else:
            # NOTE(review): the trailing comma makes ``stream`` a 1-tuple,
            # which looks like a bug in the original -- confirm upstream
            stream=self._tempfile,
        channel = Channel(
            address=group['channels'][ch_nr],
            stream=stream,
            name='_',
        ).interp(t).samples
    timestamps = t
    if conversion is None:
        conversion_type = v23c.CONVERSION_TYPE_NONE
    else:
        conversion_type = conversion['conversion_type']
    if conversion_type == v23c.CONVERSION_TYPE_NONE:
        pass
    elif conversion_type in (
            v23c.CONVERSION_TYPE_LINEAR,
            v23c.CONVERSION_TYPE_TABI,
            v23c.CONVERSION_TYPE_TAB,
            v23c.CONVERSION_TYPE_EXPO,
            v23c.CONVERSION_TYPE_LOGH,
            v23c.CONVERSION_TYPE_RAT,
            v23c.CONVERSION_TYPE_POLY,
            v23c.CONVERSION_TYPE_FORMULA):
        if not raw:
            try:
                vals = conversion.convert(vals)
            except:
                # NOTE(review): bare except + print is debug residue; the
                # exception is re-raised, so nothing is swallowed
                print(channel, conversion)
                raise
    elif conversion_type in (
            v23c.CONVERSION_TYPE_TABX,
            v23c.CONVERSION_TYPE_RTABX):
            # NOTE(review): truncated -- this branch's body was lost at a
            # splice in the source.
        # NOTE(review): orphaned fragment from a data-group reading loop
        # (the enclosing function header was lost at a splice); ``grp``,
        # ``trigger``, ``record_id_nr``, ``cg_addr``, ``data_addr`` and
        # ``stream`` are all defined outside this view.
        grp['trigger'] = trigger
        grp['channel_dependencies'] = []

        # a non-zero record-id length means records of several channel
        # groups are interleaved -> the group is unsorted
        if record_id_nr:
            grp['sorted'] = False
        else:
            grp['sorted'] = True

        kargs = {
            'first_cg_addr': cg_addr,
            'data_block_addr': data_addr,
        }
        # the DGBLOCK layout changed size starting with MDF version 3.20
        if self.version >= '3.20':
            kargs['block_len'] = v23c.DG_POST_320_BLOCK_SIZE
        else:
            kargs['block_len'] = v23c.DG_PRE_320_BLOCK_SIZE
        kargs['record_id_len'] = record_id_nr

        grp['data_group'] = DataGroup(**kargs)

        # read each channel group sequentially
        grp['channel_group'] = ChannelGroup(
            address=cg_addr,
            stream=stream,
        )

        # go to first channel of the current channel group
        ch_addr = grp['channel_group']['first_ch_addr']
        ch_cntr = 0
        grp_chs = grp['channels']

        while ch_addr:
            # NOTE(review): truncated -- the channel-reading loop body is
            # not visible in this view.
def _get_not_byte_aligned_data(self, data, group, ch_nr):
    """Extract a not-byte-aligned channel (second, near-duplicate copy).

    NOTE(review): this is a second copy of the fragment above; its body is
    likewise spliced with unrelated value-conversion and record-iteration
    code (``t``, ``conversion``, ``raw``, ``vals``, ``samples_only``,
    ``record_id_nr``, ``i`` and ``rec_size`` are never defined here).
    Only the preamble can be trusted as belonging to this method.
    """
    # Motorola data types are big endian and need byte-order handling
    big_endian_types = (
        v23c.DATA_TYPE_UNSIGNED_MOTOROLA,
        v23c.DATA_TYPE_FLOAT_MOTOROLA,
        v23c.DATA_TYPE_DOUBLE_MOTOROLA,
        v23c.DATA_TYPE_SIGNED_MOTOROLA,
    )
    record_size = group['channel_group']['samples_byte_nr']
    if self.memory != 'minimum':
        channel = group['channels'][ch_nr]
    else:
        if group['data_location'] == v23c.LOCATION_ORIGINAL_FILE:
            stream=self._file
        else:
            # NOTE(review): the trailing comma makes ``stream`` a 1-tuple,
            # which looks like a bug in the original -- confirm upstream
            stream=self._tempfile,
        channel = Channel(
            address=group['channels'][ch_nr],
            stream=stream,
            load_metadata=False,
        )
    timestamps = t
    if conversion is None:
        conversion_type = v23c.CONVERSION_TYPE_NONE
    else:
        conversion_type = conversion['conversion_type']
    if conversion_type == v23c.CONVERSION_TYPE_NONE:
        pass
    elif conversion_type in (
            v23c.CONVERSION_TYPE_LINEAR,
            v23c.CONVERSION_TYPE_TABI,
            v23c.CONVERSION_TYPE_TAB,
            v23c.CONVERSION_TYPE_EXPO,
            v23c.CONVERSION_TYPE_LOGH,
            v23c.CONVERSION_TYPE_RAT,
            v23c.CONVERSION_TYPE_POLY,
            v23c.CONVERSION_TYPE_FORMULA):
        if not raw:
            try:
                vals = conversion.convert(vals)
            except:
                # NOTE(review): bare except + print is debug residue; the
                # exception is re-raised, so nothing is swallowed
                print(channel, conversion)
                raise
    elif conversion_type in (
            v23c.CONVERSION_TYPE_TABX,
            v23c.CONVERSION_TYPE_RTABX):
        # table conversions appear to be deferred: keep raw values
        raw = True
        conversion_type = v23c.CONVERSION_TYPE_NONE
    else:
        conversion_type = conversion['conversion_type']
    # NOTE(review): splice -- the conversion dispatch chain repeats below,
    # almost verbatim, which cannot be the original control flow.
    if conversion_type == v23c.CONVERSION_TYPE_NONE:
        pass
    elif conversion_type in (
            v23c.CONVERSION_TYPE_LINEAR,
            v23c.CONVERSION_TYPE_TABI,
            v23c.CONVERSION_TYPE_TAB,
            v23c.CONVERSION_TYPE_EXPO,
            v23c.CONVERSION_TYPE_LOGH,
            v23c.CONVERSION_TYPE_RAT,
            v23c.CONVERSION_TYPE_POLY,
            v23c.CONVERSION_TYPE_FORMULA):
        if not raw:
            try:
                vals = conversion.convert(vals)
            except:
                print(channel, conversion)
                raise
    elif conversion_type in (
            v23c.CONVERSION_TYPE_TABX,
            v23c.CONVERSION_TYPE_RTABX):
        raw = True
    if samples_only:
        # caller only wants the samples, not a Signal-like pair
        res = vals, None
    else:
        if conversion:
            # NOTE(review): splice -- from here the text jumps into
            # record-iteration code from a different function.
            if record_id_nr == 2:
                # record id byte at the front AND back of each record
                i += rec_size + 1
            else:
                i += rec_size
            # NOTE(review): orphaned fragment -- tail of the data-group
            # reading logic; the enclosing function header and the ``if``
            # that pairs with the ``else:`` below were lost at a splice.
            for grp in new_groups:
                grp['data_location'] = v23c.LOCATION_MEMORY
                record_id = grp['channel_group']['record_id']
                if PYVERSION == 2:
                    # Python 2 keys the per-record-id buffers by 1-char str
                    record_id = chr(record_id)
                data = cg_data[record_id]
                data = b''.join(data)
                # after sorting, each group carries a single record id
                grp['channel_group']['record_id'] = 1
                grp['data_block'] = DataBlock(data=data)
        else:
            for grp in new_groups:
                grp['data_location'] = v23c.LOCATION_ORIGINAL_FILE
                grp['data_group']['data_block_addr'] = data_group['data_block_addr']
                grp['data_block_addr'] = [data_group['data_block_addr'], ]
                grp['data_block_size'] = [total_size, ]

        self.groups.extend(new_groups)

        # go to next data group
        dg_addr = data_group['next_dg_addr']

    # finally update the channel dependency references
    for grp in self.groups:
        for dep in grp['channel_dependencies']:
            if dep:
                for i in range(dep['sd_nr']):
                    ref_channel_addr = dep['ch_{}'.format(i)]
                    channel = ch_map[ref_channel_addr]
                    # NOTE(review): truncated -- the rest of the dependency
                    # wiring is not visible in this view.