Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
elif gp['data_location'] == v2c.LOCATION_TEMPORARY_FILE:
stream = self._tempfile
inf = {}
info['group {}'.format(i)] = inf
inf['cycles'] = gp['channel_group']['cycles_nr']
inf['channels count'] = len(gp['channels'])
for j, channel in enumerate(gp['channels']):
if self.memory != 'minimum':
name = channel.name
else:
channel = Channel(
address=channel,
stream=stream,
)
if channel['long_name_addr']:
name = TextBlock(
address=channel['long_name_addr'],
stream=stream,
)
name = name['text']
else:
name = channel['short_name']
name = name.decode('utf-8').strip(' \r\t\n\0')
name = name.split('\\')[0]
if channel['channel_type'] == v2c.CHANNEL_TYPE_MASTER:
ch_type = 'master'
else:
ch_type = 'value'
inf['channel {}'.format(j)] = 'name="{}" type={}'.format(name, ch_type)
return info
stream=stream,
)
else:
ch_texts[key] = address
if ch_texts:
grp_ch_texts.append(ch_texts)
else:
grp_ch_texts.append(None)
# update channel object name and block_size attributes
if new_ch['long_name_addr']:
if memory != 'minimum':
name = ch_texts['long_name_addr']['text']
else:
block = TextBlock(
address=ch_texts['long_name_addr'],
stream=stream,
)
name = block['text']
else:
name = new_ch['short_name']
name = name.decode('latin-1').strip(' \n\t\0')
name = name.split('\\')[0]
new_ch.name = name
if name in self.channels_db:
self.channels_db[name].append((dg_cntr, ch_cntr))
else:
self.channels_db[name] = []
self.channels_db[name].append((dg_cntr, ch_cntr))
dg_addr = self.header['first_dg_addr']
# read each data group sequentially
while dg_addr:
gp = DataGroup(address=dg_addr, stream=stream)
record_id_nr = gp['record_id_nr']
cg_nr = gp['cg_nr']
cg_addr = gp['first_cg_addr']
data_addr = gp['data_block_addr']
# read trigger information if available
trigger_addr = gp['trigger_addr']
if trigger_addr:
trigger = TriggerBlock(address=trigger_addr,
stream=stream)
if trigger['text_addr']:
trigger_text = TextBlock(
address=trigger['text_addr'],
stream=stream,
)
else:
trigger_text = None
else:
trigger = None
trigger_text = None
new_groups = []
for i in range(cg_nr):
new_groups.append({})
grp = new_groups[-1]
grp['channels'] = []
grp['channel_conversions'] = []
trigger['block_len'] += 24
trigger['trigger_{}_time'.format(nr)] = timestamp
trigger['trigger_{}_pretime'.format(nr)] = pre_time
trigger['trigger_{}_posttime'.format(nr)] = post_time
if trigger_text is None and comment:
trigger_text = TextBlock(text=comment)
gp['trigger'][1] = trigger_text
else:
trigger = TriggerBlock(
trigger_event_nr=1,
trigger_0_time=timestamp,
trigger_0_pretime=pre_time,
trigger_0_posttime=post_time,
)
if comment:
trigger_text = TextBlock(text=comment)
else:
trigger_text = None
gp['trigger'] = [trigger, trigger_text]
additional_byte_offset = 0
if signal.samples.dtype.kind == 'u':
data_type = v2c.DATA_TYPE_UNSIGNED_INTEL
else:
data_type = v2c.DATA_TYPE_SIGNED_INTEL
texts = {}
if len(name) >= 32:
short_name = (name[:31] + '\0').encode('latin-1')
if memory != 'minimum':
texts['long_name_addr'] = TextBlock(texts=name)
else:
address = tell()
texts['long_name_addr'] = address
block = TextBlock(texts=name)
gp_channels.append(address)
write(bytes(block))
else:
short_name = name.encode('latin-1')
if texts:
gp_texts['channels'][-1] = texts
kargs = {
'short_name': short_name,
'channel_type': v2c.CHANNEL_TYPE_VALUE,
'data_type': data_type,
'min_raw_value': min_val if min_val <= max_val else 0,
'max_raw_value': max_val if min_val <= max_val else 0,
'start_offset': start_bit_offset,
'bit_count': bit_count,
created
asammdf
{}
'''.format(__version__))
else:
text = '{}\n{}: updated by asammdf {}'
old_history = self.file_history['text'].decode('latin-1')
timestamp = time.asctime().encode('latin-1')
text = text.format(
old_history,
timestamp,
__version__,
)
self.file_history = TextBlock(text=text)
if self.name is None and dst == '':
message = ('Must specify a destination file name '
'for MDF created from scratch')
raise MdfException(message)
dst = dst if dst else self.name
if overwrite is False:
if os.path.isfile(dst):
cntr = 0
while True:
name = os.path.splitext(dst)[0] + '_{}.mdf'.format(cntr)
if not os.path.isfile(name):
break
else:
cntr += 1
read = stream.read
seek = stream.seek
dg_cntr = 0
seek(0, v2c.SEEK_START)
self.identification = FileIdentificationBlock(
stream=stream,
)
self.header = HeaderBlock(stream=stream)
self.version = self.identification['version_str']\
.decode('latin-1')\
.strip(' \n\t\0')
self.file_history = TextBlock(
address=self.header['comment_addr'],
stream=stream,
)
# this will hold mapping from channel address to Channel object
# needed for linking dependency blocks to referenced channels after
# the file is loaded
ch_map = {}
# go to first data group
dg_addr = self.header['first_dg_addr']
# read each data group sequentially
while dg_addr:
gp = DataGroup(address=dg_addr, stream=stream)
record_id_nr = gp['record_id_nr']
cg_nr = gp['cg_nr']
created
asammdf
{}
'''.format(__version__))
else:
text = '{}\n{}: updated by asammdf {}'
old_history = self.file_history['text'].decode('latin-1')
timestamp = time.asctime().encode('latin-1')
text = text.format(
old_history,
timestamp,
__version__,
)
self.file_history = TextBlock(text=text)
# all MDF blocks are appended to the blocks list in the order in which
# they will be written to disk. While creating this list, all the
# relevant block links are updated so that once all blocks have been
# added to the list they can be written using the bytes protocol.
# DataGroup blocks are written first after the identification and
# header blocks. When memory=False we need to restore the
# original data block addresses within the data group block. This is
# needed to allow further work with the object after the save method
# call (e.g. new calls to get method). Since the data group blocks are
# written first, it is safe to restore the original links when the data
# blocks are written. For memory=False the blocks list will
# contain a tuple instead of a DataBlock instance; the tuple will have
# the reference to the data group object and the original link to the
# data block in the source MDF file.