# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# --- fragment: build the master (time) channel of a new data group ---
# NOTE(review): cut from a larger method; original indentation was lost in
# extraction, so nesting below is implied, not shown.
# Map the timestamps array's numpy dtype/shape to an MDF v3 data type code
# and bit size.
t_type, t_size = fmt_to_datatype_v3(
timestamps.dtype,
timestamps.shape,
)
# Field values for the time channel's CN block (MDF v3).
kargs = {
'short_name': time_name.encode('latin-1'),
'channel_type': v23c.CHANNEL_TYPE_MASTER,
'data_type': t_type,
'start_offset': 0,
# guard against empty signals: zero cycles means no samples to index
'min_raw_value': timestamps[0] if cycles_nr else 0,
'max_raw_value': timestamps[-1] if cycles_nr else 0,
'bit_count': t_size,
'block_len': channel_size,
'version': version,
}
channel = Channel(**kargs)
channel.name = name = time_name
channel.conversion = conversion
channel.source = source
if memory != 'minimum':
# full/low memory mode: keep the Channel object itself in the group
gp_channels.append(channel)
else:
# minimum memory mode: write the block out and keep only its address
channel.to_stream(file, defined_texts, cc_map, si_map)
gp_channels.append(channel.address)
self.channels_db.add(name, dg_cntr, ch_cntr)
# channel index 0 is recorded as the master channel of this data group
self.masters_db[dg_cntr] = 0
# data group record parents
parents[ch_cntr] = name, 0
# time channel doesn't have channel dependencies
# read each channel group sequentially
# NOTE(review): fragment of a file-reading method; indentation lost in
# extraction — the lines after `while ch_addr:` form the loop body.
grp['channel_group'] = ChannelGroup(
address=cg_addr,
stream=stream,
)
# go to first channel of the current channel group
ch_addr = grp['channel_group']['first_ch_addr']
ch_cntr = 0
grp_chs = grp['channels']
# walk the singly linked list of CN blocks (fragment is cut before the
# pointer advance / loop end)
while ch_addr:
# read channel block and create channel object
# metadata (names, comments) is skipped in 'minimum' memory mode
load_metadata = memory != 'minimum'
new_ch = Channel(
address=ch_addr,
stream=stream,
load_metadata=load_metadata,
)
# check if it has channel dependencies
if new_ch['ch_depend_addr']:
dep = ChannelDependency(
address=new_ch['ch_depend_addr'],
stream=stream,
)
grp['channel_dependencies'].append(dep)
else:
# append None so list positions stay aligned with channel indices
grp['channel_dependencies'].append(None)
# update channel map
# NOTE(review): fragment — the opening `kargs = {` of this dict literal was
# lost in extraction; these are CN-block fields for a value channel.
'channel_type': v23c.CHANNEL_TYPE_VALUE,
'data_type': s_type,
# inconsistent statistics (min > max) collapse to 0/0
'min_raw_value': min_val if min_val <= max_val else 0,
'max_raw_value': max_val if min_val <= max_val else 0,
'start_offset': start_bit_offset,
'bit_count': s_size,
# NOTE(review): 'aditional_byte_offset' spelling matches the project's
# v23c block field name — do not "correct" it
'aditional_byte_offset': additional_byte_offset,
'block_len': channel_size,
'description': description,
'version': version,
}
# round sub-byte sample sizes up to a whole byte; this happens AFTER the
# dict is built, so 'bit_count' keeps the real bit size while the record
# offset below advances by whole bytes
if s_size < 8:
s_size = 8
channel = Channel(**kargs)
channel.name = name
channel.source = source
if memory != 'minimum':
gp_channels.append(channel)
else:
channel.to_stream(file, defined_texts, cc_map, si_map)
gp_channels.append(channel.address)
# advance the record offset by the (possibly multi-dimensional) sample size
size = s_size
for dim in shape:
size *= dim
offset += size
self.channels_db.add(name, dg_cntr, ch_cntr)
# update the parents as well
# NOTE(review): fragment; same CN-block construction pattern as the other
# value-channel fragment in this file, but without 'description' and with
# the signal comment copied onto the Channel object instead.
kargs = {
'channel_type': v23c.CHANNEL_TYPE_VALUE,
'data_type': s_type,
# inconsistent statistics (min > max) collapse to 0/0
'min_raw_value': min_val if min_val <= max_val else 0,
'max_raw_value': max_val if min_val <= max_val else 0,
'start_offset': start_bit_offset,
'bit_count': s_size,
# NOTE(review): misspelled key matches the project's v23c field name
'aditional_byte_offset': additional_byte_offset,
'block_len': channel_size,
'version': version,
}
# round sub-byte sizes up to a whole byte for record bookkeeping; done
# after the dict so 'bit_count' keeps the true bit size
if s_size < 8:
s_size = 8
channel = Channel(**kargs)
channel.name = name
channel.comment = signal.comment
channel.source = source
if memory != 'minimum':
# keep the Channel object in memory
gp_channels.append(channel)
else:
# minimum memory: serialize now and store only the address
channel.to_stream(file, defined_texts, cc_map, si_map)
gp_channels.append(channel.address)
self.channels_db.add(name, dg_cntr, ch_cntr)
ch_cntr += 1
# NOTE(review): fragment of a channel-lookup loop; the `return channel`
# and the trailing `)` indicate this chunk is cut from the middle of a
# method and the closing line belongs to an expression outside this view.
for i, (name, samples) in enumerate(
zip(component_names, component_samples)):
# resolve (group, channel) indices for this component name
gp_nr, ch_nr = self._validate_channel_selection(
name,
group,
index,
)
grp = self.groups[gp_nr]
# pick the stream the group's data actually lives in
if grp['data_location'] == v23c.LOCATION_ORIGINAL_FILE:
stream = self._file
else:
stream = self._tempfile
channel = grp['channels'][ch_nr]
if self.memory == 'minimum':
# in minimum mode the list holds addresses, not Channel objects,
# so the block must be read from the stream first
channel = Channel(
address=channel,
stream=stream,
)
# return a copy so the caller cannot mutate the cached channel
channel = deepcopy(channel)
return channel
)
# NOTE(review): fragment of a channel-retrieval method (resembles a
# `get`-style accessor); indentation lost in extraction.
# keep a reference to the caller-supplied data fragment
original_data = data
memory = self.memory
grp = self.groups[gp_nr]
# pick the stream the group's data actually lives in
if grp['data_location'] == v23c.LOCATION_ORIGINAL_FILE:
stream = self._file
else:
stream = self._tempfile
channel = grp['channels'][ch_nr]
if memory == 'minimum':
# minimum mode stores addresses; materialize the Channel block
channel = Channel(
address=grp['channels'][ch_nr],
stream=stream,
)
conversion = channel.conversion
name = channel.name
display_name = channel.display_name
# a stored bit count of 0 falls back to 64 bits
bit_count = channel['bit_count'] or 64
dep = grp['channel_dependencies'][ch_nr]
cycles_nr = grp['channel_group']['cycles_nr']
# get data group record
# NOTE(review): fragment that prepares state for building the numpy record
# parents of a group; indentation lost in extraction.
if data is None:
# no fragment supplied by the caller: load the group's own data
data = self._load_group_data(grp)
else:
stream = self._tempfile
grp = group
# record size in bits (samples_byte_nr is in bytes)
record_size = grp['channel_group']['samples_byte_nr'] << 3
next_byte_aligned_position = 0
types = []
current_parent = ""
parent_start_offset = 0
parents = {}
group_channels = set()
if memory != 'minimum':
# channels are already Channel objects
channels = grp['channels']
else:
# minimum mode stores addresses: read each CN block, skipping
# metadata since only layout fields are needed here
channels = [
Channel(address=ch_addr, stream=stream, load_metadata=False)
for ch_addr in grp['channels']
]
# the channels are first sorted ascending (see __lt__ method of Channel
# class): a channel with a lower start offset is smaller; when two
# channels have the same start offset, the one with the higher bit size is
# considered smaller. The reason is that when the numpy record is built
# and there are overlapping channels, the parent fields must be bigger
# (bit size) than the embedded channels. For each channel the parent
# dict will have a (parent name, bit offset) pair: the channel value is
# computed using the values from the parent field, and the bit offset,
# which is the channel's bit offset within the parent bytes.
# This means all parents will have themselves as parent, and a bit offset
# of 0. Gaps in the records are also considered. Non-standard integer
# sizes are adjusted to the next higher standard integer size (e.g. a uint
# of 28 bits will be adjusted to 32 bits)