How to use the asammdf.v3constants module in asammdf

To help you get started, we’ve selected a few asammdf examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github danielhrisca / asammdf / asammdf / mdf3.py View on Github external
def _get_not_byte_aligned_data(self, data, group, ch_nr):
        """Extract samples for a channel whose bits are not byte aligned.

        NOTE(review): this snippet is truncated — only the setup portion is
        visible here; the actual bit-extraction logic follows in the full
        source file.

        Parameters
        ----------
        data : bytes
            raw record bytes for the channel's data group
        group : dict
            group description dict (provides 'channel_group' and 'channels')
        ch_nr : int
            index of the channel inside the group
        """

        # MDF v3 data types stored in big-endian (Motorola) byte order;
        # presumably these need byte-swapping compared to the little-endian
        # default — confirm against the full implementation.
        big_endian_types = (
            v3c.DATA_TYPE_UNSIGNED_MOTOROLA,
            v3c.DATA_TYPE_FLOAT_MOTOROLA,
            v3c.DATA_TYPE_DOUBLE_MOTOROLA,
            v3c.DATA_TYPE_SIGNED_MOTOROLA,
        )

        # size in bytes of a single record in this channel group
        record_size = group['channel_group']['samples_byte_nr']

        if self.memory != 'minimum':
            # channel objects are kept fully loaded in memory
            channel = group['channels'][ch_nr]
        else:
            # in 'minimum' memory mode only the block address is stored, so
            # the Channel block must be re-read from the file stream
            channel = Channel(
                address=group['channels'][ch_nr],
                stream=self._file,
            )

        # split the channel's start offset into whole bytes plus the
        # remaining bit offset inside the first byte
        bit_offset = channel['start_offset'] % 8
        byte_offset = channel['start_offset'] // 8
github danielhrisca / asammdf / asammdf / mdf3.py View on Github external
stream=stream,
                            )
                            name = block['text']
                    else:
                        name = new_ch['short_name']
                    name = name.decode('latin-1').strip(' \n\t\0')
                    name = name.split('\\')[0]
                    new_ch.name = name

                    if name in self.channels_db:
                        self.channels_db[name].append((dg_cntr, ch_cntr))
                    else:
                        self.channels_db[name] = []
                        self.channels_db[name].append((dg_cntr, ch_cntr))

                    if new_ch['channel_type'] == v3c.CHANNEL_TYPE_MASTER:
                        self.masters_db[dg_cntr] = ch_cntr
                    # go to next channel of the current channel group

                    ch_cntr += 1
                    if memory != 'minimum':
                        grp_chs.append(new_ch)
                    else:
                        grp_chs.append(ch_addr)
                    ch_addr = new_ch['next_ch_addr']

                cg_addr = grp['channel_group']['next_cg_addr']
                dg_cntr += 1

            # store channel groups record sizes dict and data block size in
            # each new group data belong to the initial unsorted group, and
            # add the key 'sorted' with the value False to use a flag;
github danielhrisca / asammdf / asammdf / mdf3.py View on Github external
name = name.decode('utf-8').strip(' \r\t\n\0')
            name = name.split('\\')[0]
            channel.name = name

        dep = grp['channel_dependencies'][ch_nr]
        cycles_nr = grp['channel_group']['cycles_nr']

        # get data group record
        if data is None:
            data = self._load_group_data(grp)

        info = None

        # check if this is a channel array
        if dep:
            if dep['dependency_type'] == v3c.DEPENDENCY_TYPE_VECTOR:
                shape = [dep['sd_nr'], ]
            elif dep['dependency_type'] >= v3c.DEPENDENCY_TYPE_NDIM:
                shape = []
                i = 0
                while True:
                    try:
                        dim = dep['dim_{}'.format(i)]
                        shape.append(dim)
                        i += 1
                    except KeyError:
                        break
                shape = shape[::-1]

            record_shape = tuple(shape)

            arrays = [
github danielhrisca / asammdf / asammdf / mdf3.py View on Github external
cycles_nr = grp['channel_group']['cycles_nr']
                record_size = grp['channel_group']['samples_byte_nr']

                cg_size[record_id] = record_size

                record_size += record_id_nr
                total_size += record_size * cycles_nr

                grp['record_size'] = cg_size
                grp['size'] = total_size

            if memory == 'full':
                # read data block of the current data group
                dat_addr = gp['data_block_addr']
                if dat_addr:
                    seek(dat_addr, v3c.SEEK_START)
                    data = read(total_size)
                else:
                    data = b''
                if record_id_nr == 0:
                    grp = new_groups[0]
                    grp['data_location'] = v3c.LOCATION_MEMORY
                    grp['data_block'] = DataBlock(data=data)

                else:
                    # agregate data for each record ID in the cg_data dict
                    cg_data = defaultdict(list)
                    i = 0
                    size = len(data)
                    while i < size:
                        rec_id = data[i]
                        # skip record id
github danielhrisca / asammdf / asammdf / mdf3.py View on Github external
a = conversion['a']
                    b = conversion['b']
                    if (a, b) != (1, 0):
                        vals = vals * a
                        if b:
                            vals += b

                elif conversion_type in (v3c.CONVERSION_TYPE_TABI,
                                         v3c.CONVERSION_TYPE_TABX):
                    nr = conversion['ref_param_nr']

                    raw = [conversion['raw_{}'.format(i)] for i in range(nr)]
                    raw = array(raw)
                    phys = [conversion['phys_{}'.format(i)] for i in range(nr)]
                    phys = array(phys)
                    if conversion_type == v3c.CONVERSION_TYPE_TABI:
                        vals = interp(vals, raw, phys)
                    else:
                        idx = searchsorted(raw, vals)
                        idx = clip(idx, 0, len(raw) - 1)
                        vals = phys[idx]

                elif conversion_type == v3c.CONVERSION_TYPE_VTAB:
                    nr = conversion['ref_param_nr']
                    raw = [
                        conversion['param_val_{}'.format(i)]
                        for i in range(nr)
                    ]
                    raw = array(raw)
                    phys = [conversion['text_{}'.format(i)] for i in range(nr)]
                    phys = array(phys)
                    info = {'raw': raw, 'phys': phys}
github danielhrisca / asammdf / asammdf / mdf3.py View on Github external
types = [(channel.name, vals.dtype, vals.shape[1:]), ]
                        if PYVERSION == 2:
                            types = fix_dtype_fields(types)
                        types = dtype(types)
                        vals = fromarrays(arrays, dtype=types)

                elif conversion_type == v3c.CONVERSION_TYPE_LINEAR:
                    a = conversion['a']
                    b = conversion['b']
                    if (a, b) != (1, 0):
                        vals = vals * a
                        if b:
                            vals += b

                elif conversion_type in (v3c.CONVERSION_TYPE_TABI,
                                         v3c.CONVERSION_TYPE_TABX):
                    nr = conversion['ref_param_nr']

                    raw = [conversion['raw_{}'.format(i)] for i in range(nr)]
                    raw = array(raw)
                    phys = [conversion['phys_{}'.format(i)] for i in range(nr)]
                    phys = array(phys)
                    if conversion_type == v3c.CONVERSION_TYPE_TABI:
                        vals = interp(vals, raw, phys)
                    else:
                        idx = searchsorted(raw, vals)
                        idx = clip(idx, 0, len(raw) - 1)
                        vals = phys[idx]

                elif conversion_type == v3c.CONVERSION_TYPE_VTAB:
                    nr = conversion['ref_param_nr']
                    raw = [
github danielhrisca / asammdf / asammdf / mdf3.py View on Github external
texts.append(b'')

                    texts = array(texts)
                    lower = [
                        conversion['lower_{}'.format(i)]
                        for i in range(nr)
                    ]
                    lower = array(lower)
                    upper = [
                        conversion['upper_{}'.format(i)]
                        for i in range(nr)
                    ]
                    upper = array(upper)
                    info = {'lower': lower, 'upper': upper, 'phys': texts}

                elif conversion_type in (v3c.CONVERSION_TYPE_EXPO,
                                         v3c.CONVERSION_TYPE_LOGH):
                    if conversion_type == v3c.CONVERSION_TYPE_EXPO:
                        func = log
                    else:
                        func = exp
                    P1 = conversion['P1']
                    P2 = conversion['P2']
                    P3 = conversion['P3']
                    P4 = conversion['P4']
                    P5 = conversion['P5']
                    P6 = conversion['P6']
                    P7 = conversion['P7']
                    if P4 == 0:
                        vals = func(((vals - P7) * P6 - P3) / P1) / P2
                    elif P1 == 0:
                        vals = func((P3 / (vals - P7) - P6) / P4) / P5
github danielhrisca / asammdf / asammdf / mdf3.py View on Github external
'channels': [],
                    'conversion_tab': [],
                    'channel_group': [],
                }
                grp['trigger'] = [trigger, trigger_text]
                grp['channel_dependencies'] = []

                if record_id_nr:
                    grp['sorted'] = False
                else:
                    grp['sorted'] = True

                kargs = {'first_cg_addr': cg_addr,
                         'data_block_addr': data_addr}
                if self.version in ('3.20', '3.30'):
                    kargs['block_len'] = v3c.DG32_BLOCK_SIZE
                else:
                    kargs['block_len'] = v3c.DG31_BLOCK_SIZE

                grp['data_group'] = DataGroup(**kargs)

                # read each channel group sequentially
                grp['channel_group'] = ChannelGroup(
                    address=cg_addr,
                    stream=stream,
                )

                # read name and comment for current channel group
                cg_texts = {}
                grp['texts']['channel_group'].append(cg_texts)

                address = grp['channel_group']['comment_addr']
github danielhrisca / asammdf / asammdf / mdf3.py View on Github external
index : int
            group index
        data : bytes
            data block raw bytes; default None

        Returns
        -------
        t : numpy.array
            master channel samples

        """
        if index in self._master_channel_cache:
            return self._master_channel_cache[index]
        group = self.groups[index]

        if group['data_location'] == v3c.LOCATION_ORIGINAL_FILE:
            stream = self._file
        else:
            stream = self._tempfile
        memory = self.memory

        time_ch_nr = self.masters_db.get(index, None)
        cycles_nr = group['channel_group']['cycles_nr']

        if time_ch_nr is None:
            t = arange(cycles_nr, dtype=float64)
        else:
            time_conv = group['channel_conversions'][time_ch_nr]
            if memory == 'minimum':
                if time_conv:
                    time_conv = ChannelConversion(
                        address=group['channel_conversions'][time_ch_nr],
github danielhrisca / asammdf / asammdf / mdf3.py View on Github external
def _get_not_byte_aligned_data(self, data, group, ch_nr):
        """Extract samples for a channel that does not start on a byte boundary.

        NOTE(review): snippet is truncated right after the offset computation;
        the remainder of the routine is not visible here.

        Parameters
        ----------
        data : bytes
            raw record bytes for the channel's data group
        group : dict
            group description dict (provides 'channel_group' and 'channels')
        ch_nr : int
            index of the channel inside the group
        """

        # constants for the big-endian (Motorola) MDF v3 data types; the
        # hidden remainder of the routine presumably branches on these —
        # verify in the full source.
        big_endian_types = (
            v3c.DATA_TYPE_UNSIGNED_MOTOROLA,
            v3c.DATA_TYPE_FLOAT_MOTOROLA,
            v3c.DATA_TYPE_DOUBLE_MOTOROLA,
            v3c.DATA_TYPE_SIGNED_MOTOROLA,
        )

        # bytes occupied by one record of this channel group
        record_size = group['channel_group']['samples_byte_nr']

        if self.memory != 'minimum':
            # channel object already resident in memory
            channel = group['channels'][ch_nr]
        else:
            # 'minimum' memory mode stores only the address; re-read the
            # Channel block from the backing file
            channel = Channel(
                address=group['channels'][ch_nr],
                stream=self._file,
            )

        # bit position of the channel inside its starting byte
        bit_offset = channel['start_offset'] % 8