How to use the dace.data module in dace

To help you get started, we’ve selected a few dace examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github spcl / dace / diode / property_renderer.py View on Github external
rownum += 1

        # Section header row announcing the SDFG's data descriptors.
        data_label = Gtk.Label()
        data_label.set_text("Data of the SDFG")
        grid.attach(data_label, 0, rownum, 5, 1)
        rownum += 1

        # One pass per data descriptor registered in the SDFG.
        # NOTE(review): despite the name, `dtype` here is a dace.data
        # descriptor (Array/Stream/Scalar), not a dace.dtypes typeclass —
        # see the isinstance checks below.
        for name, dtype in sdfg.arrays.items():

            # Column 0: the data element's name.
            label_name = Gtk.Label()
            label_name.set_text(str(name))
            label_name.set_tooltip_text("Name of the data element")
            grid.attach(label_name, 0, rownum, 1, 1)

            # Column 1: the descriptor kind; falls back to the raw Python
            # type name for descriptors not explicitly handled.
            label_type = Gtk.Label()
            if isinstance(dtype, dace.data.Array):
                label_type.set_text("Array")
            elif isinstance(dtype, dace.data.Stream):
                label_type.set_text("Stream")
            elif isinstance(dtype, dace.data.Scalar):
                label_type.set_text("Scalar")
            else:
                label_type.set_text(str(type(dtype)))
            label_type.set_tooltip_text("Type of the data element")
            grid.attach(label_type, 1, rownum, 1, 1)

            # Shape label; the grid.attach for it occurs past this excerpt.
            label_shape = Gtk.Label()
            if dtype is not None:
                label_shape.set_text(str(dtype.shape))
            else:
                label_shape.set_text("None")
            label_shape.set_tooltip_text("Shape of the data element")
github spcl / dace / dace / transformation / dataflow / gpu_transform.py View on Github external
# Ensure that map does not include internal arrays that are
            # allocated on non-default space
            subgraph = graph.scope_subgraph(map_entry)
            for node in subgraph.nodes():
                # Reject the match if any access node in the map scope was
                # already placed in a storage other than Default or Register.
                if (isinstance(node, nodes.AccessNode) and
                        node.desc(sdfg).storage != dtypes.StorageType.Default
                        and node.desc(sdfg).storage !=
                        dtypes.StorageType.Register):
                    return False

            # If one of the outputs is a stream, do not match
            map_exit = graph.exit_nodes(map_entry)[0]
            for edge in graph.out_edges(map_exit):
                # Follow the memlet path to its ultimate destination node.
                dst = graph.memlet_path(edge)[-1].dst
                if (isinstance(dst, nodes.AccessNode)
                        and isinstance(sdfg.arrays[dst.data], data.Stream)):
                    return False

            return True
        elif expr_index == 1:
            # Second pattern variant: candidate is a reduce node.
            reduce = graph.nodes()[candidate[GPUTransformMap._reduce]]

            # Map schedules that are disallowed to transform to GPUs
            if (reduce.schedule in [dtypes.ScheduleType.MPI] +
                    dtypes.GPU_SCHEDULES):
                return False
            # Already inside device-level code: nothing to transform.
            if sd.is_devicelevel(sdfg, graph, reduce):
                return False

            return True
github spcl / dace / dace / codegen / targets / intel_fpga.py View on Github external
data_name, offset, read_expr,
                                         memlet.wcr)

            # Decide how the connector's value is written back, depending on
            # the kind of data descriptor the memlet refers to.
            if isinstance(data_desc, dace.data.Scalar):
                if memlet.num_accesses == 1:
                    # The value will be written during the tasklet, and will be
                    # automatically written out after
                    result += write_expr
                elif memlet.num_accesses == -1:
                    # Variable number of reads or writes
                    pass
                else:
                    # Scalars only support exactly one access or a dynamic
                    # (-1) count; any other value is a code generation error.
                    raise dace.codegen.codegen.CodegenError(
                        "Unsupported number of accesses {} for scalar {}".
                        format(memlet.num_accesses, connector))
            elif isinstance(data_desc, dace.data.Array):
                if memlet.num_accesses == 1:
                    result += write_expr
                else:
                    # Multiple/dynamic accesses: writes are emitted inside
                    # the tasklet code itself, nothing to append here.
                    pass
            elif isinstance(data_desc, dace.data.Stream):
                # Single streams and arrays of streams are handled alike:
                # append the write only for the single-access case.
                if not data_desc.is_stream_array():
                    if memlet.num_accesses == 1:
                        result += write_expr
                    else:
                        # Must happen directly in the code
                        pass
                else:  # is array of streams
                    if memlet.num_accesses == 1:
                        result += write_expr
                    else:
                        # Must happen directly in the code
github spcl / dace / dace / codegen / compiler.py View on Github external
for a in sig:
                try:
                    arglist.append(kwargs[a])
                    argtypes.append(typedict[a])
                    argnames.append(a)
                except KeyError:
                    # A signature argument was not supplied by the caller.
                    raise KeyError("Missing program argument \"{}\"".format(a))
        else:
            # No signature available: nothing to marshal or type-check.
            arglist = []
            argtypes = []
            argnames = []
            sig = []

        # Type checking
        for a, arg, atype in zip(argnames, arglist, argtypes):
            # Non-array value passed where an array descriptor is expected.
            if not _is_array(arg) and isinstance(atype, dt.Array):
                raise TypeError(
                    'Passing an object (type %s) to an array in argument "%s"'
                    % (type(arg).__name__, a))
            # Array value passed where a scalar descriptor is expected.
            if _is_array(arg) and not isinstance(atype, dt.Array):
                raise TypeError(
                    'Passing an array to a scalar (type %s) in argument "%s"' %
                    (atype.dtype.ctype, a))
            # Scalar of a mismatched Python type: warn rather than fail,
            # unless the expected dtype is a callback or the value is a
            # symbolic symbol with the matching dtype.
            if not isinstance(atype, dt.Array) and not isinstance(
                    atype.dtype, dace.callback) and not isinstance(
                        arg, atype.dtype.type) and not (
                            isinstance(arg, symbolic.symbol)
                            and arg.dtype == atype.dtype):
                print('WARNING: Casting scalar argument "%s" from %s to %s' %
                      (a, type(arg).__name__, atype.dtype.type))

        # Call a wrapper function to make NumPy arrays from pointers.
github spcl / dace / dace / frontend / python / simulator.py View on Github external
slice=rhs.slice,
                                    ctx=ast.Load())), node)
                elif not isinstance(rhs, ast.Subscript):
                    # RHS is a bare name or a call; extract the name node
                    # that identifies the array being read.
                    if isinstance(rhs, ast.Call):
                        array_name = rhs.func
                    else:
                        array_name = rhs

                    lhs_name = lhs.id

                    # In case of "tmp >> array", write "array[:]"
                    if node.value.left.id in self.curprim.transients:
                        init_expr = None
                    # If reading from a single stream ("b << stream")
                    elif (array_name.id in arrays
                          and isinstance(arrays[array_name.id], data.Stream)):
                        if arrays[array_name.id].shape == [1]:
                            # Single-element stream: read element 0 directly.
                            init_expr = _copy_location(
                                ast.parse('{v} = {q}[0]'.format(
                                    v=lhs_name, q=array_name.id)).body[0],
                                node)
                        # NOTE(review): if the shape test above fails,
                        # init_expr may be unbound here unless it was set
                        # before this excerpt — verify against the
                        # enclosing function.
                        return init_expr, None, []
                    else:
                        # General case: build "lhs = array[<slice>]".
                        init_expr = _copy_location(
                            ast.Assign(
                                targets=[
                                    ast.Name(id=lhs_name, ctx=ast.Store())
                                ],
                                value=ast.Subscript(
                                    value=ast.Name(
                                        id=array_name.id, ctx=ast.Load()),
                                    slice=ast.Slice(
github spcl / dace / dace / sdfg / utils.py View on Github external
""" Test whether a stream is directly connected to an array. """

    # Test all memlet paths from the array. If the path goes directly
    # to/from a stream, construct a stream array view
    all_source_paths = []
    source_paths = []
    all_sink_paths = []
    sink_paths = []
    for e in dfg.in_edges(node):
        src_node = dfg.memlet_path(e)[0].src
        # Append empty path to differentiate between a copy and an array-view
        if isinstance(src_node, nd.CodeNode):
            all_source_paths.append(None)
        # Append path from source node
        if isinstance(src_node, nd.AccessNode) and isinstance(
                src_node.desc(sdfg), dt.Array):
            source_paths.append(src_node)
    for e in dfg.out_edges(node):
        sink_node = dfg.memlet_path(e)[-1].dst

        # Append empty path to differentiate between a copy and an array-view
        if isinstance(sink_node, nd.CodeNode):
            all_sink_paths.append(None)
        # Append path to sink node
        if isinstance(sink_node, nd.AccessNode) and isinstance(
                sink_node.desc(sdfg), dt.Array):
            sink_paths.append(sink_node)

    all_sink_paths.extend(sink_paths)
    all_source_paths.extend(source_paths)

    # Special case: stream can be represented as a view of an array
github spcl / dace / dace / sdfg / validation.py View on Github external
########################################
        if isinstance(node, nd.AccessNode):
            # Every access node must refer to a registered data descriptor.
            if node.data not in sdfg.arrays:
                raise InvalidSDFGNodeError(
                    "Access node must point to a valid array name in the SDFG",
                    sdfg,
                    state_id,
                    nid,
                )

            # Find uninitialized transients
            arr = sdfg.arrays[node.data]
            # A transient that is only read in this state (no incoming
            # edges, at least one outgoing edge) may be used before being
            # initialized anywhere.
            if (arr.transient and state.in_degree(node) == 0
                    and state.out_degree(node) > 0
                    # Streams do not need to be initialized
                    and not isinstance(arr, dt.Stream)):
                # Find other instances of node in predecessor states
                states = sdfg.predecessor_states(state)
                input_found = False
                for s in states:
                    for onode in s.nodes():
                        # A write to the same data in a predecessor state
                        # counts as initialization.
                        if (isinstance(onode, nd.AccessNode)
                                and onode.data == node.data):
                            if s.in_degree(onode) > 0:
                                input_found = True
                                break
                    if input_found:
                        break
                # Warn (not error): an uninitialized read is legal but
                # suspicious, unless the node is explicitly zero-initialized.
                if not input_found and node.setzero == False:
                    warnings.warn(
                        'WARNING: Use of uninitialized transient "%s" in state %s'
                        % (node.data, state.label))
github spcl / dace / dace / frontend / python / newast.py View on Github external
a=access_type)
        else:
            var_name = self.sdfg.temp_data_name()

        # Derive the inner descriptor from the parent array and the accessed
        # range. NOTE(review): non_squeezed appears to hold the dimension
        # indices kept by Range.squeeze() — confirm against its definition.
        parent_name = self.scope_vars[name]
        parent_array = self.scope_arrays[parent_name]
        squeezed_rng = copy.deepcopy(rng)
        non_squeezed = squeezed_rng.squeeze()
        shape = squeezed_rng.size()
        dtype = parent_array.dtype

        # Default to the same descriptor kind as the parent array.
        if arr_type is None:
            arr_type = type(parent_array)
        if arr_type == data.Scalar:
            self.sdfg.add_scalar(var_name, dtype)
        elif arr_type == data.Array:
            # Preserve the parent's strides for the surviving dimensions;
            # fall back to a unit stride when everything was squeezed away.
            if non_squeezed:
                strides = [parent_array.strides[d] for d in non_squeezed]
            else:
                strides = [1]
            self.sdfg.add_array(var_name, shape, dtype, strides=strides)
        elif arr_type == data.Stream:
            self.sdfg.add_stream(var_name, dtype)
        else:
            raise NotImplementedError(
                "Data type {} is not implemented".format(arr_type))

        # Cache the mapping so repeated accesses to the same (name, range,
        # access kind) reuse the descriptor created above.
        self.accesses[(name, rng, access_type)] = (var_name, squeezed_rng)

        inner_indices = set(non_squeezed)

        if access_type == 'r':
github spcl / dace / dace / transformation / dataflow / copy_to_device.py View on Github external
def can_be_applied(graph, candidate, expr_index, sdfg, strict=False):
        """Match only if the candidate nested SDFG has no stream endpoints
        on its memlet paths and no multi-element WCR outputs."""
        nested_sdfg = graph.nodes()[candidate[CopyToDevice._nested_sdfg]]

        def _stream_endpoint(endpoint):
            # True when a memlet path terminates at an access node whose
            # descriptor is a Stream.
            return (isinstance(endpoint, nodes.AccessNode)
                    and isinstance(sdfg.arrays[endpoint.data], data.Stream))

        for edge in graph.all_edges(nested_sdfg):
            mpath = graph.memlet_path(edge)
            # Stream inputs/outputs not allowed
            if _stream_endpoint(mpath[0].src) or _stream_endpoint(
                    mpath[-1].dst):
                return False
            # WCR outputs with arrays are not allowed
            if (edge.data.wcr is not None
                    and edge.data.subset.num_elements() != 1):
                return False

        return True
github spcl / dace / dace / transformation / dataflow / stream_transient.py View on Github external
def can_be_applied(graph, candidate, expr_index, sdfg, strict=False):
        """Match when the inner map exit has at least one streaming output
        that flows directly into the outer map exit."""
        inner_exit = graph.nodes()[candidate[StreamTransient._map_exit]]
        outer_exit = graph.nodes()[candidate[
            StreamTransient._outer_map_exit]]

        # A streaming output is an out-edge of the inner exit whose memlet
        # data descriptor is a Stream and whose destination is the outer exit.
        return any(
            isinstance(sdfg.arrays[memlet.data], data.Stream)
            and dst == outer_exit
            for _, _, dst, _, memlet in graph.out_edges(inner_exit))