How to use the h5pyd._hl.selections.select function in h5pyd

To help you get started, we’ve selected a few h5pyd examples based on popular ways this function is used in public projects.

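The snippets below all follow the same pattern: build a selection from a dataset (or table) and a slicing argument with sel.select, then inspect its shape or turn it into an HTTP query parameter. A minimal sketch of that pattern, assuming a reachable HSDS/h5serv endpoint (the domain and dataset names here are purely illustrative):

import h5pyd
import h5pyd._hl.selections as sel

f = h5pyd.File("/home/myuser/example.h5", "r")   # hypothetical domain
dset = f["mydata"]                               # hypothetical dataset name

# Build a selection covering the first 100 rows
selection = sel.select(dset, slice(0, 100))
print(selection.mshape)           # shape of the selected region
print(selection.nselect)          # number of selected elements
print(selection.getQueryParam())  # string used as the request's 'select' parameter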

github HDFGroup / h5pyd / h5pyd / _hl / table.py
        # setup for pagination in case we can't read everything in one go
        data = []
        cursor = start
        page_size = stop - start

        while True:
            # Perform the actual read
            req = "/datasets/" + self.id.uuid + "/value"
            params = {}
            params["query"] = condition
            self.log.info("req - cursor: {} page_size: {}".format(cursor, page_size))
            end_row = cursor+page_size
            if end_row > stop:
                end_row = stop
            selection_arg = slice(cursor, end_row)
            selection = sel.select(self, selection_arg)

            sel_param = selection.getQueryParam()
            self.log.debug("query param: {}".format(sel_param))
            if sel_param:
                params["select"] = sel_param
            try:
                self.log.debug("params: {}".format(params))
                rsp = self.GET(req, params=params)
                values = rsp["value"]
                count = len(values)
                self.log.info("got {} rows".format(count))
                if count > 0:
                    if limit is None or count + len(data) <= limit:
                        # add in all the data
                        data.extend(values)
                    else:
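The excerpt above is cut off mid-loop, but the pagination idea is straightforward: read the requested range one page at a time, building a fresh selection per page and passing its query parameter along with the condition. A condensed, hedged sketch of that loop (table, condition, start, and stop stand in for the method's own locals, and table.GET mirrors the self.GET call in the excerpt):

data = []
cursor = start
page_size = stop - start

while cursor < stop:
    # Clamp the page to the requested range and select it
    end_row = min(cursor + page_size, stop)
    selection = sel.select(table, slice(cursor, end_row))

    params = {"query": condition}
    sel_param = selection.getQueryParam()
    if sel_param:
        params["select"] = sel_param

    rsp = table.GET("/datasets/" + table.id.uuid + "/value", params=params)
    data.extend(rsp["value"])
    cursor = end_row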
github HDFGroup / h5pyd / h5pyd / _hl / dataset.py
                    for i in range(len(data)):
                        converted_data.append(self.toTuple(data[i]))
                    data = tuple(converted_data)

                arr = numpy.empty((), dtype=new_dtype)
                arr[()] = data
            if selection.mshape is None:
                self.log.info("return scalar selection of: {}, dtype: {}, shape: {}".format(arr, arr.dtype, arr.shape))
                return arr[()]

            return arr

        # === Everything else ===================

        # Perform the dataspace selection
        selection = sel.select(self, args)
        self.log.debug("selection_constructor")

        if selection.nselect == 0:
            return numpy.ndarray(selection.mshape, dtype=new_dtype)
        # Up-converting to (1,) so that numpy.ndarray correctly creates
        # np.void rows in case of multi-field dtype. (issue 135)
        single_element = selection.mshape == ()
        mshape = (1,) if single_element else selection.mshape

        rank = len(self._shape)

        self.log.debug("dataset shape: {}".format(self._shape))
        self.log.debug("mshape: {}".format(mshape))
        self.log.debug("single_element: {}".format(single_element))
        # Perform the actual read
        rsp = None
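For reads through Dataset.__getitem__, the selection determines both the shape of the result and the early return for empty selections. A hedged sketch of the same checks applied from user code (dset and the index are illustrative; sel.select is given the same tuple of slices that __getitem__ receives):

import numpy
import h5pyd._hl.selections as sel

selection = sel.select(dset, numpy.s_[0:10, 2:4])

if selection.nselect == 0:
    # Nothing selected: an empty array of the selection's shape, as in the excerpt
    arr = numpy.ndarray(selection.mshape, dtype=dset.dtype)
else:
    # Normally you would simply index the dataset; it performs the same select internally
    arr = dset[0:10, 2:4]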
github HDFGroup / h5pyd / h5pyd / _hl / dataset.py
                for fieldname in fieldnames:
                    subtype = h5t.py_create(val.dtype.fields[fieldname][0])
                    offset = val.dtype.fields[fieldname][1]
                    mtype.insert(self._e(fieldname), offset, subtype)

        # Use mtype derived from array (let DatasetID.write figure it out)
        else:
            mshape = val.shape
            #mtype = None
        """
        mshape = val.shape
        self.log.debug("mshape: {}".format(mshape))
        self.log.debug("data dtype: {}".format(val.dtype))

        # Perform the dataspace selection
        selection = sel.select(self, args)
        self.log.debug("selection.mshape: {}".format(selection.mshape))
        if selection.nselect == 0:
            return

        # Broadcast scalars if necessary.
        if (mshape == () and selection.mshape is not None and selection.mshape != ()):
            self.log.debug("broadcast scalar")
            self.log.debug("selection.mshape: {}".format(selection.mshape))
            if self.dtype.subdtype is not None:
                raise TypeError("Scalar broadcasting is not supported for array dtypes")
            val2 = numpy.empty(selection.mshape, dtype=val.dtype)
            val2[...] = val
            val = val2
            mshape = val.shape

        # Perform the write, with broadcasting
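The write path builds the selection the same way, and its scalar-broadcast branch can be reproduced outside the class. A hedged sketch (dset is assumed to be a one-dimensional numeric dataset; the final assignment shows the equivalent public API call):

import numpy
import h5pyd._hl.selections as sel

val = numpy.asarray(42, dtype=dset.dtype)      # scalar value to write
selection = sel.select(dset, slice(0, 10))

if val.shape == () and selection.mshape not in (None, ()):
    # Broadcast the scalar across the selected region, as the excerpt does
    buf = numpy.empty(selection.mshape, dtype=val.dtype)
    buf[...] = val
    val = buf

dset[0:10] = 42   # Dataset.__setitem__ performs the same selection and broadcast internally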
github HDFGroup / h5pyd / h5pyd / _hl / dataset.py
            self.id.read(sid_out, sid, out, mtype)
            return out
        """

        # === Check for zero-sized datasets =====
        if self._shape is None or numpy.prod(self._shape) == 0:
            # These are the only access methods NumPy allows for such objects
            if len(args) == 0 or len(args) == 1 and isinstance(args[0], tuple) and args[0] == Ellipsis:
                return numpy.empty(self._shape, dtype=new_dtype)

        # === Scalar dataspaces =================

        if self._shape == ():
            #fspace = self.id.get_space()
            #selection = sel2.select_read(fspace, args)
            selection = sel.select(self, args)
            self.log.info("selection.mshape: {}".format(selection.mshape))

            # TBD - refactor the following with the code for the non-scalar case
            req = "/datasets/" + self.id.uuid + "/value"
            rsp = self.GET(req, format="binary")
            if type(rsp) is bytes:
                # got binary response
                self.log.info("got binary response for scalar selection")
                arr = numpy.frombuffer(rsp, dtype=new_dtype)
                #arr = bytesToArray(rsp, new_dtype, self._shape)
                if not self.dtype.shape:
                    self.log.debug("reshape arr to: {}".format(self._shape))
                    arr = numpy.reshape(arr, self._shape)
            else:
                # got JSON response
                # need some special conversion for compound types --
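For a scalar dataset (shape ()), the selection is still built with sel.select; an Ellipsis or empty-tuple index is what __getitem__ normally passes through. A hedged sketch, assuming the Ellipsis handling matches h5py-style selection rules (scalar_value is a hypothetical dataset name):

scalar_dset = f["scalar_value"]                   # hypothetical dataset with shape ()
selection = sel.select(scalar_dset, (Ellipsis,))  # same call as in the excerpt
print(selection.mshape)                           # shape of the scalar selection

value = scalar_dset[()]                           # the usual way to read a scalar dataset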
github HDFGroup / h5pyd / h5pyd / _hl / table.py
        # todo - will need the following once we have binary transfers
        # mtype = h5t.py_create(new_dtype)
        mtype = new_dtype

        # Perform the dataspace selection
        if start or stop:
            if not start:
                start = 0
            if not stop:
                stop = self._shape[0]
        else:
            start = 0
            stop = self._shape[0]

        selection_arg = slice(start, stop)
        selection = sel.select(self, selection_arg)

        if selection.nselect == 0:
            return numpy.ndarray(selection.mshape, dtype=new_dtype)

        # setup for pagination in case we can't read everything in one go
        data = []
        cursor = start
        page_size = stop - start

        while True:
            # Perform the actual read
            req = "/datasets/" + self.id.uuid + "/value"
            params = {}
            params["query"] = condition
            self.log.info("req - cursor: {} page_size: {}".format(cursor, page_size))
            end_row = cursor+page_size
github HDFGroup / h5pyd / h5pyd / _hl / table.py
"""
        if not isinstance(value, dict):
            raise ValueError("expected value to be a dict")

        # Perform the dataspace selection
        if start or stop:
            if not start:
                start = 0
            if not stop:
                stop = self._shape[0]
        else:
            start = 0
            stop = self._shape[0]

        selection_arg = slice(start, stop)
        selection = sel.select(self, selection_arg)
        sel_param = selection.getQueryParam()
        params = {}
        params["query"] = condition
        if limit:
            params["Limit"] = limit
        self.log.debug("query param: {}".format(sel_param))
        if sel_param:
            params["select"] = sel_param

        req = "/datasets/" + self.id.uuid + "/value"

        rsp = self.PUT(req, body=value, format="json", params=params)
        indices = None
        arr = None
        if "index" in rsp:
            indices = rsp["index"]
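The update path combines the selection's query parameter with a where-style condition before issuing the PUT. A condensed, hedged sketch of that request setup (the condition, limit, and update value are illustrative, and table.PUT mirrors the self.PUT call in the excerpt):

selection = sel.select(table, slice(0, table.shape[0]))

limit = 10                                   # hypothetical row limit
params = {"query": "temperature > 32"}       # hypothetical condition
if limit:
    params["Limit"] = limit
sel_param = selection.getQueryParam()
if sel_param:
    params["select"] = sel_param

req = "/datasets/" + table.id.uuid + "/value"
rsp = table.PUT(req, body={"pressure": 0.0}, format="json", params=params)  # hypothetical field update
indices = rsp.get("index")                   # rows that matched the condition, when returned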