How to use the blosc.pack_array function in blosc

To help you get started, we’ve selected a few blosc.pack_array examples based on popular ways the function is used in public projects.

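Before the project examples, here is a minimal, self-contained sketch of the basic round trip: blosc.pack_array serializes a NumPy array (shape and dtype included) into a single compressed bytes object, and blosc.unpack_array restores it. The array used here is arbitrary.

import blosc
import numpy as np

# Any NumPy array works; its shape and dtype travel inside the packed bytes.
arr = np.arange(1_000_000, dtype=np.float32).reshape(1000, 1000)

# Compress into one bytes object; clevel, shuffle and cname are optional keywords.
packed = blosc.pack_array(arr, clevel=9, cname='lz4')

# Restore the original array, including shape and dtype.
restored = blosc.unpack_array(packed)
assert np.array_equal(arr, restored)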

github wehr-lab / autopilot / autopilot / hardware / cameras.py (View on GitHub)
def _write_frame(self):
        """
        Put :attr:`.frame` into the :attr:`._write_q`, optionally compressing it with :func:`blosc.pack_array`
        """
        try:
            if self.blosc:
                self._write_q.put_nowait((self.frame[0], blosc.pack_array(self.frame[1])))
            else:
                self._write_q.put_nowait(self.frame)
        except Full:
            self.logger.exception('Frame {} could not be written, queue full'.format(self.frame_n))
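Whatever consumes `_write_q` has to undo the packing with blosc.unpack_array. A hypothetical reader loop (not part of the autopilot source; the stop-sentinel convention is an assumption) might look like:

import blosc

def _read_frames(write_q, use_blosc=True):
    # Hypothetical consumer for the queue filled by _write_frame above.
    while True:
        item = write_q.get()
        if item is None:  # assumed stop sentinel
            break
        if use_blosc:
            timestamp, packed = item
            frame = blosc.unpack_array(packed)  # reverses blosc.pack_array
        else:
            timestamp, frame = item
        yield timestamp, frame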
github jhuapl-boss / intern / ndio / remote / neurodata.py (View on GitHub)
def _post_cutout_no_chunking_blosc(self, token, channel,
                                       x_start, y_start, z_start,
                                       data, resolution):
        """
        Accepts data in zyx. !!!
        """
        data = numpy.expand_dims(data, axis=0)
        blosc_data = blosc.pack_array(data)

        url = self.url("{}/{}/blosc/{}/{},{}/{},{}/{},{}/".format(
            token, channel,
            resolution,
            x_start, x_start + data.shape[3],
            y_start, y_start + data.shape[2],
            z_start, z_start + data.shape[1]
        ))

        req = requests.post(url, data=blosc_data, headers={
            'Content-Type': 'application/octet-stream'
        })

        if req.status_code != 200:
            raise RemoteDataUploadError(req.text)
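Because blosc.pack_array returns a plain bytes object, it can be sent directly as the request body with an application/octet-stream content type, as above. Fetching data back would be the mirror image; the GET call below is a sketch that reuses the url and requests objects from the snippet, and the endpoint layout is an assumption, not taken from the intern source:

# Hypothetical download of a blosc-packed cutout (endpoint layout assumed).
req = requests.get(url, headers={'Accept': 'application/octet-stream'})
req.raise_for_status()
cutout = blosc.unpack_array(req.content)  # back to a zyx numpy array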
github neurodata / ndstore / scripts / aws_interface.py (View on GitHub)
data[b,:,:] = np.asarray(Image.open(tile_handle))
              except IOError as e:
                pass
                # print "missing file", file_name
            # iterate over the tile if it is larger then supercuboid size
            for y_index in range(0, y_tilesz/ysupercubedim):
              for x_index in range(0, x_tilesz/xsupercubedim):
                # calculate the morton index 
                insert_data = data[:, y_index*ysupercubedim:(y_index+1)*ysupercubedim, x_index*xsupercubedim:(x_index+1)*xsupercubedim]
                if np.any(insert_data):
                  morton_index = XYZMorton([x_index+(x*x_tilesz/xsupercubedim), y_index+(y*y_tilesz/ysupercubedim), z])
                  self.logger.info("[{},{},{}]".format((x_index+x)*x_tilesz, (y_index+y)*y_tilesz, z))
                  # updating the index
                  self.cuboidindex_db.putItem(ch.channel_name, cur_res, x, y, z)
                  # inserting the cube
                  self.s3_io.putCube(ch, cur_res, morton_index, blosc.pack_array(insert_data))
github hwang595 / ps_pytorch / src / compression.py (View on GitHub)
def g_compress(grad):
    assert isinstance(grad, np.ndarray)
    compressed_grad = blosc.pack_array(grad, cname='snappy')
    return compressed_grad
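The matching decompression step is a single call to blosc.unpack_array. A sketch of the counterpart (the name g_decompress is an assumption, not taken from the repo):

import blosc
import numpy as np

def g_decompress(compressed_grad):
    # Hypothetical counterpart to g_compress above.
    grad = blosc.unpack_array(compressed_grad)
    assert isinstance(grad, np.ndarray)
    return grad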
github neurodata / ndstore / webservices / ndwsingest.py (View on GitHub)
# Getting a Cube id and ingesting the data one cube at a time
              zidx = XYZMorton ( [x/xsupercubedim, y/ysupercubedim, (slice_number-zoffset)/zsupercubedim] )
              cube = Cube.CubeFactory(supercubedim, ch.channel_type, ch.channel_datatype)
              cube.zeros()

              xmin,ymin = x,y
              xmax = min ( ximagesz, x+xsupercubedim )
              ymax = min ( yimagesz, y+ysupercubedim )
              zmin = 0
              zmax = min(slice_number+zsupercubedim, zimagesz+1)

              cube.data[0:zmax-zmin,0:ymax-ymin,0:xmax-xmin] = slab[zmin:zmax, ymin:ymax, xmin:xmax]
              if cube.isNotZeros():
                cuboidindex_db.putItem(ch.channel_name, self.resolution, x, y, slice_number)
                s3_io.putCube(ch, self.resolution, zidx, blosc.pack_array(cube.data))
                
                # if ch.channel_type in IMAGE_CHANNELS:
                  # db.putCube(ch, zidx, self.resolution, cube, update=True)
                # elif ch.channel_type in TIMESERIES_CHANNELS:
                  # db.putTimeCube(ch, zidx, timestamp, self.resolution, cube, update=False)
                # elif ch.channel_type in ANNOTATION_CHANNELS:
                  # corner = map(sub, [x,y,slice_number], [xoffset,yoffset,zoffset])
                  # db.annotateDense(ch, corner, self.resolution, cube.data, 'O')
                # else:
                  # logger.error("Channel type {} not supported".format(ch.channel_type))
                  # raise NDWSError("Channel type {} not supported".format(ch.channel_type))
          
          # clean up the slices fetched
          self.cleanData(range(slice_number, slice_number+zsupercubedim) if slice_number + zsupercubedim<=zimagesz else range(slice_number, zimagesz))
github uncbiag / easyreg / easyreg / seg_data_loader_onfly.py (View on GitHub)
def __read_img_label_into_zipnp(self,img_label_path_dic,img_label_dic):
        pbar = pb.ProgressBar(widgets=[pb.Percentage(), pb.Bar(), pb.ETA()], maxval=len(img_label_path_dic)).start()
        count = 0
        for fn, img_label_path in img_label_path_dic.items():
            img_label_np_dic = {}
            img_sitk, original_spacing, original_sz = self.__read_and_clean_itk_info(img_label_path['image'])
            resized_img, resize_factor = self.resize_img(img_sitk)
            img_np = sitk.GetArrayFromImage(resized_img)
            img_np = self.normalize_intensity(img_np)
            img_label_np_dic['image'] = blosc.pack_array(img_np.astype(np.float32))

            if self.has_label:
                label_sitk, _, _ = self.__read_and_clean_itk_info(img_label_path['label'])
                resized_label,_ = self.resize_img(label_sitk,is_label=True)
                label_np = sitk.GetArrayFromImage(resized_label)
                label_index = list(np.unique(label_np))
                img_label_np_dic['label'] = blosc.pack_array(label_np.astype(np.int64))
                img_label_np_dic['label_index'] = label_index
            img_after_resize = self.img_after_resize if self.img_after_resize is not None else original_sz
            new_spacing = original_spacing * (original_sz - 1) / (np.array(img_after_resize) - 1)
            normalized_spacing = self._normalize_spacing(new_spacing, img_after_resize, silent_mode=True)
            img_label_np_dic['original_sz'] = original_sz
            img_label_np_dic['original_spacing'] = original_spacing
            img_label_np_dic['spacing'] = normalized_spacing
            img_label_np_dic['img_sz'] = list(img_np.shape)
            img_label_dic[fn] = img_label_np_dic
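Storing the volumes as blosc-packed bytes in img_label_dic trades a little CPU for a much smaller memory footprint; whenever a sample is actually needed, the arrays are restored with blosc.unpack_array. A hypothetical access helper (not the actual easyreg code; only the dictionary keys follow the snippet above):

import blosc

def get_sample(img_label_dic, fname):
    # Hypothetical access-time unpacking of a cached sample.
    entry = img_label_dic[fname]
    img = blosc.unpack_array(entry['image'])  # float32 volume
    label = blosc.unpack_array(entry['label']) if 'label' in entry else None
    return img, label, entry['spacing']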
github hwang595 / ps_pytorch / src / compression.py (View on GitHub)
def w_compress(w):
    assert isinstance(w, np.ndarray)
    packed_msg = blosc.pack_array(w, cname='snappy')
    return packed_msg
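pack_array accepts a codec name via cname and a compression level via clevel, but 'snappy' (used here) is only usable if the underlying C-Blosc library was built with it. A defensive sketch that checks blosc.compressor_list() first (the helper name is an assumption):

import blosc

def pack_with_fallback(arr, preferred='snappy', clevel=5):
    # Hypothetical helper: fall back to the always-available blosclz codec
    # if the preferred compressor was not compiled into this blosc build.
    cname = preferred if preferred in blosc.compressor_list() else 'blosclz'
    return blosc.pack_array(arr, clevel=clevel, cname=cname)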
github uncbiag / easyreg / easyreg / seg_data_loader_onfly.py (View on GitHub)
for i, fname in enumerate(img_name_list):
                label_set = img_label_dict[fname]['label_index']
                if i ==0:
                    interested_label_set = set(label_set)
                else:
                    interested_label_set = interested_label_set.union(label_set)
            interested_label_list = list(interested_label_set)
        else:
            interested_label_list = self.interested_label_list

        #self.standard_label_index = tuple([int(item) for item in interested_label_list])
        for fname in img_name_list:
            label = img_label_dict[fname]['label']
            label = self.__convert_to_standard_label_map(label, interested_label_list)
            label_density = list(np.bincount(label.reshape(-1).astype(np.int32)) / len(label.reshape(-1)))
            img_label_dict[fname]['label'] = blosc.pack_array(label)
            img_label_dict[fname]['label_density']=label_density
            img_label_dict[fname]['label_org_index'] = interested_label_list
            img_label_dict[fname]['label_converted_index'] = list(range(len(interested_label_list)))
        return img_label_dict
github neurodata / ndstore / webservices / ndstack.py (View on GitHub)
tempdata = np.asarray(slimage.resize([xsupercubedim, ysupercubedim]))
                    newdata[sl,:,:] = np.left_shift(tempdata[:,:,3], 24, dtype=np.uint32) | np.left_shift(tempdata[:,:,2], 16, dtype=np.uint32) | np.left_shift(tempdata[:,:,1], 8, dtype=np.uint32) | np.uint32(tempdata[:,:,0])

                zidx = XYZMorton ([x,y,z])
                cube = Cube.CubeFactory(supercubedim, ch.channel_type, ch.channel_datatype, time_range=[ts, ts+1])
                cube.zeros()
                # copying array into cube.data
                # we have to ensure that we always insert 4D data into the database
                # we convert 4D data to 3D data since we cannot process 4D data in ctypes
                # removing this breaks caching since caching expects there to be 4D cuboids
                cube.data[0,:] = newdata
                
                # checking if the cube is empty or not
                if cube.isNotZeros():
                  if proj.s3backend == S3_TRUE:
                    s3_io.putCube(ch, ts, zidx, cur_res, blosc.pack_array(cube.data), neariso=neariso)
                  else:
                    db.putCube(ch, ts, zidx, cur_res, cube, update=True, neariso=neariso)
              
              # annotation channel
              else:
    
                try:
                  if scaling == ZSLICES and neariso==False:
                    ZSliceStackCube_ctype(olddata, newdata)
                  else:
                    IsotropicStackCube_ctype(olddata, newdata)

                  corner = [x*xsupercubedim, y*ysupercubedim, z*zsupercubedim] 
                  # add resized data to cube
                  if np.any(newdata):
                    db.annotateDense(ch, ts, corner, cur_res, newdata, 'O', neariso=neariso)
github neurodata / ndstore / spdb / s3io.py (View on GitHub)
[x,y,z] = ndlib.MortonXYZ(super_zidx)
    # start = map(mul, cubedim, [x,y,z])
    start = map(mul, [x,y,z], self.db.datasetcfg.getSuperCubeSize())
    
    for z in range(znumcubes):
      for y in range(ynumcubes):
        for x in range(xnumcubes):
          zidx = ndlib.XYZMorton(map(add, start, [x,y,z]))

          # Parameters in the cube slab
          index = map(mul, cubedim, [x,y,z])
          end = map(add, index, cubedim)

          cube_data = super_cube[index[2]:end[2], index[1]:end[1], index[0]:end[0]]
          zidx_list.append(zidx)
          cube_list.append(blosc.pack_array(cube_data))
    
    return zidx_list, cube_list
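Reading the data back is the mirror image: each packed cube is restored with blosc.unpack_array and copied into a supercube at the offset implied by its position. A rough sketch under the same cubedim conventions (the function and argument names are assumptions, not part of the ndstore source):

import blosc
import numpy as np

def assemble_super_cube(cube_list, cubedim, numcubes):
    # Hypothetical reverse of the slicing loop above.
    xdim, ydim, zdim = cubedim
    xnum, ynum, znum = numcubes
    super_cube = None
    i = 0
    for z in range(znum):
        for y in range(ynum):
            for x in range(xnum):
                cube_data = blosc.unpack_array(cube_list[i])
                if super_cube is None:
                    super_cube = np.zeros((znum * zdim, ynum * ydim, xnum * xdim), dtype=cube_data.dtype)
                super_cube[z*zdim:(z+1)*zdim, y*ydim:(y+1)*ydim, x*xdim:(x+1)*xdim] = cube_data
                i += 1
    return super_cube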