How to use the blosc.set_nthreads function in blosc

To help you get started, we’ve selected a few blosc examples based on popular ways it is used in public projects.

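Each snippet below ultimately revolves around the same call: blosc.set_nthreads(n) sets how many internal threads blosc uses for compression and decompression, and returns the previous setting. A minimal standalone sketch (the array is just sample data):

import blosc
import numpy as np

# use as many blosc threads as there are detected cores;
# set_nthreads returns the previous setting
prev = blosc.set_nthreads(blosc.detect_number_of_cores())

arr = np.arange(1000000, dtype=np.float64)
packed = blosc.compress(arr.tobytes(), typesize=arr.dtype.itemsize)
restored = np.frombuffer(blosc.decompress(packed), dtype=arr.dtype)
assert np.array_equal(arr, restored)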

github uncbiag / easyreg / easyreg / seg_data_loader_onfly.py
from __future__ import print_function, division
import blosc
import torch
from torch.utils.data import Dataset
from data_pre.seg_data_utils import *
from data_pre.transform import Transform
import SimpleITK as sitk
from multiprocessing import *
blosc.set_nthreads(1)  # pin blosc to a single thread; this loader parallelizes with its own worker processes
import progressbar as pb
from copy import deepcopy
import random
import time
class SegmentationDataset(Dataset):
    """segmentation dataset.
    if the data are loaded into memory, we provide data processing option like image resampling and label filtering
    if not, for efficiency, we assume the data are preprocessed and the image resampling still works but the label filtering are disabled
    """

    def __init__(self, data_path, phase, transform=None, option=None):
        """
        :param data_path: string, path to processed data
        :param phase: string, 'train' / 'val' / 'test' / 'debug'
        :param transform: function, apply transform on data
        :param option: settings for the segmentation data loader
        """
        self.data_path = data_path
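
In the loaders above, blosc.set_nthreads(1) runs at import time so that compression stays single-threaded while the module's own worker processes supply the parallelism. One way to apply the same setting explicitly inside each PyTorch DataLoader worker is worker_init_fn; a hedged sketch (the TensorDataset is a stand-in, not the project's dataset):

import blosc
import torch
from torch.utils.data import DataLoader, TensorDataset

def _worker_init(worker_id):
    # each DataLoader worker is a separate process, so the setting
    # must be applied in every worker, not just the parent
    blosc.set_nthreads(1)

dataset = TensorDataset(torch.zeros(8, 3))
loader = DataLoader(dataset, num_workers=2, worker_init_fn=_worker_init)
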
github Blosc / bloscpack / bloscpack / __init__.py
def process_nthread_arg(args):
    """ Extract and set nthreads. """
    if args.nthreads != blosc.ncores:
        blosc.set_nthreads(args.nthreads)
    print_verbose('using %d thread%s' %
            (args.nthreads, 's' if args.nthreads > 1 else ''))
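
process_nthread_arg only calls set_nthreads when the requested count differs from blosc.ncores, the core count blosc detects at import. A self-contained sketch of the same pattern, with the argument parsing filled in as an assumption:

import argparse
import blosc

parser = argparse.ArgumentParser()
parser.add_argument('--nthreads', type=int, default=blosc.ncores,
                    help='number of blosc threads (defaults to detected cores)')
args = parser.parse_args()

if args.nthreads != blosc.ncores:
    blosc.set_nthreads(args.nthreads)
print('using %d thread%s' % (args.nthreads, 's' if args.nthreads > 1 else ''))
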
github uncbiag / easyreg / data_pre / seg_data_loader_online.py
        self.use_org_size = seg_option['use_org_size']
        self.detect_et = seg_option['detect_et']
        self.option_p['patch_size'] = seg_option['patch_size']
        self.seg_option = seg_option
        self.img_pool = []
        if self.is_train:
            self.init_img_pool()
            print('img pool initialized complete')
            self.init_corr_transform_pool()
            print('transforms initialized complete')
        else:
            self.init_img_pool()
            print('img pool initialized complete')
            self.init_corr_partition_pool()
            print("partition pool initialized complete")
        blosc.set_nthreads(1)
github uncbiag / easyreg / data_pre / reg_data_loader_onfly.py
from __future__ import print_function, division
import os

import blosc
import torch
from torch.utils.data import Dataset, DataLoader
from data_pre.reg_data_utils import *
from multiprocessing import *
num_of_workers = 12
blosc.set_nthreads(1)
import progressbar as pb

class RegistrationDataset(Dataset):
    """registration dataset."""

    def __init__(self, data_path, phase=None, transform=None, seg_option=None, reg_option=None):
        """
        The dataloader for the registration task. To avoid frequent disk access, all pairs are compressed into memory.
        :param data_path: string, path to the data;
            the data should be preprocessed and saved into txt files
        :param phase: string, 'train' / 'val' / 'test' / 'debug'; 'debug' means a subset of the training data, used to check whether the model overfits
        :param transform: function, apply transform on data
        :param seg_option: pars, settings for the segmentation task; None for the registration task
        :param reg_option: pars, settings for the registration task; None for the segmentation task
        """
github Blosc / bloscpack / bloscpack / cli.py
def process_nthread_arg(args):
    """ Extract and set nthreads. """
    if args.nthreads != blosc.ncores:
        blosc.set_nthreads(args.nthreads)
    log.verbose('using %d thread%s' %
                (args.nthreads, 's' if args.nthreads > 1 else ''))
github Blosc / python-blosc / bench / threadpool.py
# ThreadPool, np, N, maxThreads and compressSlice are defined earlier in the benchmark script
def compressStack(imageStack, blosc_threads=1, pool_threads=maxThreads):
    """
    Does frame compression using a ThreadPool to distribute the load.
    """
    blosc.set_nthreads( blosc_threads )
    tPool = ThreadPool( pool_threads )

    num_slices = imageStack.shape[0]
    # Build parameters list for the threaded processes, consisting of index
    tArgs = [None] * num_slices
    itemSize = imageStack.dtype.itemsize
    bytesList = [None] * num_slices
    for J in np.arange(num_slices):
        tArgs[J] = (imageStack[J,:,:].__array_interface__['data'][0], \
                    N*N, itemSize, bytesList, J)

    # All operations are done 'in-place'
    tPool.map( compressSlice, tArgs )
    tPool.close()
    tPool.join()
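
This benchmark splits parallelism between blosc's internal threads and a ThreadPool of Python workers. A quick way to see the trade-off is to time compression at different blosc thread counts; a sketch with synthetic data:

import time
import blosc
import numpy as np

stack = np.random.randint(0, 255, size=(64, 512, 512)).astype(np.uint8)
for n in (1, 2, 4, 8):
    blosc.set_nthreads(n)
    t0 = time.perf_counter()
    packed = blosc.compress(stack.tobytes(), typesize=stack.dtype.itemsize)
    print('%d thread(s): %.3f s, %d bytes' % (n, time.perf_counter() - t0, len(packed)))
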
github arq5x / gemini / gemini / gemini_load_chunk2.py
from __future__ import print_function
from effects import SnpEff

# native Python imports
import os.path
import time
import sys
import sqlite3
import itertools as it

import toml  # toml.py

# third-party imports
import cyvcf2 as vcf
import blosc
blosc.set_nthreads(1)
blosc.set_blocksize(8192)

import zlib
import cPickle

def opack_blob(obj, _none=buffer(zlib.compress(cPickle.dumps(None, cPickle.HIGHEST_PROTOCOL)))):
    if obj is None: return _none
    return buffer(zlib.compress(cPickle.dumps(obj, cPickle.HIGHEST_PROTOCOL), 1))

def pack_blob(obj):
    if obj is None: return ''
    return buffer(blosc.compress(obj.tostring(), obj.dtype.itemsize, clevel=5, shuffle=True))
    #return buffer(blosc.pack_array(obj))

def is_number(op, field):
    return field.endswith("_float") or op in ("mean", "median", "min", "max")
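
pack_blob compresses the array's raw bytes with the element itemsize as the typesize, which lets blosc's shuffle filter exploit the element layout; the commented-out alternative, blosc.pack_array, also records dtype and shape so the array can be restored without extra bookkeeping. A round-trip sketch of both in Python 3 (tobytes replaces the deprecated tostring used in this Python 2 code):

import blosc
import numpy as np

arr = np.arange(1000, dtype=np.float64)

# raw-bytes route, as in pack_blob: dtype and shape must be tracked by the caller
blob = blosc.compress(arr.tobytes(), typesize=arr.dtype.itemsize, clevel=5, shuffle=blosc.SHUFFLE)
back = np.frombuffer(blosc.decompress(blob), dtype=arr.dtype)

# pack_array route: dtype and shape travel with the blob
assert np.array_equal(arr, blosc.unpack_array(blosc.pack_array(arr)))
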
github tensorwerk / hangar-py / src / hangar / utils.py
    Returns
    -------
    int
        ncores blosc will use on the system
    """
    nCores = blosc.detect_number_of_cores()
    if nCores == 1:
        nUsed = 1
    elif nCores == 2:
        nUsed = 2
    elif nCores <= 4:
        nUsed = nCores - 1
    else:
        nUsed = nCores - 2
    blosc.set_nthreads(nUsed)
    return nUsed
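
The excerpt starts mid-docstring; the heuristic is to use every core on small machines but keep one or two cores free once the machine has more than two, so blosc does not starve the rest of the process. A condensed standalone version (the function name here is illustrative, not hangar's actual API):

import blosc

def pick_blosc_nthreads():
    ncores = blosc.detect_number_of_cores()
    if ncores <= 2:
        nused = ncores          # 1 or 2 cores: use them all
    elif ncores <= 4:
        nused = ncores - 1      # leave one core free
    else:
        nused = ncores - 2      # leave two cores free
    blosc.set_nthreads(nused)
    return nused
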
github C-CINA / focus / scripts / proc / ioMRC.py
and (REVERSE_COMPRESSOR_ENUM[header['compressor']]) > 0:
            # compressed MRCZ
            print( "Compressing %s with compressor %s%d" %
                    (MRCfilename, header['compressor'], header['clevel'] ) )
            
            if header['dtype'] != 'uint4' and input_image.dtype != header['dtype']:
                # This correctly works for text to dtype comparison
                input_image = input_image.astype(header['dtype']) 
                
            if input_image.ndim == 3:
                chunkSize = input_image[0,:,:].size
            else:
                chunkSize = input_image.size
                input_image = np.reshape( input_image, [1,input_image.shape[0],input_image.shape[1] ])
                
            blosc.set_nthreads( header['n_threads'] )
            blosc.set_blocksize( 65536 )
            
            header['packedBytes'] = 0
            typeSize = input_image.dtype.itemsize
            
            print( input_image.shape )
            for J in np.arange( input_image.shape[0] ):
                # print( "Slice %d: Compressing address at: %d of %d:" % (J, int(J*typeSize*blockSize), input_image.nbytes) )
                
                # Looks like I have problem for typesize > 1?
                if int(J*typeSize*chunkSize) >= input_image.nbytes:
                    raise MemoryError( "MRCExport: Tried to reference past end of ndarray %d > %d" % (int(J*typeSize*chunkSize), input_image.nbytes ) )
                    

                compressedData = blosc.compress( input_image[J,:,:].tobytes(),
                            typeSize,
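
The loop above compresses the stack one 2D slice at a time, so each slice becomes an independently decompressible chunk. A self-contained sketch of that per-slice pattern (the header fields are replaced by plain variables):

import blosc
import numpy as np

# plain-variable stand-ins for the header fields used above
n_threads, clevel = 4, 5
stack = np.zeros((3, 64, 64), dtype=np.float32)

blosc.set_nthreads(n_threads)
blosc.set_blocksize(65536)

typesize = stack.dtype.itemsize
slices, packed_bytes = [], 0
for j in range(stack.shape[0]):
    blob = blosc.compress(stack[j, :, :].tobytes(), typesize, clevel=clevel)
    slices.append(blob)
    packed_bytes += len(blob)
print('packed %d bytes across %d slices' % (packed_bytes, len(slices)))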