How to use the blosc.set_blocksize function in blosc

To help you get started, we've selected a few blosc examples based on popular ways the library is used in public projects.
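
Before looking at the project snippets below, here is a minimal, self-contained sketch of how blosc.set_blocksize is typically paired with blosc.compress and blosc.decompress. The 256 KiB block size, array size, and compression level are arbitrary choices for illustration, not recommendations.

import blosc
import numpy as np

# Force a fixed 256 KiB internal block size; passing 0 restores blosc's
# automatic block-size selection.
blosc.set_blocksize(2**18)

data = np.arange(1000000, dtype=np.float32)
packed = blosc.compress(data.tobytes(), typesize=data.dtype.itemsize, clevel=5)
restored = np.frombuffer(blosc.decompress(packed), dtype=np.float32)
assert np.array_equal(data, restored)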


Example from Blosc/python-blosc: bench/threadpool.py (view on GitHub)
    # All operations are done 'in-place'
    tPool.map( compressSlice, tArgs )
    tPool.close()
    tPool.join()

def decompressStack( imageShape, imageDtype, blosc_threads = 1, pool_threads=maxThreads ):
    blosc.set_nthreads( blosc_threads )
    tPool = ThreadPool( pool_threads )

    num_slices = imageShape[0]
    imageStack = np.full(imageShape, fill_value=0)


blosc.print_versions()
blosc.set_blocksize( BLOCKSIZE )
print("Creating NumPy stack with %d float32 elements:" %(m*N*N) )

stack = np.zeros( [m,N,N], dtype=dtype )
xmesh, ymesh = np.meshgrid( np.arange(-N/2,N/2), np.arange(-N/2,N/2) )
compress_mesh = (np.cos( xmesh ) + np.exp( -ymesh**2 / N )).astype(dtype)
for J in np.arange(m):
    stack[J,:,:] = compress_mesh


### Determine arrangement of pool threads and blosc threads
testCases = int( np.floor( np.log2( maxThreads )) + 1 )
powProduct = 2**np.arange(0,testCases)
poolThreads = np.hstack( [1, powProduct] )
bloscThreads = np.hstack( [1, powProduct[::-1]] )
# Let's try instead just pool threads...
#poolThreads = np.arange( 1, maxThreads+1 )
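
For instance, with maxThreads = 8 the arrangement above gives testCases = 4, powProduct = [1, 2, 4, 8], poolThreads = [1, 1, 2, 4, 8] and bloscThreads = [1, 8, 4, 2, 1], so apart from the initial fully serial case the product of pool threads and blosc threads stays at maxThreads for every test case.
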
Example from C-CINA/focus: scripts/proc/ioMRC.py (view on GitHub)
# compressed MRCZ
            print( "Compressing %s with compressor %s%d" %
                    (MRCfilename, header['compressor'], header['clevel'] ) )
            
            if header['dtype'] != 'uint4' and input_image.dtype != header['dtype']:
                # This correctly works for text to dtype comparison
                input_image = input_image.astype(header['dtype']) 
                
            if input_image.ndim == 3:
                chunkSize = input_image[0,:,:].size
            else:
                chunkSize = input_image.size
                input_image = np.reshape( input_image, [1,input_image.shape[0],input_image.shape[1] ])
                
            blosc.set_nthreads( header['n_threads'] )
            blosc.set_blocksize( 65536 )
            
            header['packedBytes'] = 0
            typeSize = input_image.dtype.itemsize
            
            print( input_image.shape )
            for J in np.arange( input_image.shape[0] ):
                # print( "Slice %d: Compressing address at: %d of %d:" % (J, int(J*typeSize*blockSize), input_image.nbytes) )
                
                # Looks like I have problem for typesize > 1?
                if int(J*typeSize*chunkSize) >= input_image.nbytes:
                    raise MemoryError( "MRCExport: Tried to reference past end of ndarray %d > %d" % (int(J*typeSize*chunkSize), input_image.nbytes ) )
                    

                compressedData = blosc.compress( input_image[J,:,:].tobytes(),
                            typeSize, 
                            clevel=header['clevel'],
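
The compress call above is cut off in this extract. As a rough, self-contained sketch of the same slice-by-slice pattern, assuming a hypothetical array shape, thread count, and compression level (only the 65536-byte block size is taken from the snippet):

import blosc
import numpy as np

blosc.set_nthreads(4)        # hypothetical thread count
blosc.set_blocksize(65536)   # 64 KiB blocks, as in the snippet above

stack = np.zeros((8, 512, 512), dtype=np.float32)  # stand-in for input_image
typeSize = stack.dtype.itemsize
packedBytes = 0
for J in range(stack.shape[0]):
    # Compress each 2D slice independently, mirroring the loop above.
    compressedData = blosc.compress(stack[J, :, :].tobytes(),
                                    typeSize,
                                    clevel=1)
    packedBytes += len(compressedData)
print("compressed %d bytes down to %d" % (stack.nbytes, packedBytes))
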
Example from arq5x/gemini: gemini/gemini_load_chunk2.py (view on GitHub)
from effects import SnpEff

# native Python imports
import os.path
import time
import sys
import sqlite3
import itertools as it

import toml  # toml.py

# third-party imports
import cyvcf2 as vcf
import blosc
blosc.set_nthreads(1)
blosc.set_blocksize(8192)

import zlib
import cPickle

def opack_blob(obj, _none=buffer(zlib.compress(cPickle.dumps(None, cPickle.HIGHEST_PROTOCOL)))):
    if obj is None: return _none
    return buffer(zlib.compress(cPickle.dumps(obj, cPickle.HIGHEST_PROTOCOL), 1))

def pack_blob(obj):
    if obj is None: return ''
    return buffer(blosc.compress(obj.tostring(), obj.dtype.itemsize, clevel=5, shuffle=True))
    #return buffer(blosc.pack_array(obj))

def is_number(op, field):
    return field.endswith("_float") or op in ("mean", "median", "min", "max")
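
pack_blob above covers only the compression side. A matching unpack step is not shown in the extract; a hedged sketch (the function name and the explicit dtype argument are assumptions, since the compressed blob does not record the array's dtype) might look like this:

import blosc
import numpy as np

def unpack_blob(blob, dtype):
    # Hypothetical inverse of pack_blob: decompress the blosc payload and
    # rebuild a flat ndarray; the caller must supply the original dtype.
    if not blob:
        return None
    return np.frombuffer(blosc.decompress(blob), dtype=dtype)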