How to use the h5py.Group function in h5py

To help you get started, we’ve selected a few h5py examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

Example from data-exchange/dxchange — dxchange/reader.py (view on GitHub)
def read_hdf5_item_structure(meta, fp, file_name, offset='    ', label1='/measurement/', label2='/process/'):
    """
    Visit the input file/group/dataset (fp) and recurse over its contents,
    collecting the values of matching datasets into *meta*.

    Parameters
    ----------
    meta : dict
        Accumulator updated in place with ``{leaf_name: value}`` entries.
    fp : h5py.File, h5py.Group or h5py.Dataset
        The HDF5 object currently being visited.
    file_name : str
        Path of the HDF5 file; passed to ``read_hdf5`` to fetch values.
    offset : str
        Indentation prefix used when debug-logging the tree depth.
    label1, label2 : str
        Only datasets whose full path contains one of these substrings are
        recorded in *meta*.
    """
    if isinstance(fp, h5py.Dataset):
        if (label1 in fp.name) or (label2 in fp.name):
            # Use the last path component as the key; '-' is not a valid
            # identifier character, so normalize it to '_'.
            name = fp.name.split('/')[-1].replace('-', '_')

            value = read_hdf5(file_name, fp.name)[0]
            # Byte strings ('S' dtype) are decoded to native str.
            if value.dtype.kind == 'S':
                value = value.decode(encoding="utf-8")
            meta.update({name: value})
    elif isinstance(fp, h5py.Group):
        # Idiom: pass fp.name lazily instead of eager %-interpolation.
        logger.debug('Group: %s', fp.name)
    else:
        # BUGFIX: the original passed fp.name as an extra positional argument
        # to logger.error with no %s placeholder in the message, which breaks
        # the log record's formatting.
        logger.error('WARNING: UNKNOWN ITEM IN HDF5 FILE %s', fp.name)
        sys.exit("EXECUTION IS TERMINATED")

    if isinstance(fp, (h5py.File, h5py.Group)):
        # Iterate the group directly; copying it into a dict first is wasted work.
        for key, val in fp.items():
            # BUGFIX: logger.debug(offset, key) treated `key` as a %-format
            # argument for a message with no placeholder.
            logger.debug('%s%s', offset, key)
            read_hdf5_item_structure(meta, val, file_name, offset + '    ')
Example from ilastik/lazyflow — lazyflow/utility/io_util/multiprocessHdf5File.py (view on GitHub)
if sub_path[0] != '/':
            # Normalize: internal HDF5 paths are always absolute.
            sub_path = '/' + sub_path

        if self._internal_path != '/': 
            # Join this group's own path with the requested sub-path.
            full_internal_path = self._internal_path + sub_path
        else:
            full_internal_path = sub_path

        try:        
            # _all_paths maps every known internal path to its h5py type
            # (h5py.Dataset or h5py.Group).
            object_type = self.mp_file._all_paths[full_internal_path]
        except KeyError:
            # Unknown path: propagate the KeyError to the caller unchanged.
            raise
        
        if object_type is h5py.Dataset:
            return self.mp_file._get_dataset(full_internal_path)
        elif object_type is h5py.Group:
            # Wrap sub-groups so nested lookups go through the same proxy.
            return _Group( self.mp_file, full_internal_path )
        else:
            # NOTE(review): assert is stripped under -O; consider raising instead.
            assert False, "Don't know how to access object: {}".format( object_type )
Example from TomSkelly/PacBioEDA — PacBio_XY.py (view on GitHub)
def main ():
    """Scatter-plot ZMW hole X/Y positions from a PacBio HDF5 file,
    coloured by hole-status code."""

    logger.debug("%s starting", sys.argv[0])

    opt, args = getParms()

    infile_name = args[0]
    infile = h5py.File(infile_name, 'r')

    # One colour per hole-status code, index-aligned with holeStatusTable.
    colours = ('#aaaaaa', '#ff0000', '#00ff00', '#0000ff', '#ff0080',
               '#8000ff', '#80ff00', '#ff8000', '#ffff00')
    holeStatusTable = ('SEQUENCING', 'ANTIHOLE', 'FIDUCIAL', 'SUSPECT',
                       'ANTIMIRROR', 'FDZMW', 'FBZMW', 'ANTIBEAMLET', 'OUTSIDEFOV')

    # BUGFIX: h5py.Group is not a public constructor; index the open file
    # to obtain its root group instead of calling h5py.Group(infile, '/').
    top = infile['/']

    ZMW = top["PulseData/BaseCalls/ZMW"]

    holeStatus = ZMW["HoleStatus"]
    holeXY = ZMW["HoleXY"]

    # BUGFIX: xrange is Python-2-only; enumerate is portable and removes the
    # manual index arithmetic.
    for num, status_name in enumerate(holeStatusTable):

        whichHoles = holeStatus[:] == num
        HowMany = sum(whichHoles)
        logger.debug("%5d ZMWs %d  %s", HowMany, num, status_name)
        if HowMany > 0:
            plt.scatter(holeXY[whichHoles, 0], holeXY[whichHoles, 1],
                        s=1, c=colours[num], edgecolor='face',
                        label="%d  %5d  %s" % (num, HowMany, status_name))
Example from spyder-ide/spyder — spyder/plugins/io_hdf5/plugin.py (view on GitHub)
def get_group(group):
    """Recursively convert an HDF5 group into a nested dict.

    Datasets become in-memory numpy arrays; sub-groups become nested
    dicts; anything else (e.g. links) is skipped.
    """
    result = {}
    for key, item in list(group.items()):
        if isinstance(item, h5py.Dataset):
            # Leaf node: materialize the dataset as an array.
            result[key] = np.array(item)
        elif isinstance(item, h5py.Group):
            # Sub-group: recurse and store the nested mapping.
            result[key] = get_group(item)
        # Other objects such as links are deliberately ignored.
    return result
Example from NeurodataWithoutBorders/pynwb — src/pynwb/spec/tools.py (view on GitHub)
def update_stats(name, obj):
            """
            Callback function used in conjunction with the visititems function to compile
            statistics for the file

            :param name: the name of the object in the file
            :param obj: the hdf5 object itself
            """
            # Build the object's absolute path from the enclosing `root`.
            obj_name = os.path.join(root, name)
            # Group and dataset metadata
            if isinstance(obj, h5py.Dataset):
                filestats.add_dataset(name=obj_name, shape=obj.shape, dtype=obj.dtype)
            elif isinstance(obj, h5py.Group):
                # neurodata_type is optional; stay None when absent.
                ntype = None
                try:
                    ntype = obj.attrs['neurodata_type'][()]
                # NOTE(review): bare except swallows everything, including
                # KeyboardInterrupt; `except KeyError:` would be safer.
                except:
                    pass
                filestats.add_group(name=obj_name, neurodata_type=ntype)
            # Visit all attributes of the object
            for attr_name, attr_value in obj.attrs.items():
                attr_path = os.path.join(obj_name, attr_name)
                filestats.add_attribute(name=attr_path, value=attr_value)
                # Record that each attribute belongs to this object.
                filestats.add_relationship(source=attr_path,
                                           target=obj_name,
                                           name=attr_name + '_attribute_of_' + obj_name,
                                           rtype='attribute_of')

            # Create the relationship for the object
Example from simpeg/simpeg — SimPEG/Utils/Save.py (view on GitHub)
def __getitem__(self, val):
        if type(val) is int:
            val = self.children[val]
        child = self.node[val]
        if type(child) is h5py.Group:
            child = self.childClass(self.T, child)
        return child
Example from jdfekete/progressivis — progressivis/storage/hdf5.py (view on GitHub)
from __future__ import absolute_import, division, print_function

from progressivis.core.storagemanager import StorageManager
from progressivis.core.config import get_option
from .base import StorageEngine, Group, Attribute, Dataset

import h5py

# Register the h5py concrete classes as virtual subclasses of the abstract
# storage interfaces, so isinstance checks against Group/Attribute/Dataset
# also accept h5py objects.
Group.register(h5py.Group)
Attribute.register(h5py.AttributeManager)
Dataset.register(h5py.Dataset)

class HDF5StorageEngine(StorageEngine):
    # Storage engine backed by an HDF5 file via h5py.
    def __init__(self):
        """Register the engine under the name 'hdf5'; nothing is opened yet."""
        super(HDF5StorageEngine, self).__init__("hdf5")
        self._h5py = None  # lazily-opened HDF5 file handle
        self._root = None  # lazily-created root group

    @property
    def h5py(self):
        """Lazily open (once) and return the backing HDF5 file."""
        if self._h5py is None:
            # 'w' truncates — presumably each engine instance owns a fresh
            # 'default.h5'; confirm against StorageEngine.open semantics.
            self._h5py = self.open('default.h5', 'w')
        return self._h5py

    @property
Example from BaPSF/bapsflib — bapsflib/_hdf_mappers/msi/map_msi.py (view on GitHub)
"""
        :param msi_group: HDF5 group object
        """
        # condition msi_group arg
        if not isinstance(msi_group, h5py.Group):
            raise TypeError('msi_group is not of type h5py.Group')

        # store HDF5 MSI group
        self.__msi_group = msi_group

        # Determine Diagnostics in msi
        # - it is assumed that any subgroup of 'MSI/' is a diagnostic
        # - any dataset directly under 'MSI/' is ignored
        self.msi_group_subgnames = []
        for diag in msi_group:
            if isinstance(msi_group[diag], h5py.Group):
                self.msi_group_subgnames.append(diag)

        # Build the self dictionary
        dict.__init__(self, self.__build_dict)
Example from theochem/horton — horton/io/smart.py (view on GitHub)
format. It returns a dictionary with data loaded from the file.

       For each file format, a specialized load_xxx function is called that
       returns a dictionary with data from the file.
    '''
    result = {}

    # Default to a dense linear-algebra factory when none was supplied.
    lf = kwargs.pop('lf', None)
    if lf is None:
        lf = DenseLinalgFactory()
    if len(kwargs) > 0:
        # NOTE(review): the message interpolates lf.keys(), but the offending
        # names live in kwargs — this should almost certainly be kwargs.keys().
        raise TypeError('Keyword argument(s) not supported: %s' % lf.keys())
    result['lf'] = lf

    # Dispatch on argument type / filename extension; each loader's results
    # are merged into the shared dict (later files can override earlier keys).
    for filename in filenames:
        if isinstance(filename, h5.Group) or filename.endswith('.h5'):
            from horton.io.internal import load_h5
            result.update(load_h5(filename, lf))
        elif filename.endswith('.xyz'):
            from horton.io.xyz import load_xyz
            result.update(load_xyz(filename))
        elif filename.endswith('.fchk'):
            from horton.io.gaussian import load_fchk
            result.update(load_fchk(filename, lf))
        elif filename.endswith('.log'):
            from horton.io.gaussian import load_operators_g09
            result.update(load_operators_g09(filename, lf))
        elif filename.endswith('.mkl'):
            from horton.io.molekel import load_mkl
            result.update(load_mkl(filename, lf))
        elif filename.endswith('.molden.input'):
            from horton.io.molden import load_molden
Example from vasole/pymca — PyMca5/PyMcaCore/NexusTools.py (view on GitHub)
def isGroup(item):
    """Return True if *item* behaves like an HDF5/NeXus group."""
    # A real Group instance qualifies directly.
    if isinstance(item, Group):
        return True
    # Anything dict-like (exposes keys()) is treated as a group as well.
    if hasattr(item, "keys"):
        return True
    # Otherwise fall back to the module-level heuristic check.
    return bool(is_group(item))