How to use the hdmf.backends.hdf5.h5_utils.H5DataIO class in hdmf

To help you get started, we've selected a few examples based on popular ways H5DataIO is used in public projects.
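Before looking at the project excerpts below, here is a minimal, self-contained sketch of the most common pattern: wrapping an in-memory array with H5DataIO so the HDF5 backend applies chunking and compression when the file is written. The array contents and parameter values are illustrative only.

from hdmf.backends.hdf5.h5_utils import H5DataIO
import numpy as np

# Wrap a plain numpy array so that, on write, the backing HDF5 dataset
# is chunked and GZip-compressed.
raw_data = np.arange(10000, dtype=float).reshape(100, 100)
wrapped_data = H5DataIO(data=raw_data,
                        compression='gzip',   # enable GZip compression
                        compression_opts=4,   # compression level (0-9)
                        chunks=(10, 10),      # store the data in 10x10 chunks
                        shuffle=True)         # byte-shuffle filter, often improves compression

# wrapped_data can be passed anywhere hdmf/pynwb expects a dataset,
# e.g. as the `data` argument of a TimeSeries.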


Example from NeurodataWithoutBorders/pynwb: tests/integration/ui_write/test_modular_storage.py (view on GitHub)
with HDF5IO(self.data_filename, 'r', manager=get_manager()) as self.data_read_io:
    data_file_obt = self.data_read_io.read()

    # write "link file" with timeseries.data that is an external link to the timeseries in "data file"
    # also link timeseries.timestamps.data to the timeseries.timestamps in "data file"
    with HDF5IO(self.link_filename, 'w', manager=get_manager()) as link_write_io:
        link_file = NWBFile(
            session_description='a test file',
            identifier='link_file',
            session_start_time=self.start_time
        )
        self.link_container = TimeSeries(
            name='test_mod_ts',
            unit='V',
            data=data_file_obt.get_acquisition('data_ts'),  # test direct link
            timestamps=H5DataIO(
                data=data_file_obt.get_acquisition('data_ts').timestamps,
                link_data=True  # test with setting link data
            )
        )
        link_file.add_acquisition(self.link_container)
        link_write_io.write(link_file)

# note that self.link_container contains a link to a dataset that is now closed

# read the link file
self.link_read_io = HDF5IO(self.link_filename, 'r', manager=get_manager())
self.read_nwbfile = self.link_read_io.read()
return self.getContainer(self.read_nwbfile)
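One way to confirm that `link_data=True` produced an HDF5 external link for the timestamps is to inspect the written file directly with h5py. This is a sketch: the file name and the HDF5 path depend on `self.link_filename` and the NWB layout, and are illustrative here.

import h5py

# Ask for the link object itself rather than the resolved dataset
with h5py.File('link_file.nwb', 'r') as f:
    link = f.get('/acquisition/test_mod_ts/timestamps', getlink=True)
    print(isinstance(link, h5py.ExternalLink))  # True if the timestamps were stored as an external link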
Example from NeurodataWithoutBorders/pynwb: docs/gallery/general/iterative_write.py (view on GitHub)
#
from hdmf.backends.hdf5.h5_utils import H5DataIO

# Increase the chunk size and set the fill value
matrix3 = SparseMatrixIterator(shape=(xsize, ysize),
                               num_chunks=num_chunks,
                               chunk_shape=chunk_shape)
data3 = H5DataIO(data=matrix3,
                 chunks=(100, 100),
                 fillvalue=np.nan)

# Increase the chunk size and add compression
matrix4 = SparseMatrixIterator(shape=(xsize, ysize),
                               num_chunks=num_chunks,
                               chunk_shape=chunk_shape)
data4 = H5DataIO(data=matrix4,
                 compression='gzip',
                 compression_opts=4,
                 chunks=(100, 100),
                 fillvalue=np.nan
                 )

####################
# Step 3: Write the data as usual
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Here we simply use our ``SparseMatrixIterator`` as input for our ``TimeSeries``

write_test_file(filename='basic_sparse_iterwrite_example.nwb',
                data=data)
write_test_file(filename='basic_sparse_iterwrite_compressed_example.nwb',
                data=data2)
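After writing, the storage settings chosen via H5DataIO can be checked by opening the resulting file with h5py. This is a sketch: the dataset path below assumes write_test_file stored a single TimeSeries named 'synthetic_timeseries', which is an assumption; adjust the path to your file layout.

import h5py

with h5py.File('basic_sparse_iterwrite_compressed_example.nwb', 'r') as f:
    dset = f['/acquisition/synthetic_timeseries/data']  # assumed TimeSeries name
    print(dset.chunks)            # chunk shape chosen for the dataset
    print(dset.compression)       # 'gzip'
    print(dset.compression_opts)  # 4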
Example from NeurodataWithoutBorders/pynwb: docs/gallery/general/iterative_write.py (view on GitHub)
matrix2 = SparseMatrixIterator(shape=(xsize, ysize),
                               num_chunks=num_chunks,
                               chunk_shape=chunk_shape)
data2 = H5DataIO(data=matrix2,
                 compression='gzip',
                 compression_opts=4)

######################
# We can now also customize the chunking, fill value, and other settings
#
from hdmf.backends.hdf5.h5_utils import H5DataIO

# Increase the chunk size and set the fill value
matrix3 = SparseMatrixIterator(shape=(xsize, ysize),
                               num_chunks=num_chunks,
                               chunk_shape=chunk_shape)
data3 = H5DataIO(data=matrix3,
                 chunks=(100, 100),
                 fillvalue=np.nan)

# Increase the chunk size and add compression
matrix4 = SparseMatrixIterator(shape=(xsize, ysize),
                               num_chunks=num_chunks,
                               chunk_shape=chunk_shape)
data4 = H5DataIO(data=matrix4,
                 compression='gzip',
                 compression_opts=4,
                 chunks=(100, 100),
                 fillvalue=np.nan
                 )

####################
# Step 3: Write the data as usual
Example from NeurodataWithoutBorders/pynwb: docs/gallery/general/iterative_write.py (view on GitHub)
#
# In the above cases we used the built-in capabilities of PyNWB to perform iterative data writes. To
# gain more fine-grained control over the write process, we can alternatively use PyNWB to set up the
# full structure of our NWB:N file and then update select datasets afterwards. This approach is useful,
# e.g., in the context of parallel writes and any time we need to optimize write patterns.
#
#

####################
# Step 1: Initially allocate the data as empty
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
from hdmf.backends.hdf5.h5_utils import H5DataIO

write_test_file(filename='basic_alternative_custom_write.nwb',
                data=H5DataIO(data=np.empty(shape=(0, 10), dtype='float'),
                              maxshape=(None, 10),  # <-- Make the time dimension resizable
                              chunks=(131072, 2),   # <-- Use 2MB chunks
                              compression='gzip',   # <-- Enable GZip compression
                              compression_opts=4,   # <-- GZip compression level (0-9)
                              shuffle=True,         # <-- Enable shuffle filter
                              fillvalue=np.nan      # <-- Use NaN as the fill value
                              )
                )

####################
# Step 2: Get the dataset(s) to be updated
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
from pynwb import NWBHDF5IO    # noqa

io = NWBHDF5IO('basic_alternative_custom_write.nwb', mode='a')
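The excerpt stops after opening the file in append mode. A sketch of how the dataset obtained from the file could then be resized and filled follows; the acquisition name and the values written are illustrative assumptions, not part of the original gallery code.

nwbfile = io.read()
data = nwbfile.get_acquisition('synthetic_timeseries').data  # h5py.Dataset backing the TimeSeries (assumed name)

# The dataset was created with maxshape=(None, 10), so the first axis can grow
data.resize((8, 10))
data[0:8, :] = 5.0

io.close()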
Example from NeurodataWithoutBorders/pynwb: docs/gallery/general/linking_data.py (view on GitHub)
                      unit='SIunit',
                      timestamps=timestamps)
nwbfile4.add_acquisition(test_ts4)

####################
# In the above case we did not make it explicit how we want to handle the data from
# our TimeSeries; this means that :py:class:`~pynwb.NWBHDF5IO` will need to
# determine on write how to treat the dataset. We can make this explicit and customize the
# behavior on a per-dataset basis by wrapping our dataset using
# :py:class:`~hdmf.backends.hdf5.h5_utils.H5DataIO`.

from hdmf.backends.hdf5.h5_utils import H5DataIO

# Create another timeseries that links to the same data
test_ts5 = TimeSeries(name='test_timeseries5',
                      data=H5DataIO(data=timeseries_1_data,     # <-------
                                    link_data=True),            # <-------
                      unit='SIunit',
                      timestamps=timestamps)
nwbfile4.add_acquisition(test_ts5)

####################
# Step 4: Write the data
# ^^^^^^^^^^^^^^^^^^^^^^^
#
from pynwb import NWBHDF5IO

io4 = NWBHDF5IO(filename4, 'w')
io4.write(nwbfile4,
          link_data=True)     # <-------- Specify default behavior to link rather than copy data
io4.close()
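To see the effect of the linked write, the file can be read back with pynwb; external links created with link_data=True are resolved transparently. This is a sketch using the names defined in the excerpt above.

from pynwb import NWBHDF5IO

with NWBHDF5IO(filename4, 'r') as io_check:
    nwbfile_in = io_check.read()
    ts5 = nwbfile_in.get_acquisition('test_timeseries5')
    print(ts5.data[:10])  # values resolve through the link into the file that owns timeseries_1_data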
Example from NeurodataWithoutBorders/pynwb: docs/gallery/general/iterative_write.py (view on GitHub)
chunk_shape = (10, 10)
num_values = num_chunks * np.prod(chunk_shape)

# Create our sparse matrix data.
data = SparseMatrixIterator(shape=(xsize, ysize),
                            num_chunks=num_chunks,
                            chunk_shape=chunk_shape)

#####################
# To also enable compression and other advanced HDF5 dataset I/O features, we can
# wrap our data via :py:class:`~hdmf.backends.hdf5.h5_utils.H5DataIO`.
from hdmf.backends.hdf5.h5_utils import H5DataIO
matrix2 = SparseMatrixIterator(shape=(xsize, ysize),
                               num_chunks=num_chunks,
                               chunk_shape=chunk_shape)
data2 = H5DataIO(data=matrix2,
                 compression='gzip',
                 compression_opts=4)

######################
# We can now also customize the chunking, fill value, and other settings
#
from hdmf.backends.hdf5.h5_utils import H5DataIO

# Increase the chunk size and set the fill value
matrix3 = SparseMatrixIterator(shape=(xsize, ysize),
                               num_chunks=num_chunks,
                               chunk_shape=chunk_shape)
data3 = H5DataIO(data=matrix3,
                 chunks=(100, 100),
                 fillvalue=np.nan)