How to use the h5py.File function in h5py

To help you get started, we’ve selected a few h5py examples based on popular ways it is used in public projects.

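Before the project excerpts below, here is a minimal, self-contained sketch of the basic h5py.File workflow (the file and dataset names are hypothetical):

import h5py
import numpy as np

# create a file and write a dataset
with h5py.File('example.h5', 'w') as f:
    f.create_dataset('data', data=np.arange(10, dtype='float32'))

# reopen read-only and load the dataset back into a NumPy array
with h5py.File('example.h5', 'r') as f:
    values = f['data'][:]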

github calico / basenji / bin / basenji_data_read.py (View on Github)
  for line in open(seqs_bed_file):
    a = line.split()
    model_seqs.append(ModelSeq(a[0],int(a[1]),int(a[2]),None))

  # read blacklist regions
  black_chr_trees = read_blacklist(options.blacklist_bed)

  # compute dimensions
  num_seqs = len(model_seqs)
  seq_len_nt = model_seqs[0].end - model_seqs[0].start
  seq_len_nt -= 2*options.crop_bp
  target_length = seq_len_nt // options.pool_width
  assert target_length > 0

  # initialize sequences coverage file
  seqs_cov_open = h5py.File(seqs_cov_file, 'w')
  seqs_cov_open.create_dataset('seqs_cov', shape=(num_seqs, target_length), dtype='float16')

  # open genome coverage file
  genome_cov_open = CovFace(genome_cov_file)

  # for each model sequence
  for si in range(num_seqs):
    mseq = model_seqs[si]

    # read coverage
    seq_cov_nt = genome_cov_open.read(mseq.chr, mseq.start, mseq.end)

    # determine baseline coverage
    baseline_cov = np.percentile(seq_cov_nt, 10)
    baseline_cov = np.nan_to_num(baseline_cov)
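
The excerpt above preallocates a dataset with an explicit shape and dtype, then fills it one row per sequence. A minimal sketch of that pattern, with hypothetical dimensions:

import h5py
import numpy as np

num_seqs, target_length = 8, 128  # hypothetical dimensions
with h5py.File('seqs_cov.h5', 'w') as f:
    # preallocate the dataset, then fill it one row per sequence
    dset = f.create_dataset('seqs_cov', shape=(num_seqs, target_length), dtype='float16')
    for si in range(num_seqs):
        dset[si, :] = np.random.rand(target_length)  # h5py casts to float16
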
github CellProfiler / CellProfiler / tests / utilities / test_hdf5_dict.py (View on Github)
    def tearDown(self):
        if isinstance(self.hdf_file, h5py.File):
            self.hdf_file.close()
        os.close(self.temp_fd)
        os.remove(self.temp_filename)
        if isinstance(self.hdf_file_nocache, h5py.File):
            self.hdf_file_nocache.close()
        os.close(self.temp_fd_nocache)
        os.remove(self.temp_filename_nocache)
        if isinstance(self.hdf_file_empty, h5py.File):
            self.hdf_file_empty.close()
        os.close(self.temp_fd_empty)
        os.remove(self.temp_filename_empty)
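
Since h5py.File supports the context-manager protocol, teardown code like this can often skip the explicit close() calls. A minimal sketch, assuming a single temporary file:

import os
import tempfile

import h5py

fd, path = tempfile.mkstemp(suffix='.h5')
os.close(fd)  # h5py opens the file by path, so the raw descriptor is not needed
with h5py.File(path, 'w') as f:  # closed automatically, even if an error occurs
    f.create_dataset('x', data=[1, 2, 3])
os.remove(path)
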
github IDSIA / brainstorm / examples / cifar10_cnn.py (View on Github)
import os

import h5py

import brainstorm as bs
from brainstorm.data_iterators import Minibatches
from brainstorm.handlers import PyCudaHandler
from brainstorm.initializers import Gaussian

bs.global_rnd.set_seed(42)

# ----------------------------- Set up Iterators ---------------------------- #

data_dir = os.environ.get('BRAINSTORM_DATA_DIR', '../data')
data_file = os.path.join(data_dir, 'CIFAR-10.hdf5')
ds = h5py.File(data_file, 'r')['normalized_split']

getter_tr = Minibatches(100, default=ds['training']['default'][:],
                        targets=ds['training']['targets'][:])
getter_va = Minibatches(100, default=ds['validation']['default'][:],
                        targets=ds['validation']['targets'][:])

# ------------------------------ Set up Network ----------------------------- #

inp, fc = bs.tools.get_in_out_layers('classification', (32, 32, 3), 10)

(inp >>
    bs.layers.Convolution2D(32, kernel_size=(5, 5), padding=2, name='Conv1') >>
    bs.layers.Pooling2D(type="max", kernel_size=(3, 3), stride=(2, 2)) >>
    bs.layers.Convolution2D(32, kernel_size=(5, 5), padding=2, name='Conv2') >>
    bs.layers.Pooling2D(type="max", kernel_size=(3, 3), stride=(2, 2)) >>
    bs.layers.Convolution2D(64, kernel_size=(5, 5), padding=2, name='Conv3') >>
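
Indexing with [:] above copies each dataset fully into memory as a NumPy array, so the iterators no longer depend on the open file. A sketch of the same group/dataset access, assuming the file layout shown:

import h5py

with h5py.File('CIFAR-10.hdf5', 'r') as f:
    ds = f['normalized_split']              # groups are accessed like dicts
    train_x = ds['training']['default'][:]  # [:] copies the dataset into memory
    train_y = ds['training']['targets'][:]
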
github zhengyang-wang / Unet_3D / preprocessing / generate_h5.py (View on Github)
        dataset_r1_2 = h5py.File(os.path.join(target_path, "data_rotate1_2.h5"), 'w')
        dataset_r1_2.create_dataset('X', d_imgshape, dtype='f')
        dataset_r1_2.create_dataset('Y', d_labelshape, dtype='i')

        # data after cutting, with rotating k=3 axes=(0,1)
        dataset_r1_3 = h5py.File(os.path.join(target_path, "data_rotate1_3.h5"), 'w')
        dataset_r1_3.create_dataset('X', d_imgshape_r1, dtype='f')
        dataset_r1_3.create_dataset('Y', d_labelshape_r1, dtype='i')

        # data after cutting, with rotating k=1 axes=(0,2)
        dataset_r2_1 = h5py.File(os.path.join(target_path, "data_rotate2_1.h5"), 'w')
        dataset_r2_1.create_dataset('X', d_imgshape_r2, dtype='f')
        dataset_r2_1.create_dataset('Y', d_labelshape_r2, dtype='i')

        # data after cutting, with rotating k=2 axes=(0,2)
        dataset_r2_2 = h5py.File(os.path.join(target_path, "data_rotate2_2.h5"), 'w')
        dataset_r2_2.create_dataset('X', d_imgshape, dtype='f')
        dataset_r2_2.create_dataset('Y', d_labelshape, dtype='i')

        # data after cutting, with rotating k=3 axes=(0,2)
        dataset_r2_3 = h5py.File(os.path.join(target_path, "data_rotate2_3.h5"), 'w')
        dataset_r2_3.create_dataset('X', d_imgshape_r2, dtype='f')
        dataset_r2_3.create_dataset('Y', d_labelshape_r2, dtype='i')

        # data after cutting, with rotating k=1 axes=(1,2)
        dataset_r3_1 = h5py.File(os.path.join(target_path, "data_rotate3_1.h5"), 'w')
        dataset_r3_1.create_dataset('X', d_imgshape_r3, dtype='f')
        dataset_r3_1.create_dataset('Y', d_labelshape_r3, dtype='i')

        # data after cutting, with rotating k=2 axes=(1,2)
        dataset_r3_2 = h5py.File(os.path.join(target_path, "data_rotate3_2.h5"), 'w')
        dataset_r3_2.create_dataset('X', d_imgshape, dtype='f')
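
Each block above repeats the same create-file/create-datasets pattern. Assuming the shapes can be keyed by output file name, the repetition could be collapsed into a loop (names and shapes below are hypothetical):

import os

import h5py

target_path = '.'  # hypothetical output directory
# hypothetical (X, Y) shapes keyed by output file name
shapes = {
    'data_rotate1_2.h5': ((10, 64, 64, 64, 1), (10, 64, 64, 64)),
    'data_rotate2_2.h5': ((10, 64, 64, 64, 1), (10, 64, 64, 64)),
}
for name, (x_shape, y_shape) in shapes.items():
    with h5py.File(os.path.join(target_path, name), 'w') as f:
        f.create_dataset('X', x_shape, dtype='f')
        f.create_dataset('Y', y_shape, dtype='i')
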
github liu-nlper / SLTK / main.py (View on Github)
    all_in_memory = configs['all_in_memory']
    char_max_len = configs['model_params']['char_max_len']
    batch_size = configs['model_params']['batch_size']
    dev_size = configs['model_params']['dev_size']
    max_len_limit = configs['max_len_limit']

    features_names = configs['data_params']['feature_names']
    data_names = [name for name in features_names]
    use_char = configs['model_params']['use_char']
    if use_char:
        data_names.append('char')
    data_names.append('label')

    # load test hdf5 file
    path_data = configs['data_params']['path_test'] + '.hdf5'
    test_object_dict_ = h5py.File(path_data, 'r')
    test_object_dict = test_object_dict_
    if all_in_memory:
        test_object_dict = dict()
        for data_name in data_names:  # load everything into memory
            test_object_dict[data_name] = test_object_dict_[data_name].value
    test_count = test_object_dict[data_names[0]].size

    data_iter = DataIter(
        test_count, test_object_dict, data_names, use_char=use_char, char_max_len=char_max_len,
        batch_size=batch_size, max_len_limit=max_len_limit)

    return data_iter
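
Note that Dataset.value, used above, was deprecated in h5py 2.1 and removed in 3.0; on current h5py, read a whole dataset with ds[()] (or ds[:]). A minimal sketch, assuming a flat file whose top-level members are all datasets:

import h5py

with h5py.File('data.hdf5', 'r') as f:  # hypothetical file
    # ds[()] reads a whole dataset into memory, replacing the removed ds.value
    in_memory = {name: f[name][()] for name in f}
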
github spyder-ide / spyder / spyder / plugins / io_hdf5 / plugin.py (View on Github)
    def save_hdf5(data, filename):
        import h5py
        try:
            f = h5py.File(filename, 'w')
            for key, value in list(data.items()):
                f[key] = np.array(value)
            f.close()
        except Exception as error:
            return str(error)
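
A with-statement variant of save_hdf5 would guarantee the file is closed even when a write raises; a sketch of the same behavior:

import h5py
import numpy as np

def save_hdf5(data, filename):
    """Write each dict entry as a dataset; return an error string on failure."""
    try:
        with h5py.File(filename, 'w') as f:  # closed even if a write raises
            for key, value in data.items():
                f[key] = np.array(value)
    except Exception as error:
        return str(error)
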
github Ning-Ding / Implementation-CVPR2015-CNN-for-ReID / CUHK03 / main.py (View on Github)
def _generate_negative_pair(mode='train'):
    with h5py.File(cfg.DATA.CREATED_FILE, 'r') as f:
        index_array = _get_index_array(mode)
        i, j = np.random.choice(index_array, 2, replace=False)
        x = np.random.choice(f[str(i)].shape[0], replace=False)
        y = np.random.choice(f[str(j)].shape[0], replace=False)
        image_x = f[str(i)][x]
        image_y = f[str(j)][y]
        return image_x, image_y
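
Because f[str(i)][x] slices a single item, only that image is read from disk rather than the whole dataset. A minimal sketch of the per-item access, with a hypothetical file layout:

import h5py
import numpy as np

with h5py.File('created_file.h5', 'r') as f:  # hypothetical file layout
    key = '0'                                 # one identity per top-level key
    idx = np.random.randint(f[key].shape[0])  # pick a random row index
    image = f[key][idx]                       # reads only that slice from disk
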
github ruimashita / caffe-train / create_lmdb.py (View on Github)
        image = caffe.io.load_image(path)
        image = caffe.io.resize_image(image, (IMAGE_SIZE, IMAGE_SIZE,))
        # height, width, channels to channels, height, width
        image = numpy.rollaxis(image, 2).astype(float)
        return image

    for i, path in enumerate(paths):
        label_index = get_label_index(path)
        image = get_image(path)
        print(image.shape)
        print(image.dtype)
        datas[i, :, :, :] = image
        data_labels[i, :] = [label_index, label_index]
        print('{0:0>8d}:{1}'.format(i, path))

    f = h5py.File(db_path, "w")
    f.create_dataset("data", data=datas,  compression="gzip", compression_opts=4)
    f.create_dataset("label", data=data_labels,  compression="gzip", compression_opts=4)
    f.close()
    print data_labels
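
create_dataset's compression='gzip' accepts compression_opts levels 0-9, trading write speed for file size. A minimal sketch with a hypothetical array:

import h5py
import numpy as np

datas = np.random.rand(10, 3, 32, 32).astype('float32')  # hypothetical batch
with h5py.File('db.h5', 'w') as f:
    # gzip level 0-9: higher levels shrink the file but slow down writes
    f.create_dataset('data', data=datas, compression='gzip', compression_opts=4)
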
github felipecode / coiltraine / tools / export_predictions.py (View on Github)
        ts = []
        images = [np.zeros([resolution[1], resolution[0], 3])] * sensors['RGB']
        labels = [np.zeros([resolution[1], resolution[0], 1])] * sensors['labels']
        depths = [np.zeros([resolution[1], resolution[0], 3])] * sensors['depth']
        actions = [Control()] * sensors['RGB']
        actions_noise = [Control()] * sensors['RGB']

        first_time = True
        end_of_episodes = []
        count = 0
        for h_num in positions_to_test:

            print (" SEQUENCE NUMBER ", h_num)
            try:
                data = h5py.File(path + 'data_' + str(h_num).zfill(5) + '.h5', "r")
            except Exception as e:
                print(e)
                continue

            for i in range(0, 200):
                steer = data['targets'][i][0]
                camera_angle = data['targets'][i][26]
                camera_label = data['targets'][i][25]
                speed = data['targets'][i][10]
                steer = augment_steering(camera_angle, steer, speed)
                #camera_label_file.write(str(camera_angle) + '\n')
                wpa1 = data['targets'][i][31]
                wpa2 = data['targets'][i][33]
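
Wrapping h5py.File in try/except, as above, lets the loop skip missing or unreadable episode files. A minimal standalone sketch with hypothetical file names:

import h5py

for h_num in range(3):  # hypothetical episode numbers
    try:
        data = h5py.File('data_' + str(h_num).zfill(5) + '.h5', 'r')
    except OSError as e:  # h5py raises OSError for missing or unreadable files
        print(e)
        continue
    try:
        steer = data['targets'][0][0]  # read one value, as in the loop above
    finally:
        data.close()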