How to use the yt.utilities.on_demand_imports._h5py.File function in yt

To help you get started, we've selected a few yt examples based on popular ways this function is used in public projects.
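Every snippet on this page goes through yt's on-demand import shim rather than importing h5py directly, so h5py is only loaded the first time an HDF5 file is actually touched. A minimal sketch of that pattern, assuming h5py is installed and using a placeholder file name:

from yt.utilities.on_demand_imports import _h5py as h5py

# "example.h5" is a placeholder; the import above defers loading the real
# h5py module until an attribute such as h5py.File is first accessed.
with h5py.File("example.h5", mode="w") as f:
    f.create_dataset("values", data=[1, 2, 3])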


github yt-project / yt / yt / analysis_modules / cosmological_observation / light_cone / light_cone.py
        field_node = "%s_%s" % (field, weight_field)
        weight_field_node = "weight_field_%s" % weight_field

        if filename is None:
            filename = os.path.join(self.output_dir, "%s_data" % self.output_prefix)
        if not filename.endswith(".h5"):
            filename += ".h5"

        if pstack.size == 0:
            mylog.info("save_light_cone_stack: light cone projection is empty.")
            return

        mylog.info("Writing light cone data to %s." % filename)

        fh = h5py.File(filename, "a")

        if field_node in fh:
            del fh[field_node]

        mylog.info("Saving %s to %s." % (field_node, filename))
        dataset = fh.create_dataset(field_node, data=pstack)
        dataset.attrs["units"] = str(pstack.units)
        dataset.attrs["redshifts"] = redshift_list
        dataset.attrs["observer_redshift"] = np.float(self.observer_redshift)
        for key, value in attrs.items():
            dataset.attrs[key] = value

        if wstack.size > 0:
            if weight_field_node in fh:
                del fh[weight_field_node]
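The snippet above follows a reusable pattern: open the file in append mode, delete any stale node, then recreate the dataset together with its metadata. A standalone sketch of that pattern; the helper name, file name, field name, and attribute values here are hypothetical:

import numpy as np
from yt.utilities.on_demand_imports import _h5py as h5py

def overwrite_dataset(filename, node, data, attrs):
    # Replace `node` in an HDF5 file, leaving all other nodes intact.
    with h5py.File(filename, mode="a") as fh:
        if node in fh:
            del fh[node]  # h5py requires deleting a node before re-creating it
        dataset = fh.create_dataset(node, data=data)
        for key, value in attrs.items():
            dataset.attrs[key] = value

overwrite_dataset("light_cone_data.h5", "density_None",
                  np.ones((16, 16)), {"units": "g/cm**2"})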
github yt-project / yt / yt / analysis_modules / halo_finding / halo_objects.py
        # If we don't have this field, we can give up right now.
        if field not in fields:
            return None
        elif field in ('particle_index', 'particle_type'):
            # the only integer fields
            field_data = np.empty(size, dtype='int64')
        else:
            field_data = np.empty(size, dtype='float64')
        f.close()
        # Apparently, there's a bug in h5py that was keeping the file pointer
        # f closed, even though it's re-opened below. This del seems to fix
        # that.
        del f
        offset = 0
        for fname in fnames:
            f = h5py.File(fname, 'r')
            this = f["Halo%08d" % halo][field][:]
            s = this.size
            field_data[offset:offset + s] = this
            offset += s
            f.close()
            del f
        return field_data
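Stripped of the halo-specific naming, the read-and-stitch idiom above looks like this; the helper name and HDF5 group layout are assumptions for illustration:

import numpy as np
from yt.utilities.on_demand_imports import _h5py as h5py

def gather_field(fnames, group, field, size, dtype='float64'):
    # Concatenate group/field from several HDF5 files into one
    # preallocated array, in file order.
    field_data = np.empty(size, dtype=dtype)
    offset = 0
    for fname in fnames:
        with h5py.File(fname, 'r') as f:
            this = f[group][field][:]
        field_data[offset:offset + this.size] = this
        offset += this.size
    return field_data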
github yt-project / yt / yt / utilities / minimal_representation.py
def store(self, storage):
        if hasattr(self, "_ds_mrep"):
            self._ds_mrep.store(storage)
        metadata, (final_name, chunks) = self._generate_post()
        metadata['obj_type'] = self.type
        with h5.File(storage, mode="a") as h5f:
            dset = str(uuid4())[:8]
            h5f.create_group(dset)
            _serialize_to_h5(h5f[dset], metadata)
            if len(chunks) > 0:
                g = h5f[dset].create_group('chunks')
                g.attrs['final_name'] = final_name
                for fname, fdata in chunks:
                    if isinstance(fname, (tuple, list)):
                        fname = "*".join(fname)

                    if isinstance(fdata, (YTQuantity, YTArray)):
                        g.create_dataset(fname, data=fdata.d,
                                         compression="lzf")
                        g[fname].attrs["units"] = str(fdata.units)
                    else:
                        g.create_dataset(fname, data=fdata, compression="lzf")
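The unit-preserving write used above (store the raw array, record the units as a string attribute) reduces to a short sketch; the file and dataset names are placeholders:

from yt.units import YTArray
from yt.utilities.on_demand_imports import _h5py as h5py

arr = YTArray([1.0, 2.0, 3.0], "g/cm**3")
with h5py.File("store.h5", mode="a") as h5f:
    g = h5f.require_group("chunks")
    # store the bare ndarray (arr.d) and keep the units alongside it
    g.create_dataset("density", data=arr.d, compression="lzf")
    g["density"].attrs["units"] = str(arr.units)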
github yt-project / yt / yt / frontends / swift / io.py
def _get_smoothing_length(self, sub_file, pdtype=None, pshape=None):
        # We do not need pdtype or pshape, but some frontends do, so we
        # accept them and then just ignore them
        ptype = self.ds._sph_ptypes[0]
        ind = int(ptype[-1])
        si, ei = sub_file.start, sub_file.end
        with h5py.File(sub_file.filename, "r") as f:
            pcount = f["/Header"].attrs["NumPart_ThisFile"][ind].astype("int")
            pcount = np.clip(pcount - si, 0, ei - si)
            # we upscale to float64
            hsml = f[ptype]["SmoothingLength"][si:ei, ...]
            hsml = hsml.astype("float64", copy=False)
            return hsml
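The read above is a plain sliced read followed by an upcast. As a generic sketch, with a hypothetical helper name and dataset path:

from yt.utilities.on_demand_imports import _h5py as h5py

def read_slice(filename, dset_path, start, end):
    # Read rows [start, end) of a dataset and upcast them to float64.
    with h5py.File(filename, "r") as f:
        data = f[dset_path][start:end, ...]
    return data.astype("float64", copy=False)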
github yt-project / yt / yt / frontends / owls_subfind / data_structures.py
def _is_valid(self, *args, **kwargs):
        need_groups = ['Constants', 'Header', 'Parameters', 'Units', 'FOF']
        veto_groups = []
        valid = True
        try:
            fh = h5py.File(args[0], mode='r')
            valid = all(ng in fh["/"] for ng in need_groups) and \
              not any(vg in fh["/"] for vg in veto_groups)
            fh.close()
        except Exception:
            valid = False
        return valid
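This validation idiom generalizes to any HDF5-backed frontend: open the file read-only, check for required and forbidden groups, and treat any read failure as "not a valid file". A minimal sketch, with hypothetical group lists:

from yt.utilities.on_demand_imports import _h5py as h5py

def looks_like_my_format(filename, need_groups, veto_groups=()):
    try:
        with h5py.File(filename, mode='r') as fh:
            return (all(g in fh["/"] for g in need_groups) and
                    not any(g in fh["/"] for g in veto_groups))
    except (OSError, KeyError):
        # not an HDF5 file, or missing the expected layout
        return False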
github yt-project / yt / yt / analysis_modules / photon_simulator / photon_simulator.py
        else:
            p_out[key] = param[()]

    skip = [exp_time_key] if add_exposure_times else []
    for fn in input_files[1:]:
        f = h5py.File(fn, "r")
        validate_parameters(f_in["parameters"], f["parameters"], skip=skip)
        f.close()

    f_in.close()

    data = defaultdict(list)
    tot_exp_time = 0.0

    for i, fn in enumerate(input_files):
        f = h5py.File(fn, "r")
        if add_exposure_times:
            tot_exp_time += f["/parameters"][exp_time_key][()]
        elif i == 0:
            tot_exp_time = f["/parameters"][exp_time_key][()]
        for key in f["/data"]:
            data[key].append(f["/data"][key][:])
        f.close()

    p_out["exp_time"] = tot_exp_time

    d = f_out.create_group("data")
    for k in data:
        d.create_dataset(k, data=np.concatenate(data[k]))

    f_out.close()
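The merge loop above boils down to: gather every dataset under /data from each input file, then concatenate per key into the output. A compact sketch with placeholder file names:

from collections import defaultdict
import numpy as np
from yt.utilities.on_demand_imports import _h5py as h5py

def merge_data_groups(input_files, output_file):
    data = defaultdict(list)
    for fn in input_files:
        with h5py.File(fn, "r") as f:
            for key in f["/data"]:
                data[key].append(f["/data"][key][:])
    with h5py.File(output_file, "w") as f_out:
        d = f_out.create_group("data")
        for k in data:
            d.create_dataset(k, data=np.concatenate(data[k]))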
github yt-project / yt / yt / frontends / enzo_p / io.py
def _read_particle_fields(self, chunks, ptf, selector):
        chunks = list(chunks)
        dc = self.ds.domain_center.in_units("code_length").d
        for chunk in chunks: # These should be organized by grid filename
            f = None
            for g in chunk.objs:
                if g.filename is None:
                    continue
                if f is None:
                    f = h5py.File(g.filename, "r")
                if g.particle_count is None:
                    fnstr = "%s/%s" % (
                        g.block_name,
                        self._sep.join(["particle", "%s", "%s"]))
                    g.particle_count = dict(
                        (ptype,
                         f.get(fnstr % (ptype, self.sample_pfields[ptype])).size)
                        for ptype in self.sample_pfields)
                    g.total_particles = sum(g.particle_count.values())
                if g.total_particles == 0:
                    continue
                group = f.get(g.block_name)
                for ptype, field_list in sorted(ptf.items()):
                    pn = self._sep.join(
                        ["particle", ptype, "%s"])
                    if g.particle_count[ptype] == 0:
                        continue
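The loop above opens each grid file at most once per chunk and reuses the handle for every grid that shares it. The handle-reuse skeleton, with the yt-specific bookkeeping removed (the chunk/grid object layout is assumed from the snippet):

from yt.utilities.on_demand_imports import _h5py as h5py

def iter_grid_handles(chunks):
    # Grids within a chunk share a filename, so open lazily and reuse.
    for chunk in chunks:
        f = None
        try:
            for g in chunk.objs:
                if g.filename is None:
                    continue
                if f is None:
                    f = h5py.File(g.filename, "r")
                yield f, g
        finally:
            if f is not None:
                f.close()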
github yt-project / yt / yt / utilities / grid_data_format / writer.py
@contextmanager
def _get_backup_file(ds):
    backup_filename = ds.backup_filename
    if os.path.exists(backup_filename):
        # backup file already exists, open it. We use parallel
        # h5py if it is available
        if communication_system.communicators[-1].size > 1 and \
                h5py.get_config().mpi is True:
            mpi4py_communicator = communication_system.communicators[-1].comm
            f = h5py.File(backup_filename, "r+", driver='mpio', 
                          comm=mpi4py_communicator)
        else:
            f = h5py.File(backup_filename, "r+")
        yield f
        f.close()
    else:
        # backup file does not exist, create it
        with _create_new_gdf(ds, backup_filename, 
                             data_author=None,
                             data_comment=None,
                             particle_type_name="dark_matter") as f:
            yield f
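A standalone sketch of the parallel-or-serial open used above; it assumes h5py was built with MPI support whenever a communicator is passed, and that every MPI rank calls the function collectively:

from yt.utilities.on_demand_imports import _h5py as h5py

def open_backup_for_update(filename, comm=None):
    # Open an existing HDF5 file read-write, collectively when possible.
    if comm is not None and h5py.get_config().mpi:
        return h5py.File(filename, "r+", driver="mpio", comm=comm)
    return h5py.File(filename, "r+")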