How to use the yt.utilities.logger.ytLogger.info function in yt

To help you get started, we've selected a few yt examples based on popular ways this function is used in public projects.

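Throughout yt's own code base this logger is imported under the alias mylog, which is the name you will see in all of the examples below. A minimal sketch of direct use (the message and values here are illustrative):

from yt.utilities.logger import ytLogger as mylog

# ytLogger is a standard logging.Logger, so the usual methods apply.
mylog.info("Loaded %d datasets from %s", 4, "/data/runs")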

github yt-project / yt / yt / frontends / open_pmd / data_structures.py (View on Github)
                if match(regex, filename):
                    iterations.append(filename)
            mylog.info("Found {} iterations in directory".format(len(iterations)))

        if len(iterations) == 0:
            mylog.warning("No iterations found!")
        if "groupBased" in encoding and len(iterations) > 1:
            mylog.warning("Only chose to load one iteration ({})".format(iteration))

        self.base_path = "/data/{}/".format(iteration)
        try:
            self.meshes_path = self._handle["/"].attrs["meshesPath"].decode()
            # Probe the group; raises KeyError if it is absent.
            self._handle[self.base_path + self.meshes_path]
        except KeyError:
            if self.standard_version <= StrictVersion("1.1.0"):
                mylog.info("meshesPath not present in file."
                           " Assuming file contains no meshes and has a domain extent of 1m^3!")
                self.meshes_path = None
            else:
                raise
        try:
            self.particles_path = self._handle["/"].attrs["particlesPath"].decode()
            # Probe the group; raises KeyError if it is absent.
            self._handle[self.base_path + self.particles_path]
        except KeyError:
            if self.standard_version <= StrictVersion("1.1.0"):
                mylog.info("particlesPath not present in file."
                           " Assuming file contains no particles!")
                self.particles_path = None
            else:
                raise
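
The two try/except blocks above share one pattern: probe an optional HDF5 path, log an informational fallback when an older standard version permits the omission, and re-raise otherwise. A generic sketch of that pattern (the function and attribute names are hypothetical):

import h5py
from yt.utilities.logger import ytLogger as mylog

def probe_optional_path(handle, attr, lenient):
    # handle is an open h5py.File; attr names an optional root attribute.
    try:
        path = handle["/"].attrs[attr].decode()
        handle[path]  # raises KeyError if the group is absent
        return path
    except KeyError:
        if lenient:  # e.g. standard_version <= 1.1.0 in the snippet above
            mylog.info("%s not present in file. Assuming no such data.", attr)
            return None
        raise
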
github yt-project / yt / yt / analysis_modules / halo_analysis / halo_callbacks.py (View on Github)
output_dir : string
        Name of directory where profile data will be written.  The full path will be
        the output_dir of the halo catalog concatenated with this directory.
        Default : "."
    
    """

    if not hasattr(halo, storage):
        return
    
    if filename is None:
        filename = storage
    output_file = os.path.join(halo.halo_catalog.output_dir, output_dir,
                               "%s_%06d.h5" % (filename, 
                                               halo.quantities["particle_identifier"]))
    mylog.info("Saving halo %d profile data to %s." %
               (halo.quantities["particle_identifier"], output_file))

    fh = h5py.File(output_file, "w")
    my_profile = getattr(halo, storage)
    profile_group = fh.create_group("profiles")
    for field in my_profile:
        # Don't write code units because we might not know those later.
        if isinstance(my_profile[field], YTArray):
            my_profile[field].convert_to_cgs()
        _yt_array_hdf5(profile_group, str(field), my_profile[field])
    variance_storage = "%s_variance" % storage
    if hasattr(halo, variance_storage):
        my_profile = getattr(halo, variance_storage)
        variance_group = fh.create_group("variance")
        for field in my_profile:
            # Don't write code units because we might not know those later.
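
The resulting file holds one HDF5 group per storage attribute ("profiles", plus "variance" when present). A sketch of reading it back, assuming the layout written above:

import h5py

with h5py.File(output_file, "r") as fh:
    # Each field written by _yt_array_hdf5 appears as a dataset in the group.
    profiles = {field: fh["profiles"][field][:] for field in fh["profiles"]}
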
github yt-project / yt / yt / frontends / gadget / simulation_handling.py (View on Github)
            my_indices = np.digitize([my_initial_time, my_final_time], my_times)
            if my_initial_time == my_times[my_indices[0] - 1]:
                my_indices[0] -= 1
            my_outputs = my_all_outputs[my_indices[0]:my_indices[1]]

        init_outputs = []
        for output in my_outputs:
            if os.path.exists(output["filename"]):
                init_outputs.append(output["filename"])
        if len(init_outputs) == 0 and len(my_outputs) > 0:
            mylog.warning("Could not find any datasets.  " +
                          "Check the value of OutputDir in your parameter file.")
            
        DatasetSeries.__init__(self, outputs=init_outputs, parallel=parallel,
                                setup_function=setup_function,
                                unit_base=self.unit_base)
        mylog.info("%d outputs loaded into time series.", len(init_outputs))
github yt-project / yt / yt / analysis_modules / light_cone / halo_mask.py (View on Github)
    # Loop through files in light cone solution and get virial quantities.
    for slice in lightCone.light_cone_solution:
        halo_list = _get_halo_list(slice['filename'], **kwargs)
        light_cone_mask.append(_make_slice_mask(slice, halo_list, pixels))

    # Write out cube of masks from each slice.
    if cube_file is not None and ytcfg.getint("yt", "__parallel_rank") == 0:
        mylog.info("Saving halo mask cube to %s." % cube_file)
        output = h5py.File(cube_file, 'a')
        # "na" is the legacy numpy alias ("import numpy as na") used by older yt modules.
        output.create_dataset('haloMaskCube', data=na.array(light_cone_mask))
        output.close()

    # Write out final mask.
    if mask_file is not None and ytcfg.getint("yt", "__parallel_rank") == 0:
        # Final mask is simply the product of the mask from each slice.
        mylog.info("Saving halo mask to %s." % mask_file)
        finalMask = na.ones(shape=(pixels, pixels))
        for mask in light_cone_mask:
            finalMask *= mask

        output = h5py.File(mask_file, 'a')
        output.create_dataset('HaloMask', data=na.array(finalMask))
        output.close()

    return light_cone_mask
github yt-project / yt / yt / analysis_modules / halo_merger_tree / merger_tree.py (View on Github)
def _find_likely_children(self, parentfile, childfile):
        # For each halo in the parent list, identify likely children in the 
        # list of children.

        # First, read in the locations of the child halos.
        child_ds = load(childfile)
        child_t = child_ds.unique_identifier
        if self.comm.rank == 0:
            line = "SELECT SnapHaloID, CenMassX, CenMassY, CenMassZ FROM \
            Halos WHERE SnapCurrentTimeIdentifier = %d" % child_t
            self.cursor.execute(line)
            
            mylog.info("Finding likely parents for z=%1.5f child halos." % \
                child_ds.current_redshift)
            
            # Build the kdtree for the children by looping over the fetched rows.
            # Normalize the points for use only within the kdtree.
            child_points = []
            for row in self.cursor:
                child_points.append([row[1] / self.period[0],
                row[2] / self.period[1],
                row[3] / self.period[2]])
            child_points = np.array(child_points)
            kdtree = cKDTree(child_points, leafsize = 10)
    
        # Find the parent points from the database.
        parent_ds = load(parentfile)
        parent_t = parent_ds.unique_identifier
        if self.comm.rank == 0:
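
The excerpt ends before the tree is used, but the natural continuation is a nearest-neighbor query against the child positions. A self-contained sketch with made-up points:

import numpy as np
from scipy.spatial import cKDTree

child_points = np.random.random((100, 3))   # normalized child positions, as above
kdtree = cKDTree(child_points, leafsize=10)
parent_points = np.random.random((10, 3))   # normalized parent positions
# Distances to and indices of the five nearest child halos for each parent.
dist, ids = kdtree.query(parent_points, k=5)
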
github yt-project / yt / yt / visualization / particle_plotter.py (View on Github)
def _splat_particle_field(self, item):

        mylog.info("Splatting (%s) onto a %d by %d mesh in %s by %s space" %
                (item, self.x_bins, self.y_bins, self.x_field, self.y_field))

        bounds = []
        for b in self.bounds:
            if hasattr(b, "in_units"):
                b = float(b.in_units("code_length"))
            bounds.append(b)

        x_data = self.data_source[self.x_field]
        y_data = self.data_source[self.y_field]
        data = self.data_source[item]

        px = (x_data.d - self.bounds[0]) / (self.bounds[1] - self.bounds[0])
        py = (y_data.d - self.bounds[2]) / (self.bounds[3] - self.bounds[2])

        locs1 = np.logical_and(px > 0.0, px < 1.0)
github yt-project / yt / yt / frontends / exodus_ii / data_structures.py (View on Github)
def _read_connectivity(self):
        """
        Loads the connectivity data for the mesh
        """
        mylog.info("Loading connectivity")
        connectivity = []
        with self._handle.open_ds() as ds:
            for i in range(self.parameters['num_meshes']):
                connectivity.append(ds.variables["connect%d" % (i+1)][:].astype("i8"))
            return connectivity
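
Progress messages like "Loading connectivity" are emitted at the INFO level on every load. Since ytLogger is a standard logging.Logger, the usual level controls apply if you want to quiet them; a sketch:

import logging
from yt.utilities.logger import ytLogger

# Suppress INFO chatter; warnings and errors still get through.
ytLogger.setLevel(logging.WARNING)
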
github yt-project / yt / yt / units / yt_array.py (View on Github)
            try:
                float(word)
                num_cols = len(col_words)
                break
            except ValueError:
                mylog.warning("Unrecognized character at beginning of line: \"%s\"." % line[0])
    f.close()
    if len(units) != num_cols:
        mylog.warning("Malformed or incomplete units header. Arrays will be "
                      "dimensionless!")
        units = ["dimensionless"]*num_cols
    arrays = np.loadtxt(fname, dtype=dtype, comments=comments,
                        delimiter=delimiter, converters=None,
                        unpack=True, usecols=usecols, ndmin=0)
    if usecols is not None:
        units = [units[col] for col in usecols]
    mylog.info("Array units: %s" % ", ".join(units))
    return tuple([YTArray(arr, unit) for arr, unit in zip(arrays, units)])
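
The tuple returned here pairs each column with its parsed unit, one YTArray per column. Constructing such arrays directly is straightforward; a small sketch with made-up data:

import numpy as np
from yt.units.yt_array import YTArray

velocities = YTArray(np.array([1.0, 2.0, 3.0]), "km/s")
# Unit conversion, analogous to convert_to_cgs() in the profile example above.
print(velocities.in_units("m/s"))
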
github yt-project / yt / yt / analysis_modules / cosmological_observation / light_ray / light_ray.py (View on Github)
def _write_light_ray_solution(self, filename, extra_info=None):
        """
        _write_light_ray_solution(filename, extra_info=None)

        Write light ray solution to a file.
        """

        mylog.info("Writing light ray solution to %s." % filename)
        f = open(filename, 'w')
        if extra_info is not None:
            for par, val in extra_info.items():
                f.write("%s = %s\n" % (par, val))
        f.write("\nSegment Redshift dl/box    Start x       y             " + \
                "z             End x         y             z            Dataset\n")
        for q, my_segment in enumerate(self.light_ray_solution):
            f.write("%04d    %.6f %.6f % .10f % .10f % .10f % .10f % .10f % .10f %s\n" % \
                    (q, my_segment['redshift'], my_segment['traversal_box_fraction'],
                     my_segment['start'][0], my_segment['start'][1], my_segment['start'][2],
                     my_segment['end'][0], my_segment['end'][1], my_segment['end'][2],
                     my_segment['filename']))
        f.close()
github yt-project / yt / yt / frontends / art / io.py (View on Github)
def interpolate_ages(data, file_stars, interp_tb=None, interp_ages=None,
                     current_time=None):
    if interp_tb is None:
        t_stars, a_stars = read_star_field(file_stars,
                                           field="t_stars")
        # timestamp of file should match amr timestamp
        if current_time:
            tdiff = YTQuantity(b2t(t_stars), 'Gyr') - current_time.in_units('Gyr')
            if np.abs(tdiff) > 1e-4:
                mylog.info("Timestamp mismatch in star " +
                           "particle header: %s", tdiff)
        mylog.info("Interpolating ages")
        interp_tb, interp_ages = b2t(data)
        interp_tb = YTArray(interp_tb, 'Gyr')
        interp_ages = YTArray(interp_ages, 'Gyr')
    temp = np.interp(data, interp_tb, interp_ages)
    return interp_tb, interp_ages, temp
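
A plausible usage pattern, given the signature: the first call builds the interpolation tables, and later calls pass them back in to skip the expensive b2t() step (the variable names here are hypothetical):

# First call: tables are computed and logged ("Interpolating ages").
interp_tb, interp_ages, ages0 = interpolate_ages(data0, file_stars,
                                                 current_time=t0)
# Subsequent calls reuse the cached tables.
_, _, ages1 = interpolate_ages(data1, file_stars,
                               interp_tb=interp_tb, interp_ages=interp_ages)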