How to use the yt.funcs.mylog logger in yt

To help you get started, we've selected a few yt.funcs.mylog examples, based on popular ways it is used in public projects.

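yt.funcs.mylog is yt's package-level logger, a standard Python logging.Logger, so the familiar logging calls apply. A minimal sketch of basic usage (the messages are placeholders):

import logging

from yt.funcs import mylog

# mylog is a plain logging.Logger, so the standard logging API applies.
mylog.setLevel(logging.ERROR)  # only ERROR and CRITICAL records get through
mylog.info("this record is suppressed at level ERROR")
mylog.error("this record is emitted")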

github yt-project / yt / yt / data_objects / construction_data_containers.py
def deserialize(self, fields):
        if not ytcfg.getboolean("yt", "serialize"):
            return False
        for field in fields:
            self[field] = None
        deserialized_successfully = False
        store_file = self.ds.parameter_filename + '.yt'
        if os.path.isfile(store_file):
            deserialized_successfully = self._mrep.restore(store_file, self.ds)

            if deserialized_successfully:
                mylog.info("Using previous projection data from %s" % store_file)
                for field, field_data in self._mrep.field_data.items():
                    self[field] = field_data
        if not deserialized_successfully:
            for field in fields:
                del self[field]
        return deserialized_successfully
github yt-project / yt / yt / frontends / art / data_structures.py
def _identify_base_chunk(self, dobj):
        """
        Take the passed-in data source dobj, and use its embedded selector
        to calculate the domain mask, build the reduced domain
        subsets and oct counts. Attach this information to dobj.
        """
        if getattr(dobj, "_chunk_info", None) is None:
            # Get all octs within this oct handler
            domains = [dom for dom in self.domains if
                       dom.included(dobj.selector)]
            base_region = getattr(dobj, "base_region", dobj)
            if len(domains) > 1:
                mylog.debug("Identified %s intersecting domains", len(domains))
            subsets = [ARTDomainSubset(base_region, domain, self.dataset)
                       for domain in domains]
            dobj._chunk_info = subsets
        dobj._current_chunk = list(self._chunk_all(dobj))[0]
github ECP-WarpX / WarpX / Examples / Modules / RigidInjection / analysis_rigid_injection_BoostedFrame.py
'''
Analysis script of a WarpX simulation of rigid injection in a boosted frame.

A Gaussian electron beam starts from -5 microns, propagates rigidly up to
20 microns after which it expands due to emittance only (the focal position is
20 microns). The beam width is measured after ~50 microns, and compared with
the theory (with a 5% error allowed).

The simulation runs in a boosted frame, and the analysis is done in the lab
frame, i.e., on the back-transformed diagnostics.
'''

import glob
import os
import sys

import numpy as np
import scipy.constants as scc

import read_raw_data
import yt

yt.funcs.mylog.setLevel(0)  # 0 = logging.NOTSET

# Read data from back-transformed diagnostics
snapshot = './lab_frame_data/snapshot00001'
header   = './lab_frame_data/Header'
allrd, info = read_raw_data.read_lab_snapshot(snapshot, header)
z = np.mean(read_raw_data.get_particle_field(snapshot, 'beam', 'z'))
w = np.std(read_raw_data.get_particle_field(snapshot, 'beam', 'x'))

# Initial parameters
z0 = 20.e-6
w0 = 1.e-6
theta0 = np.arcsin(0.1)

# Theoretical beam width after propagation if rigid injection is ON
wth = np.sqrt( w0**2 + (z-z0)**2*theta0**2 )
error = np.abs((w-wth)/wth)
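The argument to setLevel is a standard logging threshold: the script above passes 0 (logging.NOTSET, leaving output enabled), while the next example passes 50 (logging.CRITICAL, silencing almost everything). A short sketch of the usual values:

import logging

import yt

# Only records at or above the chosen level are emitted.
yt.funcs.mylog.setLevel(logging.DEBUG)     # 10: everything, including debug chatter
yt.funcs.mylog.setLevel(logging.INFO)      # 20: informational messages and above
yt.funcs.mylog.setLevel(logging.WARNING)   # 30: warnings and worse
yt.funcs.mylog.setLevel(logging.CRITICAL)  # 50: effectively silent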
github ECP-WarpX / WarpX / Tools / plot_parallel.py
import argparse
import glob
import os
import sys

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import yt

yt.funcs.mylog.setLevel(50)  # 50 = logging.CRITICAL: suppress yt's log output

'''
This script loops over all WarpX plotfiles in a directory and, for each
plotfile, saves an image showing the field and particles.

Requires yt > 3.5 and Python 3.

It can be run serially:

> python plot_parallel.py --path 
github yt-project / yt / yt / frontends / chombo / data_structures.py
def print_key_parameters(self):
        for a in ["current_time", "domain_dimensions", "domain_left_edge",
                  "domain_right_edge"]:
            if not hasattr(self, a):
                mylog.error("Missing %s in parameter file definition!", a)
                continue
            v = getattr(self, a)
            mylog.info("Parameters: %-25s = %s", a, v)
github yt-project / yt / yt / analysis_modules / two_point_functions / two_point_functions.py
def _build_sort_array(self):
        """
        When running on a unigrid simulation, the kD tree isn't necessary.
        But we need to ensure that the points are sorted in the usual manner
        allowing values to be found via array indices.
        """
        mylog.info("Unigrid: finding cell centers.")
        xp = self.ds["x"]
        yp = self.ds["y"]
        zp = self.ds["z"]
        self.sizes = [np.unique(xp).size, np.unique(yp).size, np.unique(zp).size]
        self.sort = np.lexsort([zp, yp, xp])
        del xp, yp, zp
        self.ds.clear_data()
github yt-project / yt / yt / analysis_modules / halo_finding / rockstar / rockstar.py
def __init__(self, ts, num_readers=1, num_writers=None,
            outbase="rockstar_halos", particle_type="all",
            force_res=None, total_particles=None, dm_only=False,
            particle_mass=None, min_halo_size=25):
        if is_root():
            mylog.info("The citation for the Rockstar halo finder can be found at")
            mylog.info("https://ui.adsabs.harvard.edu/abs/2013ApJ...762..109B")
        ParallelAnalysisInterface.__init__(self)
        # Decide how we're working.
        if ytcfg.getboolean("yt", "inline"):
            self.runner = InlineRunner()
        else:
            self.runner = StandardRunner(num_readers, num_writers)
        self.num_readers = self.runner.num_readers
        self.num_writers = self.runner.num_writers
        mylog.info("Rockstar is using %d readers and %d writers",
            self.num_readers, self.num_writers)
        # Note that Rockstar does not support subvolumes.
        # We assume that all of the snapshots in the time series
        # use the same domain info as the first snapshot.
        if not isinstance(ts, DatasetSeries):
            ts = DatasetSeries([ts])
        self.ts = ts
        self.particle_type = particle_type
        self.outbase = six.b(outbase)
        self.min_halo_size = min_halo_size
        if force_res is None:
            tds = ts[-1] # Cache a reference
            self.force_res = tds.index.get_smallest_dx().in_units("Mpc/h")
            # We have to delete now to wipe the index
            del tds
        else:
github yt-project / yt / yt / data_objects / time_series.py
def _print_attr(self, a):
        """
        Print the attribute or warn about it missing.
        """
        if not hasattr(self, a):
            mylog.error("Missing %s in dataset definition!", a)
            return
        v = getattr(self, a)
        mylog.info("Parameters: %-25s = %s", a, v)
github ECP-WarpX / WarpX / Examples / Modules / RigidInjection / analysis_3Dbacktransformed_diag.py
#! /usr/bin/env python

'''
Analysis script of a WarpX simulation in a boosted frame.

The simulation runs in a boosted frame, and the analysis is done in the lab
frame, i.e., on the back-transformed diagnostics for the full 3D simulation and
an x-z slice at y=y_center. The field data Ez along z at (x_center, y_center, :) is compared
between the full back-transformed diagnostic and the reduced diagnostic (i.e., the x-z slice).
'''

import glob
import os
import sys

import numpy as np
import scipy.constants as scc

import read_raw_data
import yt

yt.funcs.mylog.setLevel(0)  # 0 = logging.NOTSET

# Read data from back-transformed diagnostics of entire domain
snapshot = './lab_frame_data/snapshots/snapshot00000'
header   = './lab_frame_data/snapshots/Header'
allrd, info = read_raw_data.read_lab_snapshot(snapshot, header)
F = allrd['Ez']
F_1D = np.squeeze(F[F.shape[0]//2, F.shape[1]//2, :])


# Read data from reduced back-transformed diagnostics (i.e. slice)
snapshot_slice = './lab_frame_data/slices/slice00000'
header_slice   = './lab_frame_data/slices/Header'
allrd, info = read_raw_data.read_lab_snapshot(snapshot_slice, header_slice)
Fs = allrd['Ez']
Fs_1D = np.squeeze(Fs[Fs.shape[0]//2, Fs.shape[1]//2, :])
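
The snippet stops before the comparison the docstring describes. A minimal sketch of how the two profiles might be checked against each other; the tolerance is an assumed value, not taken from the original script:

# Hypothetical completion: compare the full-diagnostic profile with the sliced one.
tolerance_rel = 1e-6  # assumed tolerance, not from the original script
error_rel = np.max(np.abs(F_1D - Fs_1D)) / np.max(np.abs(F_1D))
print('relative error:', error_rel)
assert error_rel < tolerance_rel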