# Shared imports assumed for the excerpts below (module paths follow yt 3.x;
# in newer yt releases ``iterable`` is called ``is_sequence``):
from numbers import Number

import numpy as np
from numpy.random import RandomState
from six import string_types
from six.moves import cPickle
import h5py as h5

from yt.funcs import iterable, fix_unitary, validate_width_tuple
from yt.units.yt_array import YTArray, YTQuantity
from yt.utilities.exceptions import YTInvalidWidthError
from yt.utilities.orientation import Orientation
from yt.utilities.parallel_tools.parallel_analysis_interface import \
    ParallelAnalysisInterface


def fake_random_ds(
ndims, peak_value = 1.0,
fields = ("density", "velocity_x", "velocity_y", "velocity_z"),
units = ('g/cm**3', 'cm/s', 'cm/s', 'cm/s'),
particle_fields=None, particle_field_units=None,
negative = False, nprocs = 1, particles = 0, length_unit=1.0,
unit_system="cgs", bbox=None):
from yt.frontends.stream.api import load_uniform_grid
prng = RandomState(0x4d3d3d3)
if not iterable(ndims):
ndims = [ndims, ndims, ndims]
else:
assert(len(ndims) == 3)
if not iterable(negative):
negative = [negative for f in fields]
assert(len(fields) == len(negative))
offsets = []
for n in negative:
if n:
offsets.append(0.5)
else:
offsets.append(0.0)
data = {}
for field, offset, u in zip(fields, offsets, units):
v = (prng.random_sample(ndims) - offset) * peak_value
if field[0] == "all":
v = v.ravel()
data[field] = (v, u)
    if particles:
        if particle_fields is not None:
            # Reconstructed remainder of this truncated excerpt (follows the
            # upstream yt.testing helper; details may differ slightly): fill
            # each requested particle field with uniform random samples.
            for field, unit in zip(particle_fields, particle_field_units):
                if field in ("particle_position", "particle_velocity"):
                    data["io", field] = (prng.random_sample((int(particles), 3)), unit)
                else:
                    data["io", field] = (prng.random_sample(size=int(particles)), unit)
        else:
            # Default particle fields: positions in [0, 1) code lengths,
            # zero-centred velocities, and unit masses.
            for ax in "xyz":
                data["io", "particle_position_%s" % ax] = (
                    prng.random_sample(size=int(particles)), "code_length")
                data["io", "particle_velocity_%s" % ax] = (
                    prng.random_sample(size=int(particles)) - 0.5, "cm/s")
            data["io", "particle_mass"] = (prng.random_sample(int(particles)), "g")
    return load_uniform_grid(data, ndims, length_unit=length_unit, nprocs=nprocs,
                             unit_system=unit_system, bbox=bbox)
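# Illustrative usage (hedged sketch, assumes yt is installed; fake_random_ds is
# the in-memory test helper from yt.testing):
#     ds = fake_random_ds(16, particles=16, negative=True)
#     ad = ds.all_data()
#     ad["density"].min()   # negative=True centres the random values on zero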
def _validate_point(point, ds, start=False):
if not iterable(point):
raise RuntimeError(
"Input point must be array-like"
)
if not isinstance(point, YTArray):
point = ds.arr(point, 'code_length')
if len(point.shape) != 1:
raise RuntimeError(
"Input point must be a 1D array"
)
if point.shape[0] < ds.dimensionality:
raise RuntimeError(
"Input point must have an element for each dimension"
)
# need to pad to 3D elements to avoid issues later
    if point.shape[0] < 3:
        # Assumed completion of this truncated excerpt: pad the missing
        # dimension(s) so downstream code always sees a 3-element point
        # (zeros for a start point, ones for an end point, in code_length).
        pad = np.zeros(3 - point.shape[0]) if start else np.ones(3 - point.shape[0])
        point = ds.arr(np.append(point.in_units("code_length").d, pad),
                       "code_length")
    return point
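# Illustrative behaviour (hedged): on a 3D dataset, a plain list is promoted to
# a YTArray in code_length, while non-array input is rejected:
#     _validate_point([0.25, 0.75, 0.5], ds)   # -> YTArray in code_length
#     _validate_point(0.5, ds)                 # -> RuntimeError("Input point must be array-like")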
def set_defaults_from_data_source(self, data_source):
"""Resets the camera attributes to their default values"""
position = data_source.ds.domain_right_edge
width = 1.5 * data_source.ds.domain_width.max()
(xmi, xma), (ymi, yma), (zmi, zma) = \
data_source.quantities['Extrema'](['x', 'y', 'z'])
width = np.sqrt((xma - xmi) ** 2 + (yma - ymi) ** 2 +
(zma - zmi) ** 2)
focus = data_source.get_field_parameter('center')
if iterable(width) and len(width) > 1 and isinstance(width[1], string_types):
width = data_source.ds.quan(width[0], input_units=width[1])
# Now convert back to code length for subsequent manipulation
width = width.in_units("code_length") # .value
if not iterable(width):
width = data_source.ds.arr([width, width, width],
input_units='code_length')
# left/right, top/bottom, front/back
if not isinstance(width, YTArray):
width = data_source.ds.arr(width, input_units="code_length")
if not isinstance(focus, YTArray):
focus = data_source.ds.arr(focus, input_units="code_length")
# We can't use the property setters yet, since they rely on attributes
# that will not be set up until the base class initializer is called.
# See Issue #1131.
    self._width = width
    # The upstream method goes on to record the computed focus and position the
    # same way (hedged completion of the truncated excerpt):
    self._focus = focus
    self._position = position
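# Illustrative call (hedged sketch; assumes yt's Scene/Camera API and a loaded
# dataset ``ds``):
#     import yt
#     sc = yt.create_scene(ds)
#     sp = ds.sphere("c", (50, "kpc"))
#     sc.camera.set_defaults_from_data_source(sp)  # frame the camera on the sphere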
def _sanitize_dims(self, dims):
if not iterable(dims):
dims = [dims]*len(self.ds.domain_left_edge)
if len(dims) != len(self.ds.domain_left_edge):
raise RuntimeError(
"Length of dims must match the dimensionality of the dataset")
return np.array(dims, dtype='int32')
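# Behaviour sketch for _sanitize_dims (illustrative, for a 3D dataset):
#     _sanitize_dims(64)            # -> np.array([64, 64, 64], dtype='int32')
#     _sanitize_dims([64, 32, 16])  # -> np.array([64, 32, 16], dtype='int32')
#     _sanitize_dims([64, 32])      # -> RuntimeError (length must match dimensionality)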
def sanitize_depth(self, depth):
if iterable(depth):
validate_width_tuple(depth)
depth = (self.ds.quan(depth[0], fix_unitary(depth[1])), )
elif isinstance(depth, Number):
depth = (self.ds.quan(depth, 'code_length',
registry=self.ds.unit_registry), )
elif isinstance(depth, YTQuantity):
depth = (depth, )
else:
raise YTInvalidWidthError(depth)
return depth
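# Behaviour sketch for sanitize_depth (illustrative): every accepted form is
# normalised to a one-element tuple holding a YTQuantity:
#     sanitize_depth((10, 'kpc'))         # -> (ds.quan(10, 'kpc'),)
#     sanitize_depth(0.2)                 # -> (ds.quan(0.2, 'code_length'),)
#     sanitize_depth(ds.quan(10, 'kpc'))  # -> (ds.quan(10, 'kpc'),)
# Anything else raises YTInvalidWidthError.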
# The opening of this constructor is missing from the excerpt; the signature
# below is a hedged reconstruction limited to the parameters referenced in the
# body (the upstream yt 3.x Camera accepts a few more):
def __init__(self, center, normal_vector, width, resolution,
             transfer_function=None, north_vector=None, steady_north=False,
             fields=None, sub_samples=5, ds=None,
min_level=None, max_level=None, no_ghost=True,
data_source=None,
use_light=False):
ParallelAnalysisInterface.__init__(self)
if ds is not None: self.ds = ds
if not iterable(resolution):
resolution = (resolution, resolution)
self.resolution = resolution
self.sub_samples = sub_samples
self.rotation_vector = north_vector
if iterable(width) and len(width) > 1 and isinstance(width[1], str):
width = self.ds.quan(width[0], input_units=width[1])
# Now convert back to code length for subsequent manipulation
width = width.in_units("code_length").value
if not iterable(width):
width = (width, width, width) # left/right, top/bottom, front/back
if not isinstance(width, YTArray):
width = self.ds.arr(width, input_units="code_length")
if not isinstance(center, YTArray):
center = self.ds.arr(center, input_units="code_length")
# Ensure that width and center are in the same units
# Cf. https://bitbucket.org/yt_analysis/yt/issue/1080
width.convert_to_units("code_length")
center.convert_to_units("code_length")
self.orienter = Orientation(normal_vector, north_vector=north_vector, steady_north=steady_north)
if not steady_north:
self.rotation_vector = self.orienter.unit_vectors[1]
self._setup_box_properties(width, center, self.orienter.unit_vectors)
if fields is None: fields = ["density"]
self.fields = fields
    if transfer_function is None:
        # Hedged completion of this truncated excerpt: the old camera falls
        # back to a default transfer function here; ProjectionTransferFunction
        # is the usual choice.
        from yt.visualization.volume_rendering.transfer_functions import \
            ProjectionTransferFunction
        transfer_function = ProjectionTransferFunction()
    self.transfer_function = transfer_function
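# Note on the normalisation above (illustrative): a width given as
# (value, 'unit') is converted to a three-element YTArray in code_length
# (left/right, top/bottom, front/back), and width and center are forced onto
# the same code_length units before the Orientation basis and the box
# properties are computed.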
def _yt_array_hdf5_attr(fh, attr, val):
    r"""Save an hdf5 attribute, recording units in a companion attribute.

    (Reconstructed header for this truncated excerpt; the function name and
    summary follow yt's ytdata frontend utilities.)

    Parameters
    ----------
    fh : an open hdf5 file, group, or dataset
The hdf5 file, group, or dataset to which the
attribute will be written.
attr : str
The name of the attribute to be saved.
val : anything
The value to be saved.
"""
if val is None: val = "None"
if hasattr(val, "units"):
fh.attrs["%s_units" % attr] = str(val.units)
# The following is a crappy workaround for getting
# Unicode strings into HDF5 attributes in Python 3
if iterable(val):
val = np.array(val)
if val.dtype.kind == 'U':
val = val.astype('|S')
try:
fh.attrs[str(attr)] = val
# This is raised if no HDF5 equivalent exists.
# In that case, save its string representation.
except TypeError:
fh.attrs[str(attr)] = str(val)
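# Illustrative usage (hedged; assumes h5py and yt, using an in-memory file so
# nothing touches disk):
#     with h5.File("attrs.h5", "w", driver="core", backing_store=False) as f:
#         _yt_array_hdf5_attr(f, "left_edge", YTArray([0.0, 0.0, 0.0], "cm"))
#         # f.attrs["left_edge"] holds the values, f.attrs["left_edge_units"] the "cm"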
def load_object(self, name):
"""
    Load and return an object from the data_file using the Pickle protocol,
under the name *name* on the node /Objects.
"""
obj = self.get_data("/Objects", name)
if obj is None:
return
    obj = cPickle.loads(obj[()])  # dataset[()] reads the raw bytes; h5py removed .value
if iterable(obj) and len(obj) == 2:
obj = obj[1] # Just the object, not the ds
if hasattr(obj, '_fix_pickle'): obj._fix_pickle()
return obj
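# Note (illustrative): objects are stored under /Objects as pickled (ds, obj)
# pairs, so only the second element is returned; objects that need their
# dataset reference restored implement a _fix_pickle() hook.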
def _deserialize_from_h5(g, ds):
result = {}
for item in g:
if item == "chunks":
continue
if "units" in g[item].attrs:
if iterable(g[item]):
result[item] = ds.arr(g[item][:], g[item].attrs["units"])
else:
result[item] = ds.quan(g[item][()],
g[item].attrs["units"])
elif isinstance(g[item], h5.Group):
result[item] = _deserialize_from_h5(g[item], ds)
elif g[item] == "None":
result[item] = None
else:
try:
result[item] = g[item][:] # try array
except ValueError:
result[item] = g[item][()] # fallback to scalar
return result
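# Behaviour sketch for _deserialize_from_h5 (illustrative): datasets with a
# "units" attribute come back as YTArray/YTQuantity via ds.arr/ds.quan, nested
# groups recurse, the literal string "None" round-trips to None, and anything
# else is read as a plain array (falling back to a scalar read).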
def parse_value(value, default_units):
if isinstance(value, YTQuantity):
return value.in_units(default_units)
elif iterable(value):
return YTQuantity(value[0], value[1]).in_units(default_units)
else:
return YTQuantity(value, default_units)
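# Illustrative usage of parse_value (hedged; assumes yt's YTQuantity imported above):
print(parse_value(0.3, "kpc"))                        # 0.3 kpc
print(parse_value((300.0, "pc"), "kpc"))              # 0.3 kpc
print(parse_value(YTQuantity(300.0, "pc"), "kpc"))    # 0.3 kpc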