Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
*axes*: matplotlib axes object (default: None)
if not None, the axes object to plot to
*dynamic_range*: float (default: 2.0)
The number of dex in luminosity over which the image brightness ranges
'''
if isinstance(width, str) or issubclass(width.__class__, _units.UnitBase):
if isinstance(width, str):
width = _units.Unit(width)
width = width.in_units(sim['pos'].units, **sim.conversion_context())
if starsize is not None:
smf = filt.HighPass('smooth', str(starsize) + ' kpc')
sim.s[smf]['smooth'] = array.SimArray(starsize, 'kpc', sim=sim)
r = image(sim.s, qty=r_band + '_lum_den', width=width, log=False,
av_z=True, clear=False, noplot=True) * r_scale
g = image(sim.s, qty=g_band + '_lum_den', width=width, log=False,
av_z=True, clear=False, noplot=True) * g_scale
b = image(sim.s, qty=b_band + '_lum_den', width=width, log=False,
av_z=True, clear=False, noplot=True) * b_scale
#r,g,b = nw_scale_rgb(r,g,b)
#r,g,b = nw_arcsinh_fit(r,g,b)
rgbim = combine(r, g, b, dynamic_range)
if plot:
if clear:
plt.clf()
else:
p_types = gadget_type(self.families())
# Get the data. Get one type at a time and then concatenate.
# A possible optimisation is to special-case loading all particles.
data = np.array([], dtype=self._get_array_type(name))
for p in p_types:
# Special-case mass
if g_name == b"MASS" and self.header.mass[p] != 0.:
data = np.append(data, self.header.mass[
p] * np.ones(self.header.npart[p], dtype=data.dtype))
else:
data = np.append(data, self.__load_array(g_name, p))
if fam is None:
self[name] = data.reshape(dims, order='C').view(array.SimArray)
self[name].set_default_units(quiet=True)
else:
self[fam][name] = data.reshape(
dims, order='C').view(array.SimArray)
self[fam][name].set_default_units(quiet=True)
import logging
import csv
logger = logging.getLogger('pynbody.snapshot.ramses')
from collections import OrderedDict
# Number of parallel reader processes configured for RAMSES I/O;
# values > 1 enable the multiprocess read path.
multiprocess_num = int(config_parser.get('ramses', "parallel-read"))
multiprocess = (multiprocess_num > 1)
issue_multiprocess_warning = False
if multiprocess:
    try:
        # posix_ipc is required by the shared-array helpers; if either
        # import fails, fall back to the serial path and record that a
        # warning should be issued later.
        import multiprocessing
        import posix_ipc
        remote_exec = array.shared_array_remote
        remote_map = array.remote_map
    except ImportError:
        issue_multiprocess_warning = True
        multiprocess = False
if not multiprocess:
def remote_exec(fn):
    """Serial fallback for the shared-array remote-execution wrapper.

    Returns *fn* wrapped so it presents the same call shape as the
    multiprocess ``remote_exec``, but simply runs in-process.

    The previous version captured a start time (``t0 = time.time()``)
    that was never used; the dead variable has been removed.
    """
    def q(*args):
        return fn(*args)
    return q
def remote_map(*args, **kwargs):
    """Serial fallback for ``remote_map``: discard the leading context
    argument and delegate directly to the builtin :func:`map`."""
    serial_args = args[1:]
    return map(*serial_args, **kwargs)
@Profile.profile_property
def weight_fn(self, weight_by=None):
    """Sum the weighting array (default ``self._weight_by``, typically
    mass) over the particles in each profile bin.

    Returns a SimArray of per-bin totals carrying the units of the
    source array and linked back to the underlying simulation.
    """
    if weight_by is None:
        weight_by = self._weight_by
    src_units = self.sim[weight_by].units
    totals = array.SimArray(np.zeros(self.nbins), src_units)
    # Fetch the raw ndarray view once up front; immediate_mode is used
    # here — presumably to avoid lazy-array indirection in the per-bin
    # loop below (NOTE(review): confirm against pynbody docs).
    with self.sim.immediate_mode:
        raw = self.sim[weight_by].view(np.ndarray)
    for bin_idx in range(self.nbins):
        totals[bin_idx] = raw[self.binind[bin_idx]].sum()
    totals.sim = self.sim
    totals.units = src_units
    return totals
def j(self):
    """Specific angular momentum (pos x vel) per particle, with units
    inherited from the position and velocity arrays."""
    pos, vel = self['pos'], self['vel']
    spec_j = np.cross(pos, vel).view(array.SimArray)
    spec_j.units = pos.units * vel.units
    return spec_j
else:
p_types = gadget_type(self.families())
# Get the data. Get one type at a time and then concatenate.
# A possible optimisation is to special-case loading all particles.
data = np.array([], dtype=self._get_array_type(name))
for p in p_types:
# Special-case mass
if g_name == b"MASS" and self.header.mass[p] != 0.:
data = np.append(data, self.header.mass[
p] * np.ones(self.header.npart[p], dtype=data.dtype))
else:
data = np.append(data, self.__load_array(g_name, p))
if fam is None:
self[name] = data.reshape(dims, order='C').view(array.SimArray)
self[name].set_default_units(quiet=True)
else:
self[fam][name] = data.reshape(
dims, order='C').view(array.SimArray)
self[fam][name].set_default_units(quiet=True)
# This makes promotion to simulation-level arrays possible.
raise ValueError("Requested data type %r is not consistent with existing data type %r for family array %r" % (
str(dtype), str(dtx), array_name))
if all([x in fams for x in self_families]):
# If, once we created this array, *all* families would have
# this array, just create a simulation-level array
if self._promote_family_array(array_name, ndim=ndim, derived=derived, shared=shared) is not None:
return None
# if we get here, either the array cannot be promoted to simulation level, or that would
# not be appropriate, so actually go ahead and create the family array
if shared is None:
shared = self._shared_arrays
new_ar = array._array_factory(dims, dtype, False, shared)
new_ar._sim = weakref.ref(self)
new_ar._name = array_name
new_ar.family = family
def sfa(n, v):
    # Register v as the family-level array named n, creating the
    # per-name mapping on first use (closure over self and family).
    self._family_arrays.setdefault(n, {})[family] = v
sfa(array_name, new_ar)
if derived:
if array_name not in self._family_derived_array_names[family]:
self._family_derived_array_names[family].append(array_name)
if ndim is 3:
import functools
import time
import logging
logger = logging.getLogger('pynbody.snapshot.ramses')
# Number of parallel reader processes configured for RAMSES I/O;
# values > 1 enable the multiprocess read path.
multiprocess_num = int(config_parser.get('ramses', "parallel-read"))
multiprocess = (multiprocess_num > 1)
issue_multiprocess_warning = False
if multiprocess:
    try:
        # posix_ipc is required by the shared-array helpers; if either
        # import fails, fall back to the serial path and record that a
        # warning should be issued later.
        import multiprocessing
        import posix_ipc
        remote_exec = array.shared_array_remote
        remote_map = array.remote_map
    except ImportError:
        issue_multiprocess_warning = True
        multiprocess = False
if not multiprocess:
def remote_exec(fn):
    """Serial fallback for the shared-array remote-execution wrapper.

    Returns *fn* wrapped so it presents the same call shape as the
    multiprocess ``remote_exec``, but simply runs in-process.

    The previous version captured a start time (``t0 = time.time()``)
    that was never used; the dead variable has been removed.
    """
    def q(*args):
        return fn(*args)
    return q
def remote_map(*args, **kwargs):
    """Serial fallback for ``remote_map``: discard the leading context
    argument and delegate directly to the builtin :func:`map`."""
    serial_args = args[1:]
    return map(*serial_args, **kwargs)
d = h[filt.Disc('1 Mpc', h['eps'].min()*3)]
try:
    # Attempt to load a previously computed rotation curve from disk.
    r, v_c = np.loadtxt(h.ancestor.filename+".rot."+str(h.properties["halo_id"]), skiprows=1, unpack=True)
    pro_d = profile.Profile(d, nbins=100, type='log')
    r_me = pro_d["rbins"].in_units("kpc")
    # Pad the tabulated curve at both ends so the interpolation is flat
    # beyond the sampled radial range.
    r_x = np.concatenate(([0], r, [r.max()*2]))
    v_c = np.concatenate(([v_c[0]], v_c, [v_c[-1]]))
    v_c = interp.interp1d(r_x, v_c, bounds_error=False)(r_me)
    logger.info(" - found existing rotation curve on disk, using that")
    v_c = v_c.view(array.SimArray)
    v_c.units = "km s^-1"
    v_c.sim = d
    v_c.convert_units(h['vel'].units)
    pro_d._profiles['v_circ'] = v_c
    pro_d.v_circ_loaded = True
except Exception:
    # Cached curve missing or unreadable: recompute from the particle
    # data. This was a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit; narrowed to Exception.
    pro_d = profile.Profile(d, nbins=100, type='log')
    # Nasty hack follows to force the full halo to be used in calculating the
    # gravity (otherwise get incorrect rotation curves)
    pro_d._profiles['v_circ'] = profile.v_circ(pro_d, h)
pro_phi = pro_d['phi']
#import pdb; pdb.set_trace()
i = 0
for r in rxy_points:
r_acc_r = []
for pos in [(r, 0, 0), (0, r, 0), (-r, 0, 0), (0, -r, 0)]:
r_acc_r.append(np.dot(-accel[i, :], pos))
i = i + 1
vel2 = np.mean(r_acc_r)
if vel2 > 0:
vel = math.sqrt(vel2)
else:
vel = 0
vels.append(vel)
x = array.SimArray(vels, units=u_out)
x.sim = f.ancestor
return x