elif name=="pos" :
self._create_array('pos', 3)
self['pos'].units="Mpc a"
pos=self['pos']
nx,ny,nz = [int(self._header[x]) for x in 'nx','ny','nz']
# the following is equivalent to
#
# self['z'],self['y'],self['x'] = np.mgrid[0.0:self._header['nx'], 0.0:self._header['ny'], 0.0:self._header['nz']]
#
# but works on partial loading without having to generate the entire mgrid
# (which might easily exceed the available memory for a big grid)
pos_cache = np.empty((_max_buflen,3))
fp0 = 0
for readlen, buf_index, mem_index in self._load_control.iterate(family.dm, family.dm) :
if mem_index is not None :
pos[mem_index] = _grid_gen(slice(fp0, fp0+readlen), nx, ny, nz, pos=pos_cache)[buf_index]
fp0+=readlen
self['pos']*=self._header['dx']
a = self.properties['a']
bdot_by_b = analysis.cosmology.rate_linear_growth(self, unit='km Mpc^-1 s^-1')/analysis.cosmology.linear_growth_factor(self)
# offset position according to zeldovich approximation
self['offset'] = self['vel']/(a*bdot_by_b)
self['offset'].units=self['vel'].units/units.Unit('km Mpc^-1 s^-1 a^-1')
self['pos']+=self['offset']
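# The _grid_gen call above fills grid coordinates for one file slice at a time.
# Below is a minimal sketch of that idea; grid_positions is an illustrative
# stand-in, not pynbody's actual _grid_gen, and it assumes FORTRAN-style
# ordering where x varies fastest.
import numpy as np

def grid_positions(sl, nx, ny, nz, pos=None):
    # fill positions for flat indices [sl.start, sl.stop) of an nx*ny*nz grid
    # without ever materialising the full np.mgrid
    n = sl.stop - sl.start
    if pos is None:
        pos = np.empty((n, 3))
    idx = np.arange(sl.start, sl.stop)
    pos[:n, 0] = idx % nx            # x varies fastest
    pos[:n, 1] = (idx // nx) % ny
    pos[:n, 2] = idx // (nx * ny)
    return pos[:n]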
elif name=="vel" :
self._create_array('vel', 3)
vel = self['vel']
positive_typemap = [family.get_family(x.strip())
                    for x in config_parser.get('ramses', 'type-mapping-positive').split(",")]
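# The 'type-mapping-positive' option is just a comma-separated list of family
# names. A standalone sketch of the same parsing, with an illustrative config
# string (the family names here are made up):
import configparser

cfg = configparser.ConfigParser()
cfg.read_string("[ramses]\ntype-mapping-positive: dm, star, bh\n")
names = [x.strip() for x in cfg.get('ramses', 'type-mapping-positive').split(",")]
# names == ['dm', 'star', 'bh']; each would then be resolved via family.get_family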
import configparser
import logging
import warnings

from . import namemapper

logger = logging.getLogger('pynbody.snapshot.gadgethdf')

try:
    import h5py
except ImportError:
    h5py = None
_default_type_map = {}
for x in family.family_names():
    try:
        _default_type_map[family.get_family(x)] = \
            [q.strip() for q in config_parser.get('gadgethdf-type-mapping', x).split(",")]
    except configparser.NoOptionError:
        pass

_all_hdf_particle_groups = []
for hdf_groups in _default_type_map.values():
    for hdf_group in hdf_groups:
        _all_hdf_particle_groups.append(hdf_group)
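# With a typical 'gadgethdf-type-mapping' config section such as
#
#   [gadgethdf-type-mapping]
#   gas: PartType0
#   dm: PartType1
#   star: PartType4
#
# (illustrative values), _default_type_map maps each family object to its list
# of HDF5 group names, and _all_hdf_particle_groups flattens those lists to
# ['PartType0', 'PartType1', 'PartType4'].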
def _append_if_array(to_list, name, obj):
    # visit callback: only datasets (which have no .keys()) are collected
    if not hasattr(obj, 'keys'):
        to_list.append(name)
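# A callback with this shape is typically bound to a list and handed to h5py's
# visititems, which walks a group recursively calling fn(name, object); only
# datasets (which lack .keys()) get collected. A self-contained sketch, assuming
# h5py is installed (the file contents below are made up):
import functools

if h5py is not None:
    names = []
    with h5py.File('scratch.h5', 'w', driver='core', backing_store=False) as f:
        f.create_dataset('PartType0/Coordinates', data=[[0.0, 0.0, 0.0]])
        f.create_dataset('PartType0/Masses', data=[1.0])
        f.visititems(functools.partial(_append_if_array, names))
    # names == ['PartType0/Coordinates', 'PartType0/Masses']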
if binary:
    fhand = util.open_(filename, 'wb')
    # binary header: the particle count as a 4-byte int; ">i" forces big-endian
    if byteswap:
        fhand.write(struct.pack(">i", len(self)))
    else:
        fhand.write(struct.pack("i", len(self)))
else:
    fhand = util.open_(filename, 'wb')
    fhand.write((str(len(self)) + '\n').encode('utf-8'))
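# The only difference between the two binary branches is byte order: ">i" packs
# the count big-endian, while "i" uses the platform's native order. A quick
# standalone check:
import struct

struct.pack(">i", 1000)   # b'\x00\x00\x03\xe8'  (big-endian)
struct.pack("<i", 1000)   # b'\xe8\x03\x00\x00'  (little-endian, native on x86)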
if contents is None:
    if array_name in self.family_keys():
        for f in [family.gas, family.dm, family.star]:
            try:
                dtype = self[f][array_name].dtype
                ar = self[f][array_name]
                units_out = ar.units
            except KeyError:
                # this family lacks the array; write a zero-filled placeholder
                ar = np.zeros(len(self[f]), dtype=int)
            TipsySnap.__write_block(fhand, ar, binary, byteswap)
    else:
        ar = self[array_name]
        dtype = self[array_name].dtype
        units_out = ar.units
        TipsySnap.__write_block(fhand, ar, binary, byteswap)
def _count_particles(self):
    if self._has_explicit_particle_families:
        return self._count_particles_using_explicit_families()
    else:
        ndm, nstar = self._count_particles_using_implicit_families()
        return OrderedDict([(family.dm, ndm), (family.star, nstar)])
def loadable_keys(self, fam=None):
    if fam is None:
        # keep only keys loadable for *every* family: a running set intersection
        keys = None
        for f0 in self.families():
            if keys is None:
                keys = set(self.loadable_keys(f0))
            else:
                keys.intersection_update(self.loadable_keys(f0))
    else:
        if fam is family.gas:
            keys = ['x', 'y', 'z', 'smooth'] + hydro_blocks + self._rt_blocks
        elif fam in self._iter_particle_families():
            keys = self._particle_blocks
        elif fam is self._sink_family:
            keys = set(self._sink_column_names)
        else:
            keys = set()

    keys_ND = set()
    for key in keys:
        keys_ND.add(self._array_name_1D_to_ND(key) or key)
    return list(keys_ND)
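# When fam is None the method keeps only keys loadable for every family, i.e.
# a running set intersection. The core pattern in isolation, with made-up key
# lists:
keys = None
for family_keys in (['pos', 'vel', 'mass'], ['pos', 'vel'], ['pos', 'vel', 'metals']):
    if keys is None:
        keys = set(family_keys)
    else:
        keys.intersection_update(family_keys)
# keys == {'pos', 'vel'}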
def _read_grafic_file(self, filename, target_buffer, data_type):
    with FortranFile(filename) as f:
        h = f.read_field(genic_header)
        length = self._dlen * data_type.itemsize
        alen = f.get_raw_memmapped(util._head_type)
        if alen != length:
            raise IOError("Unexpected FORTRAN block length %d!=%d" % (alen, length))
        # pause at the end of each z-slab to skip the intervening FORTRAN
        # record markers
        for readlen, buf_index, mem_index in self._load_control.iterate_with_interrupts(
                family.dm, family.dm,
                np.arange(1, h['nz']) * (h['nx'] * h['ny']),
                functools.partial(_midway_fortran_skip, f, length)):
            if buf_index is not None:
                re = f.get_raw_memmapped(data_type, readlen)
                target_buffer[mem_index] = re[buf_index]
            else:
                f.seek(data_type.itemsize * readlen)
        alen = f.get_raw_memmapped(util._head_type)
        if alen != length:
            raise IOError("Unexpected FORTRAN block length (tail) %d!=%d" % (alen, length))
with open(self._filename, 'r') as f:
    if not self._header:
        f.readline()
    for array_name in array_name_list:
        self._create_array(array_name, ndim=1)
    rs = [self[array_name] for array_name in array_name_list]
    cols = [self._loadable_keys.index(array_name) for array_name in array_name_list]
    ncols = len(self._loadable_keys)
    for readlen, buf_index, mem_index in self._load_control.iterate([family.dm], [family.dm]):
        # parse the next readlen rows of whitespace-separated text
        b = np.fromfile(f, count=readlen * ncols, sep=' ').reshape((-1, ncols))
        if mem_index is not None:
            for r, col in zip(rs, cols):
                r[mem_index] = b[buf_index, col]
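# np.fromfile with sep=' ' parses whitespace-separated text rather than binary,
# which is why each chunk of readlen rows can be pulled straight from the file
# and reshaped into columns. A standalone illustration with a throwaway file:
import numpy as np
import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write("1 10 100\n2 20 200\n3 30 300\n")
    path = tmp.name
b = np.fromfile(path, count=2 * 3, sep=' ').reshape((-1, 3))
# b holds the first two rows: [[1., 10., 100.], [2., 20., 200.]]
os.remove(path)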
def _load_sink_data_to_temporary_store(self):
    if not os.path.exists(self._sink_filename()):
        self._after_load_sink_data_failure(warn=False)
        return

    self._sink_family = family.get_family(config_parser.get('ramses', 'type-sink'))

    with open(self._sink_filename(), "r") as sink_file:
        reader = csv.reader(sink_file, skipinitialspace=True)
        data = list(reader)

    if len(data) < 2:
        self._after_load_sink_data_failure()
        return

    column_names = data[0]
    dimensions = data[1]
    data = np.array(data[2:], dtype=object)

    # the first two rows are '#'-prefixed headers: column names, then dimensions
    if column_names[0][0] != '#' or dimensions[0][0] != '#':
        self._after_load_sink_data_failure()
        return
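# The sink file is plain CSV with two '#'-prefixed header rows (column names,
# then dimensions) ahead of the data. A sketch of the expected layout and the
# same parsing, with made-up columns and values:
import csv
import io

sample = ("# id, mass, x, y, z\n"
          "# 1, Msol, Mpc, Mpc, Mpc\n"
          "1, 3.2e6, 0.5, 0.5, 0.5\n")
rows = list(csv.reader(io.StringIO(sample), skipinitialspace=True))
column_names, dimensions = rows[0], rows[1]
assert column_names[0][0] == '#' and dimensions[0][0] == '#'
data = rows[2:]   # -> [['1', '3.2e6', '0.5', '0.5', '0.5']]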