# number of particle types in a GADGET snapshot; change if your files do not use 6
N_TYPE = 6
_type_map = backcompat.OrderedDict({})
for name, gtypes in config_parser.items('gadget-type-mapping'):
try:
gtypes = np.array([int(q) for q in gtypes.split(",")])
if (gtypes >= N_TYPE).any() or (gtypes < 0).any():
            raise ValueError(
                "Type specified for family %s is out of bounds (%s)." % (name, gtypes))
_type_map[family.get_family(name)] = gtypes
except ConfigParser.NoOptionError:
pass
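# For illustration, a config section of the kind this loop consumes might look
# like the following (family names and type numbers here are assumptions for
# the example, not guaranteed defaults):
#
#   [gadget-type-mapping]
#   gas: 0
#   dm: 1,5
#   star: 2,3,4
#
# Each family name is resolved via family.get_family and mapped to the listed
# GADGET particle-type integers, all of which must lie in [0, N_TYPE).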
_name_map, _rev_name_map = util.setup_name_maps(
'gadget-name-mapping', gadget_blocks=True)
_translate_array_name = util.name_map_function(_name_map, _rev_name_map)
def _to_raw(s):
if isinstance(s, str) and sys.version_info[0] > 2:
return s.encode('utf-8')
else:
return s
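# For example, _to_raw('POS ') returns b'POS ' under Python 3 but 'POS '
# unchanged under Python 2, which is useful wherever block names must be
# written as raw bytes.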
def gadget_type(fam):
    # Map a family (or list of families) to the corresponding GADGET type numbers
    if isinstance(fam, list):
        l = []
        for sf in fam:
            l.extend(gadget_type(sf))
        return l
    else:
        return _type_map[fam]
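# A usage sketch, assuming the type map above assigns types 2, 3 and 4 to the
# star family (an illustrative assumption):
#
#   >>> gadget_type(family.get_family('star'))
#   array([2, 3, 4])
#   >>> gadget_type([family.get_family('gas'), family.get_family('star')])
#   [0, 2, 3, 4]
#
# A single family returns its mapped types directly; a list of families is
# flattened into one combined list.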
def _load_ahf_halos(self, filename):
f = util.open_(filename,"rt")
# get all the property names from the first, commented line
    # strip the trailing column numbers, e.g. npart(1) -> npart
keys = [re.sub('\([0-9]*\)', '', field)
for field in f.readline().split()]
# provide translations
    for i, key in enumerate(keys):
        if self.isnew:
            if key == '#npart':
                keys[i] = 'npart'
        else:
            if key == '#':
                keys[i] = 'dumb'
            elif key == 'a':
                keys[i] = 'a_axis'
            elif key == 'b':
                keys[i] = 'b_axis'
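# For reference, an AHF_halos header line looks something like this
# (illustrative, not an exact dump):
#
#   #npart(1) nvpart(2) Xc(3) Yc(4) Zc(5) ...
#
# After the re.sub above strips the '(n)' suffixes, the translations rename
# '#npart' to 'npart' for new-format files, and '#', 'a', 'b' to 'dumb',
# 'a_axis', 'b_axis' for old-format files.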
def precalculate(self):
"""Speed up future operations by precalculating the indices
for all halos in one operation. This is slow compared to
getting a single halo, however."""
self._sorted = np.argsort(
self.base[self._array], kind='mergesort') # mergesort for stability
self._boundaries = util.find_boundaries(
self.base[self._array][self._sorted])
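# A self-contained sketch of the technique precalculate relies on, using plain
# numpy in place of util.find_boundaries (the helper names and data here are
# illustrative only):

import numpy as np

grp = np.array([2, 0, 1, 1, 2, 0])            # halo id of each particle
order = np.argsort(grp, kind='mergesort')     # stable sort keeps ties in file order
# after sorting, halo h occupies order[start[h]:start[h + 1]]
start = np.searchsorted(grp[order], np.arange(grp.max() + 2))
assert list(order[start[1]:start[2]]) == [2, 3]   # particles of halo 1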
def _get_particles_for_halo(self, num):
    self._init_iord_to_fpos()
    with util.open_(self._rsFilename, 'rb') as f:
        f.seek(self._halo_offsets[num - self._halo_min])
        halo_ptcls = np.fromfile(f, dtype=np.int64,
                                 count=self._halo_lens[num - self._halo_min])
        halo_ptcls = self._iord_to_fpos[halo_ptcls]
        halo_ptcls.sort()
        return halo_ptcls
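# Toy illustration of the iord -> fpos mapping used above (values are
# hypothetical): if the snapshot stores particle ids iord = [10, 12, 11] in
# file order, _iord_to_fpos acts like {10: 0, 12: 1, 11: 2}, so a halo whose
# file lists ids [11, 10] maps to file positions [2, 0], sorted to [0, 2] for
# efficient in-order access.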
if alen != length:
raise IOError("Unexpected FORTRAN block length %d!=%d" % (alen, length))
    for readlen, buf_index, mem_index in self._load_control.iterate_with_interrupts(
            family.dm, family.dm,
            np.arange(1, h['nz']) * (h['nx'] * h['ny']),
            functools.partial(_midway_fortran_skip, f, length)):
        if buf_index is not None:
            buf = f.get_raw_memmapped(data_type, readlen)
            target_buffer[mem_index] = buf[buf_index]
        else:
            f.seek(data_type.itemsize * readlen)
alen = f.get_raw_memmapped(util._head_type)
if alen != length:
raise IOError("Unexpected FORTRAN block length (tail) %d!=%d" % (alen, length))
    ourlen_1 = self._load_control.disk_num_particles & 0xffffffff
    ourlen_3 = (self._load_control.disk_num_particles * 3) & 0xffffffff
    if buflen == ourlen_1:  # one value per particle: a scalar array
        return True
    elif buflen == ourlen_3:  # three values per particle: a vector array
        return True
    else:
        return False
except IOError:
return False
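# Sketch of what the length test above distinguishes, assuming a snapshot with
# (say) 1000 particles on disk:
#
#   buflen == ourlen_1 == 1000   -> one value per particle (scalar array)
#   buflen == ourlen_3 == 3000   -> three values per particle (vector array)
#
# Any other buflen means the file is not a readable auxiliary array.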
import glob
fs = [util.cutgz(q) for q in glob.glob(self._filename + ".*")]
res = [q[len(self._filename) + 1:]
       for q in fs if is_readable_array(q)]
# Create an empty dictionary of sets to store the loadable
# arrays for each family
rdict = dict([(x, set()) for x in self.families()])
rdict.update(dict((a, copy.copy(b))
                  for a, b in self._basic_loadable_keys.items() if a is not None))
# Now work out which families can load which arrays
# according to the stored metadata
for r in res:
fams = self._get_loadable_array_metadata(r)[1]
for x in fams or rdict.keys():
rdict[x].add(r)
self._loadable_keys_registry = rdict
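# After this loop, _loadable_keys_registry maps each family to the set of
# array names that can be lazy-loaded for it, conceptually something like
# (contents illustrative):
#
#   {gas: {'pos', 'rho', 'temp'}, dm: {'pos'}, star: {'pos', 'tform'}}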
def _set_array(self, name, value, index=None):
if name in self.base.keys():
self.base._set_array(
name, value, util.concatenate_indexing(self._slice, index))
else:
self.base._set_family_array(name, self._unifamily, value, index)
    if alen != length:
        raise IOError("Unexpected FORTRAN block length %d!=%d" % (alen, length))
    readpos = 0
    for readlen, buf_index, mem_index in self._load_control.iterate_with_interrupts(
            family.dm, family.dm,
            np.arange(1, h['nz']) * (h['nx'] * h['ny']),
            functools.partial(_midway_fortran_skip, f, length)):
        if buf_index is not None:
            buf = np.fromfile(f, _data_type, readlen)
            vel[mem_index] = buf[buf_index] * ratio
        else:
            f.seek(_data_type.itemsize * readlen, 1)
    alen = np.fromfile(f, util._head_type, 1)
    if alen != length:
        raise IOError("Unexpected FORTRAN block length (tail) %d!=%d" % (alen, length))
else:
    raise IOError("No such array")
# work out what to read second
len_post = nread_disk - len_pre
d_slice_post = copy.copy(util.concatenate_indexing(
    disk_slice, slice(len_m_pre, util.indexing_length(mem_slice))))
# the copy above is necessary to ensure we don't inadvertently modify
# the list of offsets stored elsewhere.
# d_slice_post is the disk slice relative to having read the whole block
# continuously; offset it to account for the part we skipped.
if isinstance(d_slice_post, slice):
    d_slice_post = slice(d_slice_post.start - len_pre,
                         d_slice_post.stop - len_pre,
                         d_slice_post.step)
else:
    d_slice_post -= len_pre
len_m_post = util.indexing_length(d_slice_post)
m_slice_post = util.concatenate_indexing(
    mem_slice, slice(len_m_pre, len_m_pre + len_m_post))
if util.indexing_length(d_slice_post) == 0:
    d_slice_post = None
    m_slice_post = None
if util.indexing_length(d_slice_pre) == 0:
    d_slice_pre = None
    m_slice_pre = None
else:
    d_slice_pre = None
    m_slice_pre = None
    len_post = nread_disk - len_pre
    d_slice_post = None
    m_slice_post = None
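# Worked example of the pre/post split (numbers hypothetical): suppose the
# read is interrupted after len_pre = 100 elements, with disk_slice =
# slice(80, 180) and len_m_pre = 20 elements already read before the
# interrupt. Then d_slice_post starts as slice(100, 180) relative to a
# continuous read; shifting by len_pre gives slice(0, 80) relative to the
# resumed read, and m_slice_post appends the remaining elements after the
# len_m_pre already placed in memory.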
def _get_family_slice(self, fam):
sl = util.relative_slice(self._slice,
util.intersect_slices(self._slice, self.base._get_family_slice(fam), len(self.base)))
return sl