np.testing.assert_almost_equal(header['tot'], dd.tot)
out_file_name5 = os.path.join('output','nn_out5.fits')
del dd.xi # Equivalent to not having called calculateXi
del dd.varxi
dd.write(out_file_name5)
data = fitsio.read(out_file_name5)
np.testing.assert_almost_equal(data['r_nom'], np.exp(dd.logr))
np.testing.assert_almost_equal(data['meanr'], dd.meanr)
np.testing.assert_almost_equal(data['meanlogr'], dd.meanlogr)
np.testing.assert_almost_equal(data['DD'], dd.npairs)
assert 'xi' not in data.dtype.names
assert 'varxi' not in data.dtype.names
assert 'RR' not in data.dtype.names
assert 'DR' not in data.dtype.names
header = fitsio.read_header(out_file_name5, 1)
np.testing.assert_almost_equal(header['tot'], dd.tot)
with assert_raises(TypeError):
    dd.write(out_file_name3, dr=dr)
with assert_raises(TypeError):
    dd.write(out_file_name3, rd=dr)
# Check the read function
dd.calculateXi(rr,dr) # gets xi, varxi back in dd
dd2 = treecorr.NNCorrelation(bin_size=0.1, min_sep=1., max_sep=25., sep_units='arcmin')
dd2.read(out_file_name1)
np.testing.assert_almost_equal(dd2.logr, dd.logr)
np.testing.assert_almost_equal(dd2.meanr, dd.meanr)
np.testing.assert_almost_equal(dd2.meanlogr, dd.meanlogr)
np.testing.assert_almost_equal(dd2.npairs, dd.npairs)
np.testing.assert_almost_equal(dd2.tot, dd.tot)
corr2_outfile = os.path.join('output','nn_3d.fits')
corr2_output = fitsio.read(corr2_outfile)
print('xi = ',xi)
print('from corr2 output = ',corr2_output['xi'])
print('ratio = ',corr2_output['xi']/xi)
print('diff = ',corr2_output['xi']-xi)
np.testing.assert_almost_equal(corr2_output['r_nom'], np.exp(dd.logr))
np.testing.assert_almost_equal(corr2_output['meanr'], dd.meanr)
np.testing.assert_almost_equal(corr2_output['meanlogr'], dd.meanlogr)
np.testing.assert_almost_equal(corr2_output['xi'], xi)
np.testing.assert_almost_equal(corr2_output['sigma_xi'], np.sqrt(varxi))
np.testing.assert_almost_equal(corr2_output['DD'], dd.npairs)
np.testing.assert_almost_equal(corr2_output['RR'], rr.npairs * (dd.tot / rr.tot))
np.testing.assert_almost_equal(corr2_output['DR'], dr.npairs * (dd.tot / dr.tot))
header = fitsio.read_header(corr2_outfile, 1)
np.testing.assert_almost_equal(header['tot'], dd.tot)
# And repeat with Catalogs that use x,y,z
cat = treecorr.Catalog(x=x, y=y, z=z)
rand = treecorr.Catalog(x=rx, y=ry, z=rz)
dd.process(cat)
rr.process(rand)
dr.process(cat,rand)
xi, varxi = dd.calculateXi(rr,dr)
np.testing.assert_allclose(xi, true_xi, rtol=0.1*tol_factor)
np.testing.assert_allclose(np.log(np.abs(xi)), np.log(np.abs(true_xi)),
                           rtol=0.1*tol_factor)
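
# A self-contained sketch of the NN workflow the test above exercises.
# The point counts, bin parameters, and file name here are illustrative,
# not taken from the test suite.
import os
import numpy as np
import treecorr

os.makedirs('output', exist_ok=True)
rng = np.random.RandomState(42)
x, y, z = rng.uniform(-10., 10., size=(3, 1000))
rx, ry, rz = rng.uniform(-10., 10., size=(3, 5000))
cat = treecorr.Catalog(x=x, y=y, z=z)
rand = treecorr.Catalog(x=rx, y=ry, z=rz)

dd = treecorr.NNCorrelation(bin_size=0.1, min_sep=1., max_sep=25.)
rr = treecorr.NNCorrelation(bin_size=0.1, min_sep=1., max_sep=25.)
dr = treecorr.NNCorrelation(bin_size=0.1, min_sep=1., max_sep=25.)
dd.process(cat)
rr.process(rand)
dr.process(cat, rand)

# With both rr and dr supplied, calculateXi uses the Landy-Szalay
# estimator, xi = (DD - 2 DR + RR) / RR, with each term rescaled by
# its tot ratio.
xi, varxi = dd.calculateXi(rr, dr)

# Passing rr and dr to write() adds the xi, sigma_xi, RR, and DR columns.
dd.write(os.path.join('output', 'nn_sketch.fits'), rr, dr)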
np.testing.assert_almost_equal(data['r_nom'], np.exp(ddd.logr).flatten())
np.testing.assert_almost_equal(data['u_nom'], ddd.u.flatten())
np.testing.assert_almost_equal(data['v_nom'], ddd.v.flatten())
np.testing.assert_almost_equal(data['meand1'], ddd.meand1.flatten())
np.testing.assert_almost_equal(data['meanlogd1'], ddd.meanlogd1.flatten())
np.testing.assert_almost_equal(data['meand2'], ddd.meand2.flatten())
np.testing.assert_almost_equal(data['meanlogd2'], ddd.meanlogd2.flatten())
np.testing.assert_almost_equal(data['meand3'], ddd.meand3.flatten())
np.testing.assert_almost_equal(data['meanlogd3'], ddd.meanlogd3.flatten())
np.testing.assert_almost_equal(data['meanu'], ddd.meanu.flatten())
np.testing.assert_almost_equal(data['meanv'], ddd.meanv.flatten())
np.testing.assert_almost_equal(data['zeta'], zeta.flatten())
np.testing.assert_almost_equal(data['sigma_zeta'], np.sqrt(varzeta).flatten())
np.testing.assert_almost_equal(data['DDD'], ddd.ntri.flatten())
np.testing.assert_almost_equal(data['RRR'], rrr.ntri.flatten() * (ddd.tot / rrr.tot))
header = fitsio.read_header(out_file_name2, 1)
np.testing.assert_almost_equal(header['tot']/ddd.tot, 1.)
# Check the read function
# Note: These don't need the flatten. The read function should reshape them to the right shape.
ddd2 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                               min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
                               nubins=nubins, nvbins=nvbins,
                               sep_units='arcmin', verbose=1)
ddd2.read(out_file_name1)
np.testing.assert_almost_equal(ddd2.logr, ddd.logr)
np.testing.assert_almost_equal(ddd2.u, ddd.u)
np.testing.assert_almost_equal(ddd2.v, ddd.v)
np.testing.assert_almost_equal(ddd2.meand1, ddd.meand1)
np.testing.assert_almost_equal(ddd2.meanlogd1, ddd.meanlogd1)
np.testing.assert_almost_equal(ddd2.meand2, ddd.meand2)
np.testing.assert_almost_equal(ddd2.meanlogd2, ddd.meanlogd2)
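
# A hedged sketch of the shape convention the round trip above relies on:
# on disk each quantity is a flat FITS column with one row per (r,u,v)
# cell, and read() reshapes them back to a 3-d grid. Assumes the write to
# out_file_name1 above has already run.
# import fitsio
# data = fitsio.read(out_file_name1)
# assert data['meand1'].shape == (nbins * nubins * nvbins,)  # flat on disk
# ddd2.read(out_file_name1)
# assert ddd2.meand1.shape == (nbins, nubins, nvbins)        # 3-d in memory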
typeinfo = filetype_short(infile)
if typeinfo is None:
    logging.debug('Could not determine file type of "%s"' % infile)
    return None
# print('uncompress_file: type is', typeinfo)
(ext,cmd) = get_cmd(typeinfo, compcmds)
# print('ext:', ext)
if ext is None:
    # Check for an fpack-compressed FITS file.
    if fitstype in typeinfo:
        # FITS file: check the header for the ZIMAGE=T card.
        try:
            import fitsio
            logging.debug('Checking FITS header of %s ext %s for ZIMAGE card '
                          '(fpack compression)', infile, extension)
            hdr = fitsio.read_header(infile, ext=extension)
            if hdr.get('ZIMAGE', False):
                # Compressed; run funpack to decompress.
                cmd = (funpack_cmd % (
                    extension or 0,
                    shell_escape(infile), shell_escape(uncompressed)))
                logging.debug('Fpack compressed; uncompressing with %s', cmd)
                if os.system(cmd) == 0:
                    return 'fz'
        except:
            pass
    logging.debug('File is not compressed: "%s"' % '/'.join(typeinfo))
    return None
assert uncompressed != infile
logging.debug('Compressed file (type %s), dumping to: "%s"' % (ext, uncompressed))
do_command(cmd % (shell_escape(infile), shell_escape(uncompressed)))
return ext
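
# A standalone sketch of the fpack check used above: an fpack-compressed
# FITS extension carries a ZIMAGE=T card in its header. The file name is
# illustrative.
import fitsio
hdr = fitsio.read_header('image.fits.fz', ext=1)
if hdr.get('ZIMAGE', False):
    print('Extension 1 is fpack (tile) compressed')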
Parameters
----------
filenames : list
    List of filenames to merge.
outfile : str
    Name of the output file to create.
kwargs : dict
    Keyword arguments passed to fitsio.read.

Returns
-------
None
"""
import fitsio
import ugali.utils.fileio
filenames = np.atleast_1d(filenames)
header = fitsio.read_header(filenames[0],ext=kwargs.get('ext',1))
nside = header['NSIDE']
data = ugali.utils.fileio.load_files(filenames,**kwargs)
pix = data['PIXEL']
ndupes = len(pix) - len(np.unique(pix))
if ndupes > 0:
    msg = '%i duplicate pixels during load.' % ndupes
    raise Exception(msg)
extname = 'DISTANCE_MODULUS'
distance = ugali.utils.fileio.load_files(filenames,ext=extname)[extname]
unique_distance = np.unique(distance)
# Check if distance moduli are the same...
if np.any(distance[:len(unique_distance)] != unique_distance):
    msg = "Non-matching distance modulus:"
    msg += '\n' + str(distance[:len(unique_distance)])
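
# The same consistency idea, sketched for the HEALPix NSIDE keyword
# across the input files (the file list is illustrative):
import fitsio
filenames = ['like_0001.fits', 'like_0002.fits']
nsides = [fitsio.read_header(f, ext=1)['NSIDE'] for f in filenames]
if len(set(nsides)) != 1:
    raise Exception('Non-matching NSIDE values: %s' % nsides)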
def read_image_primary_header(self, **kwargs):
    return fitsio.read_header(self.imgfn)
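
# Note: with no ext argument, fitsio.read_header reads the primary HDU
# (ext=0), so the method above is equivalent to this standalone sketch
# (file name illustrative):
# import fitsio
# primhdr = fitsio.read_header('image.fits')  # same as ext=0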
def fromFits(fn):
    from astrometry.util.fits import fits_table
    import fitsio
    hdr = fitsio.read_header(fn, ext=1)
    T = fits_table(fn)
    assert len(T) == 1
    # for col in T.get_columns():
    #     if col.strip() != col:
    #         T.rename_column(col, col.strip())
    T = T[0]
    t = hdr['PSFEX_T'].strip()
    # print('Type:', t)
    assert t == 'tractor.psfex.PsfEx'
    psft = hdr['PSF_TYPE']
    knowntypes = dict([(typestring(x), x)
                       for x in [GaussianMixturePSF,
                                 GaussianMixtureEllipsePSF]])
    psft = knowntypes[psft]
    # print('PSF type:', psft)
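
# A minimal, hypothetical illustration of the header-driven dispatch
# above: typestring() evidently maps a class to its dotted module path,
# and the PSF_TYPE card selects which class to reconstruct. All names
# below are stubs, not tractor classes.
def typestring_sketch(cls):
    return '%s.%s' % (cls.__module__, cls.__name__)

class PsfStubA(object):
    pass

class PsfStubB(object):
    pass

known = {typestring_sketch(c): c for c in [PsfStubA, PsfStubB]}
# A PSF_TYPE card holding the dotted path of PsfStubA picks out PsfStubA:
assert known[typestring_sketch(PsfStubA)] is PsfStubA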
except:
    print('coadd_wise failed:')
    import traceback
    traceback.print_exc()
    print('time up to failure:')
    t2 = Time()
    print(t2 - t1)
    return
t2 = Time()
print('coadd_wise:')
print(t2 - t1)
f,wcsfn = tempfile.mkstemp()
os.close(f)
cowcs.write_to(wcsfn)
hdr = fitsio.read_header(wcsfn)
os.remove(wcsfn)
hdr.add_record(dict(name='MAGZP', value=22.5, comment='Magnitude zeropoint (in Vega mag)'))
hdr.add_record(dict(name='UNW_SKY', value=cosky,
comment='Background value subtracted from coadd img'))
hdr.add_record(dict(name='UNW_VER', value=version['Revision'],
comment='unWISE code SVN revision'))
hdr.add_record(dict(name='UNW_DVER', value=1,
comment='unWISE data model version'))
hdr.add_record(dict(name='UNW_URL', value=version['URL'], comment='SVN URL'))
hdr.add_record(dict(name='UNW_DATE', value=datetime.datetime.now().isoformat(),
comment='unWISE run time'))
hdr.add_record(dict(name='UNW_FR0', value=frame0, comment='unWISE frame start'))
hdr.add_record(dict(name='UNW_FRN', value=nframes, comment='unWISE N frames'))
hdr.add_record(dict(name='UNW_MEDF', value=medfilt, comment='unWISE median filter sz'))
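
# A header assembled this way is typically passed to fitsio.write along
# with the coadd image; a hedged sketch (the array and file name are
# illustrative, hdr is the header built above):
import numpy as np
import fitsio
coimg_sketch = np.zeros((16, 16), dtype=np.float32)
fitsio.write('unwise-sketch.fits', coimg_sketch, header=hdr, clobber=True)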