How to use the treecorr.NNCorrelation class in TreeCorr

To help you get started, we've selected a few TreeCorr examples based on popular ways the library is used in public projects.
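
Before diving into the snippets, here is a minimal sketch of the basic pattern they all share: build a data catalog and a random catalog, accumulate pair counts with NNCorrelation.process, and turn them into a correlation function with calculateXi. The coordinates, seed, and binning values below are invented for illustration; only the calls themselves mirror the examples that follow.

import numpy as np
import treecorr

# Toy inputs (made up for illustration): points scattered uniformly in a box.
rng = np.random.RandomState(1234)
x = rng.uniform(0., 100., 10000)
y = rng.uniform(0., 100., 10000)
rx = rng.uniform(0., 100., 10000)
ry = rng.uniform(0., 100., 10000)

data_cat = treecorr.Catalog(x=x, y=y)
rand_cat = treecorr.Catalog(x=rx, y=ry)

dd = treecorr.NNCorrelation(bin_size=0.1, min_sep=1., max_sep=25.)
dd.process(data_cat)    # DD pair counts
rr = treecorr.NNCorrelation(bin_size=0.1, min_sep=1., max_sep=25.)
rr.process(rand_cat)    # RR pair counts

# (DD-RR)/RR; pass dr= and/or rd= for the other estimator variants shown below.
xi, varxi = dd.calculateXi(rr)
print(xi)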

Example from rmjarvis/TreeCorr: tests/test_rperp.py
    # The edge bins may differ slightly from the binning approximations (bin_slop and such),
    # but the differences should be very small.  (When Erika reported the problem, the differences
    # were a few percent, which ended up making a big difference in the correlation function.)
    np.testing.assert_allclose(dd1.npairs, dd2.npairs[2:-2], rtol=1.e-6)

    if __name__ == '__main__':
        # If we're running from the command line, go ahead and finish the calculation
        # This catalog has 10^6 objects, which takes quite a while.  I should really investigate
        # how to speed up the Rperp distance calculation.  Probably by having a faster over-
        # and under-estimate first, and then only do the full calculation when it seems like we
        # will actually need it.
        # Anyway, until then, let's not take forever by using last_row=200000
        get_from_wiki('nn_perp_rand.dat')
        rcat = treecorr.Catalog('data/nn_perp_rand.dat', config, last_row=200000)

        rr1 = treecorr.NNCorrelation(config)
        rr1.process(rcat, metric='OldRperp')
        rr2 = treecorr.NNCorrelation(config, min_sep=lower_min_sep, nbins=more_nbins)
        rr2.process(rcat, metric='OldRperp')
        print('rr1 npairs = ',rr1.npairs)
        print('rr2 npairs = ',rr2.npairs[2:-2])
        np.testing.assert_allclose(rr1.npairs, rr2.npairs[2:-2], rtol=1.e-6)

        dr1 = treecorr.NNCorrelation(config)
        dr1.process(dcat, rcat, metric='OldRperp')
        dr2 = treecorr.NNCorrelation(config, min_sep=lower_min_sep, nbins=more_nbins)
        dr2.process(dcat, rcat, metric='OldRperp')
        print('dr1 npairs = ',dr1.npairs)
        print('dr2 npairs = ',dr2.npairs[2:-2])
        np.testing.assert_allclose(dr1.npairs, dr2.npairs[2:-2], rtol=1.e-6)

        xi1, varxi1 = dd1.calculateXi(rr1, dr1)
Example from rmjarvis/TreeCorr: tests/test_twod.py
    print('max abs diff = ',np.max(np.abs(nk.xi - xi_brut)))
    print('max rel diff = ',np.max(np.abs(nk.xi - xi_brut)/np.abs(nk.xi)))
    np.testing.assert_allclose(nk.xi, xi_brut, atol=1.e-7)

    xi_brut = corr2d(x, y, np.ones_like(kappa), kappa, w=1./kappa_err**2, rmax=max_sep, bins=nbins)
    # Check very small, but non-zero min_sep
    nk = treecorr.NKCorrelation(min_sep=1.e-6, max_sep=max_sep, nbins=nbins, bin_type='TwoD', brute=True)
    nk.process(cat2, cat2)
    print('max abs diff = ',np.max(np.abs(nk.xi - xi_brut)))
    print('max rel diff = ',np.max(np.abs(nk.xi - xi_brut)/np.abs(nk.xi)))
    np.testing.assert_allclose(nk.xi, xi_brut, atol=1.e-7)

    # Check NN
    xi_brut, counts = corr2d(x, y, np.ones_like(kappa), np.ones_like(kappa),
                             rmax=max_sep, bins=nbins, return_counts=True)
    nn = treecorr.NNCorrelation(max_sep=max_sep, nbins=nbins, bin_type='TwoD', brute=True)
    nn.process(cat1)
    print('max abs diff = ',np.max(np.abs(nn.npairs - counts)))
    print('max rel diff = ',np.max(np.abs(nn.npairs - counts)/np.abs(nn.npairs)))
    np.testing.assert_allclose(nn.npairs, counts, atol=1.e-7)

    nn.process(cat1, cat1)
    print('max abs diff = ',np.max(np.abs(nn.npairs - counts)))
    print('max rel diff = ',np.max(np.abs(nn.npairs - counts)/np.abs(nn.npairs)))
    np.testing.assert_allclose(nn.npairs, counts, atol=1.e-7)

    xi_brut, counts = corr2d(x, y, np.ones_like(kappa), np.ones_like(kappa),
                             w=1./kappa_err**2, rmax=max_sep, bins=nbins, return_counts=True)
    nn = treecorr.NNCorrelation(max_sep=max_sep, nbins=nbins, bin_type='TwoD', brute=True)
    nn.process(cat2)
    print('max abs diff = ',np.max(np.abs(nn.weight - counts)))
    print('max rel diff = ',np.max(np.abs(nn.weight - counts)/np.abs(nn.weight)))
    np.testing.assert_allclose(nn.weight, counts, atol=1.e-7)

    nn.process(cat2, cat2)
    print('max abs diff = ',np.max(np.abs(nn.weight - counts)))
    print('max rel diff = ',np.max(np.abs(nn.weight - counts)/np.abs(nn.weight)))
    np.testing.assert_allclose(nn.weight, counts, atol=1.e-7)

    # The other two, NG and KG, can't really be checked with the brute force
    # calculator we have here, so we're counting on the above being a sufficient
    # test of all aspects of the twod binning.  I think that it is sufficient, but I
    # admit I would prefer if we had a real test of these other two pairs, along with
    # xi- for GG.
Example from rmjarvis/TreeCorr: tests/test_patch.py
    C = np.cov(xi_list.T, bias=True) * (len(xi_list)-1)
    varxi = np.diagonal(C)
    print('NG with randoms:')
    print('treecorr jackknife varxi = ',ng.varxi)
    print('direct jackknife varxi = ',varxi)
    np.testing.assert_allclose(ng.varxi, varxi)

    # Finally, test NN, which is complicated, since there are several different combinations of randoms:
    # 1. (DD-RR)/RR
    # 2. (DD-2DR+RR)/RR
    # 3. (DD-2RD+RR)/RR
    # 4. (DD-DR-RD+RR)/RR
    dd = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30., bin_slop=0,
                                var_method='jackknife')
    dd.process(lens_cat, source_cat)
    rr = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30., bin_slop=0,
                                var_method='jackknife')
    rr.process(rand_lens_cat, rand_source_cat)
    rd = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30., bin_slop=0,
                                var_method='jackknife')
    rd.process(rand_lens_cat, source_cat)
    dr = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30., bin_slop=0,
                                var_method='jackknife')
    dr.process(lens_cat, rand_source_cat)

    # Now do this using brute force calculation.
    xi1_list = []
    xi2_list = []
    xi3_list = []
    xi4_list = []
    for i in range(npatch):
        lens_cat1 = treecorr.Catalog(x=lens_cat.x[lens_cat.patch != i],
                                     y=lens_cat.y[lens_cat.patch != i])
Example from rmjarvis/TreeCorr: tests/test_ng.py
    np.testing.assert_allclose(nmx2, 0, atol=5.e-3)

    # Can also skip the randoms (even if listed in the file)
    config['ng_statistic'] = 'simple'
    config['nn_statistic'] = 'simple'  # For the later Norm tests
    treecorr.corr2(config)
    corr2_output = np.genfromtxt(os.path.join('output','ng_nmap.out'), names=True)
    np.testing.assert_allclose(corr2_output['NMap'], nmap, rtol=1.e-3)
    np.testing.assert_allclose(corr2_output['NMx'], nmx, atol=1.e-3)
    np.testing.assert_allclose(corr2_output['sig_nmap'], np.sqrt(varnmap), rtol=1.e-3)

    # And check the norm output file, which adds a few columns
    dd = treecorr.NNCorrelation(bin_size=0.1, min_sep=0.5, max_sep=40., sep_units='arcmin',
                                verbose=1)
    dd.process(lens_cat)
    rr = treecorr.NNCorrelation(bin_size=0.1, min_sep=0.5, max_sep=40., sep_units='arcmin',
                                verbose=1)
    rr.process(rand_cat)
    gg = treecorr.GGCorrelation(bin_size=0.1, min_sep=0.5, max_sep=40., sep_units='arcmin',
                                verbose=1)
    gg.process(source_cat)
    napsq, varnap = dd.calculateNapSq(rr=rr)
    mapsq, mapsq_im, mxsq, mxsq_im, varmap = gg.calculateMapSq(m2_uform='Crittenden')
    nmap_norm = nmap**2 / napsq / mapsq
    napsq_mapsq = napsq / mapsq
    corr2_output = np.genfromtxt(os.path.join('output','ng_norm.out'), names=True)
    np.testing.assert_allclose(corr2_output['NMap'], nmap, rtol=1.e-3)
    np.testing.assert_allclose(corr2_output['NMx'], nmx, atol=1.e-3)
    np.testing.assert_allclose(corr2_output['sig_nmap'], np.sqrt(varnmap), rtol=1.e-3)
    np.testing.assert_allclose(corr2_output['Napsq'], napsq, rtol=1.e-3)
    np.testing.assert_allclose(corr2_output['sig_napsq'], np.sqrt(varnap), rtol=1.e-3)
    np.testing.assert_allclose(corr2_output['Mapsq'], mapsq, rtol=1.e-3)
Example from rmjarvis/TreeCorr: tests/test_nn.py
    np.testing.assert_almost_equal(nn.logr[-1], math.log(nn.max_sep) - 0.5*nn.bin_size)
    assert len(nn.logr) == nn.nbins

    # Omit min_sep
    nn = treecorr.NNCorrelation(max_sep=20, nbins=20, bin_size=0.1)
    print(nn.min_sep,nn.max_sep,nn.bin_size,nn.nbins)
    assert nn.bin_size == 0.1
    assert nn.max_sep == 20.
    assert nn.nbins == 20
    np.testing.assert_almost_equal(nn.bin_size * nn.nbins, math.log(nn.max_sep/nn.min_sep))
    np.testing.assert_almost_equal(nn.logr[0], math.log(nn.min_sep) + 0.5*nn.bin_size)
    np.testing.assert_almost_equal(nn.logr[-1], math.log(nn.max_sep) - 0.5*nn.bin_size)
    assert len(nn.logr) == nn.nbins

    # Omit max_sep
    nn = treecorr.NNCorrelation(min_sep=5, nbins=20, bin_size=0.1)
    print(nn.min_sep,nn.max_sep,nn.bin_size,nn.nbins)
    assert nn.bin_size == 0.1
    assert nn.min_sep == 5.
    assert nn.nbins == 20
    np.testing.assert_almost_equal(nn.bin_size * nn.nbins, math.log(nn.max_sep/nn.min_sep))
    np.testing.assert_almost_equal(nn.logr[0], math.log(nn.min_sep) + 0.5*nn.bin_size)
    np.testing.assert_almost_equal(nn.logr[-1], math.log(nn.max_sep) - 0.5*nn.bin_size)
    assert len(nn.logr) == nn.nbins

    # Omit nbins
    nn = treecorr.NNCorrelation(min_sep=5, max_sep=20, bin_size=0.1)
    print(nn.min_sep,nn.max_sep,nn.bin_size,nn.nbins)
    assert nn.bin_size <= 0.1
    assert nn.min_sep == 5.
    assert nn.max_sep == 20.
    np.testing.assert_almost_equal(nn.bin_size * nn.nbins, math.log(nn.max_sep/nn.min_sep))
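
Every assertion in this block boils down to a single relation, bin_size * nbins = log(max_sep / min_sep), with logr holding bin centers half a bin in from each edge; whichever of the four binning parameters is omitted gets derived from the other three (and when nbins must be rounded up to an integer, bin_size is adjusted downward, which is why the last case only asserts bin_size <= 0.1). A quick arithmetic check of the omitted-min_sep case, using plain Python rather than TreeCorr:

import math

# Given max_sep=20, nbins=20, bin_size=0.1, the omitted min_sep follows from
# bin_size * nbins = log(max_sep / min_sep).
min_sep = 20. * math.exp(-20 * 0.1)   # about 2.707
assert abs(0.1 * 20 - math.log(20. / min_sep)) < 1e-12
print(min_sep)
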
Example from rmjarvis/TreeCorr: tests/test_index.py
    y2 = rng.random_sample(nobj)
    z2 = rng.random_sample(nobj)
    w2 = rng.random_sample(nobj)
    use = rng.randint(30, size=nobj).astype(float)
    w2[use == 0] = 0
    g12 = rng.random_sample(nobj)
    g22 = rng.random_sample(nobj)
    k2 = rng.random_sample(nobj)

    # Start with flat coords

    cat1 = treecorr.Catalog(x=x1, y=y1, w=w1, g1=g11, g2=g21, k=k1, keep_zero_weight=True)
    cat2 = treecorr.Catalog(x=x2, y=y2, w=w2, g1=g12, g2=g22, k=k2, keep_zero_weight=True)

    # Note: extend range low enough that some bins have < 100 pairs.
    nn = treecorr.NNCorrelation(min_sep=0.001, max_sep=0.01, bin_size=0.1, max_top=0)
    nn.process(cat1, cat2)
    print('rnom = ',nn.rnom)
    print('npairs = ',nn.npairs.astype(int))

    # Start with a bin near the bottom with < 100 pairs
    # This only exercises case 1 in the sampleFrom function.
    b = 1
    i1, i2, sep = nn.sample_pairs(100, cat1, cat2,
                                  min_sep=nn.left_edges[b], max_sep=nn.right_edges[b])

    print('i1 = ',i1)
    print('i2 = ',i2)
    print('sep = ',sep)
    assert nn.npairs[b] <= 100  # i.e. make sure these next tests are what we want to do.
    assert len(i1) == nn.npairs[b]
    assert len(i2) == nn.npairs[b]
Example from rmjarvis/TreeCorr: tests/test_nn.py
    nn = treecorr.NNCorrelation(min_sep=0, max_sep=20, bin_size=1, bin_slop=0.0,
                                bin_type='Linear')
    print(nn.bin_size,nn.bin_slop,nn.b)
    assert nn.bin_size == 1
    assert nn.bin_slop == 0.0
    assert nn.b == 0.0

    nn = treecorr.NNCorrelation(min_sep=5, max_sep=20, bin_size=0.1, bin_slop=2.0, verbose=0,
                                bin_type='Linear')
    print(nn.bin_size,nn.bin_slop,nn.b)
    assert nn.bin_size == 0.1
    assert nn.bin_slop == 2.0
    np.testing.assert_almost_equal(nn.b, 0.01)

    nn = treecorr.NNCorrelation(min_sep=0, max_sep=20, bin_size=0.4, bin_type='Linear')
    print(nn.bin_size,nn.bin_slop,nn.b)
    assert nn.bin_size == 0.4
    np.testing.assert_almost_equal(nn.b, 0.001)
    np.testing.assert_almost_equal(nn.bin_slop, 0.05)

    nn = treecorr.NNCorrelation(min_sep=5, max_sep=20, bin_size=0.05, bin_type='Linear')
    print(nn.bin_size,nn.bin_slop,nn.b)
    assert nn.bin_size == 0.05
    np.testing.assert_almost_equal(nn.bin_slop, 1)
    np.testing.assert_almost_equal(nn.b, 0.0025)
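
As a side note, every asserted value of b in this Linear-binning block is consistent with b = bin_slop * bin_size / max_sep. That relation is inferred here from the asserted numbers, not quoted from TreeCorr's documentation, but it makes the values easy to sanity-check:

# Assumed relation for bin_type='Linear', inferred from the asserts above:
#   b == bin_slop * bin_size / max_sep
cases = [
    (1.0,  0.0,  20., 0.0),      # first construction (bin_slop=0)
    (0.1,  2.0,  20., 0.01),
    (0.4,  0.05, 20., 0.001),    # default bin_slop reported by TreeCorr
    (0.05, 1.0,  20., 0.0025),
]
for bin_size, bin_slop, max_sep, b in cases:
    assert abs(bin_slop * bin_size / max_sep - b) < 1e-12
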
Example from rmjarvis/TreeCorr: tests/test_patch.py
    xi3_list = []
    xi4_list = []
    for i in range(npatch):
        lens_cat1 = treecorr.Catalog(x=lens_cat.x[lens_cat.patch != i],
                                     y=lens_cat.y[lens_cat.patch != i])
        source_cat1 = treecorr.Catalog(x=source_cat.x[source_cat.patch != i],
                                       y=source_cat.y[source_cat.patch != i])
        rand_lens_cat1 = treecorr.Catalog(x=rand_lens_cat.x[rand_lens_cat.patch != i],
                                          y=rand_lens_cat.y[rand_lens_cat.patch != i])
        rand_source_cat1 = treecorr.Catalog(x=rand_source_cat.x[rand_source_cat.patch != i],
                                            y=rand_source_cat.y[rand_source_cat.patch != i])
        dd1 = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30., bin_slop=0)
        dd1.process(lens_cat1, source_cat1)
        rr1 = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30., bin_slop=0)
        rr1.process(rand_lens_cat1, rand_source_cat1)
        rd1 = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30., bin_slop=0)
        rd1.process(rand_lens_cat1, source_cat1)
        dr1 = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30., bin_slop=0)
        dr1.process(lens_cat1, rand_source_cat1)
        xi1_list.append(dd1.calculateXi(rr1)[0])
        xi2_list.append(dd1.calculateXi(rr1,dr=dr1)[0])
        xi3_list.append(dd1.calculateXi(rr1,rd=rd1)[0])
        xi4_list.append(dd1.calculateXi(rr1,dr=dr1,rd=rd1)[0])

    print('(DD-RR)/RR')
    xi1_list = np.array(xi1_list)
    xi1, varxi1 = dd.calculateXi(rr)
    varxi = np.diagonal(np.cov(xi1_list.T, bias=True)) * (len(xi1_list)-1)
    print('treecorr jackknife varxi = ',varxi1)
    print('direct jackknife varxi = ',varxi)
    np.testing.assert_allclose(dd.varxi, varxi)
Example from rmjarvis/TreeCorr: tests/test_nn.py
    ncats = 3
    data_cats = []
    rand_cats = []

    s = 10.
    L = 50. * s

    x = rng.normal(0,s, (nobj,ncats) )
    y = rng.normal(0,s, (nobj,ncats) )
    data_cats = [ treecorr.Catalog(x=x[:,k],y=y[:,k]) for k in range(ncats) ]
    rx = (rng.random_sample((nobj,ncats))-0.5) * L
    ry = (rng.random_sample((nobj,ncats))-0.5) * L
    rand_cats = [ treecorr.Catalog(x=rx[:,k],y=ry[:,k]) for k in range(ncats) ]

    dd = treecorr.NNCorrelation(bin_size=0.1, min_sep=1., max_sep=25., verbose=1)
    dd.process(data_cats)
    print('dd.npairs = ',dd.npairs)

    rr = treecorr.NNCorrelation(bin_size=0.1, min_sep=1., max_sep=25., verbose=1)
    rr.process(rand_cats)
    print('rr.npairs = ',rr.npairs)

    xi, varxi = dd.calculateXi(rr)
    print('xi = ',xi)

    # Now do the same thing with one big catalog for each.
    ddx = treecorr.NNCorrelation(bin_size=0.1, min_sep=1., max_sep=25., verbose=1)
    rrx = treecorr.NNCorrelation(bin_size=0.1, min_sep=1., max_sep=25., verbose=1)
    data_catx = treecorr.Catalog(x=x.reshape( (nobj*ncats,) ), y=y.reshape( (nobj*ncats,) ))
    rand_catx = treecorr.Catalog(x=rx.reshape( (nobj*ncats,) ), y=ry.reshape( (nobj*ncats,) ))
    ddx.process(data_catx)