How to use the `coffea.util.numpy.zeros` function in coffea

To help you get started, we’ve selected a few coffea examples, based on popular ways it is used in public projects.

Secure your code as it's written: use Snyk Code to scan source code in minutes — no build needed — and fix issues immediately.

github CoffeaTeam / coffea / tests / test_lumi_tools.py View on Github external
def test_lumilist():
    # Exercise LumiData CSV loading plus LumiList construction and merging.
    # NOTE(review): this excerpt is truncated — the assertions on the computed
    # luminosity values appear in a later excerpt of the same test, which also
    # reuses these local names, so do not rename them here.
    lumidata = LumiData("tests/samples/lumi_small.csv")
    
    # First 10 (run, lumi) pairs from the loaded table, stored as uint32.
    runslumis1 = np.zeros((10,2), dtype=np.uint32)
    runslumis1[:, 0] = lumidata._lumidata[0:10, 0]
    runslumis1[:, 1] = lumidata._lumidata[0:10, 1]

    # Next 10 (run, lumi) pairs.
    runslumis2 = np.zeros((10,2), dtype=np.uint32)
    runslumis2[:, 0] = lumidata._lumidata[10:20, 0]
    runslumis2[:, 1] = lumidata._lumidata[10:20, 1]

    llist1 = LumiList(runs=runslumis1[:, 0], lumis=runslumis1[:, 1])
    llist2 = LumiList(runs=runslumis2[:, 0], lumis=runslumis2[:, 1])
    llist3 = LumiList()  # starts empty; filled via += below

    # LumiList supports in-place merging with +=.
    llist3 += llist1
    llist3 += llist2

    # Integrated luminosity for each list.
    lumi1 = lumidata.get_lumi(llist1)
    lumi2 = lumidata.get_lumi(llist2)
github CoffeaTeam / coffea / tests / test_lumi_tools.py View on Github external
# NOTE(review): fragment from the middle of a test function — `l`, `diff`,
# `runslumis`, and `lumidata` are defined in an earlier, un-shown part of
# the snippet, and the first line below has lost its original indentation.
print("lumi:", l, "diff:", diff)
    assert(diff < 1e-4)

    # test build_lumi_table_kernel
    # Typed numba Dict mapping a (run, lumi) uint32 pair to a float64 value.
    py_index = Dict.empty(
        key_type=types.Tuple([types.uint32, types.uint32]),
        value_type=types.float64
    )
    pyruns = lumidata._lumidata[:, 0].astype('u4')
    pylumis = lumidata._lumidata[:, 1].astype('u4')
    # .py_func runs the pure-Python version of the numba-compiled kernel.
    LumiData._build_lumi_table_kernel.py_func(pyruns, pylumis, lumidata._lumidata, py_index)

    # Python-built index should have the same number of entries as the
    # compiled one.
    assert(len(py_index) == len(lumidata.index))

    # test get_lumi_kernel
    # Single-element output buffer the kernel accumulates into.
    py_tot_lumi = np.zeros((1, ), dtype=np.float64)
    LumiData._get_lumi_kernel.py_func(runslumis[:, 0], runslumis[:, 1], py_index, py_tot_lumi)

    # Pure-Python kernel result should agree with the earlier value `l`.
    assert(abs(py_tot_lumi[0] - l) < 1e-4)
github CoffeaTeam / coffea / coffea / lumi_tools / lumi_tools.py View on Github external
def clear(self):
        """Reset this lumi list to an empty (0, 2) run/lumi array."""
        empty = np.zeros(shape=(0, 2))
        self.array = empty
github CoffeaTeam / coffea / coffea / hist / hist_tools.py View on Github external
def dense_op(array):
            """Rebin *array*: sum each old dense bin into its mapped new bin.

            `binmap[old] -> new` may be many-to-one, so contributions are
            accumulated with +=.
            """
            rebinned = np.zeros(out._dense_shape, dtype=out._dtype)
            for src, dst in enumerate(binmap):
                rebinned[view_ax(dst)] += array[view_ax(src)]
            return rebinned
github CoffeaTeam / coffea / coffea / lookup_tools / txt_converters.py View on Github external
# NOTE(review): fragment from the middle of a converter function — `pars`,
# `columns`, `layout`, `bins`, `name`, and `nBinnedVars` are defined outside
# this excerpt, and the first line below has lost its original indentation.
offset_col = 0
    offset_name = 1
    bin_order = []
    for i in range(nBinnedVars):
        binMins = None
        binMaxs = None
        if i == 0:
            # First variable: bin edges come straight from the min/max columns.
            binMins = np.unique(pars[columns[0]])
            binMaxs = np.unique(pars[columns[1]])
            if np.all(binMins[1:] == binMaxs[:-1]):
                # Contiguous binning: edges are the union of mins and maxs.
                bins[layout[i + offset_name]] = np.union1d(binMins, binMaxs)
            else:
                warnings.warn('binning for file for %s is malformed in variable %s' % (name, layout[i + offset_name]))
                # Malformed binning: keep the mins plus only the final max.
                bins[layout[i + offset_name]] = np.union1d(binMins, binMaxs[-1:])
        else:
            # NOTE(review): np.int and np.double are deprecated aliases
            # (np.int was removed in NumPy 1.24); prefer int / np.float64.
            counts = np.zeros(0, dtype=np.int)
            allBins = np.zeros(0, dtype=np.double)
            # Later variables are binned separately within each bin of the
            # first variable, producing a jagged (per-outer-bin) structure.
            for binMin in bins[bin_order[0]][:-1]:
                binMins = np.unique(pars[np.where(pars[columns[0]] == binMin)][columns[i + offset_col]])
                binMaxs = np.unique(pars[np.where(pars[columns[0]] == binMin)][columns[i + offset_col + 1]])
                theBins = None
                if np.all(binMins[1:] == binMaxs[:-1]):
                    theBins = np.union1d(binMins, binMaxs)
                else:
                    warnings.warn('binning for file for %s is malformed in variable %s' % (name, layout[i + offset_name]))
                    theBins = np.union1d(binMins, binMaxs[-1:])
                allBins = np.append(allBins, theBins)
                counts = np.append(counts, theBins.size)
            bins[layout[i + offset_name]] = awkward.JaggedArray.fromcounts(counts, allBins)
        bin_order.append(layout[i + offset_name])
        offset_col += 1
github CoffeaTeam / coffea / coffea / lookup_tools / txt_converters.py View on Github external
# NOTE(review): near-duplicate of the previous converter excerpt, cut one
# line later (`offset_col = 0` is missing here). `pars`, `columns`, `layout`,
# `bins`, `name`, `offset_col`, and `nBinnedVars` come from outside this
# excerpt; the first line below has lost its original indentation.
offset_name = 1
    bin_order = []
    for i in range(nBinnedVars):
        binMins = None
        binMaxs = None
        if i == 0:
            # First variable: bin edges come straight from the min/max columns.
            binMins = np.unique(pars[columns[0]])
            binMaxs = np.unique(pars[columns[1]])
            if np.all(binMins[1:] == binMaxs[:-1]):
                bins[layout[i + offset_name]] = np.union1d(binMins, binMaxs)
            else:
                warnings.warn('binning for file for %s is malformed in variable %s' % (name, layout[i + offset_name]))
                bins[layout[i + offset_name]] = np.union1d(binMins, binMaxs[-1:])
        else:
            # NOTE(review): np.int and np.double are deprecated aliases
            # (np.int was removed in NumPy 1.24); prefer int / np.float64.
            counts = np.zeros(0, dtype=np.int)
            allBins = np.zeros(0, dtype=np.double)
            # Later variables are binned per bin of the first variable,
            # producing a jagged structure.
            for binMin in bins[bin_order[0]][:-1]:
                binMins = np.unique(pars[np.where(pars[columns[0]] == binMin)][columns[i + offset_col]])
                binMaxs = np.unique(pars[np.where(pars[columns[0]] == binMin)][columns[i + offset_col + 1]])
                theBins = None
                if np.all(binMins[1:] == binMaxs[:-1]):
                    theBins = np.union1d(binMins, binMaxs)
                else:
                    warnings.warn('binning for file for %s is malformed in variable %s' % (name, layout[i + offset_name]))
                    theBins = np.union1d(binMins, binMaxs[-1:])
                allBins = np.append(allBins, theBins)
                counts = np.append(counts, theBins.size)
            bins[layout[i + offset_name]] = awkward.JaggedArray.fromcounts(counts, allBins)
        bin_order.append(layout[i + offset_name])
        offset_col += 1

    # skip nvars to the variable columns

    # skip nvars to the variable columns
github CoffeaTeam / coffea / coffea / lookup_tools / dense_evaluated_lookup.py View on Github external
# NOTE(review): fragment from inside dense_evaluated_lookup's constructor —
# `dims`, `values`, `feval_dim`, `whattype`, `deepcopy`, and `numbaize` are
# defined outside this excerpt; the first line below has lost its indentation.
self._dimension = len(dims)
        if self._dimension == 0:
            raise Exception('Could not define dimension for {}'.format(whattype))
        self._axes = deepcopy(dims)
        self._feval_dim = None
        # NOTE(review): values.dtype is read here *before* the isinstance
        # check below — a non-ndarray `values` would raise AttributeError
        # instead of the intended TypeError. Consider reordering these checks.
        vals_are_strings = ('string' in values.dtype.name or
                            'str' in values.dtype.name or
                            'unicode' in values.dtype.name or
                            'bytes' in values.dtype.name)  # ....
        if not isinstance(values, np.ndarray):
            raise TypeError('values is not a numpy array, but %r' % type(values))
        if not vals_are_strings:
            raise Exception('Non-string values passed to dense_evaluated_lookup!')
        if feval_dim is None:
            raise Exception('Evaluation dimensions not specified in dense_evaluated_lookup')
        # Compile each string entry into a callable of the single variable 'x'.
        funcs = np.zeros(shape=values.shape, dtype='O')
        for i in range(values.size):
            idx = np.unravel_index(i, shape=values.shape)
            funcs[idx] = numbaize(values[idx], ['x'])
        self._values = deepcopy(funcs)
        # TODO: support for multidimensional functions and functions with variables other than 'x'
        if len(feval_dim) > 1:
            raise Exception('lookup_tools.evaluator only accepts 1D functions right now!')
        self._feval_dim = feval_dim[0]
github CoffeaTeam / coffea / coffea / lookup_tools / csv_converters.py View on Github external
# NOTE(review): fragment of a CSV corrections converter, truncated at both
# ends — `corrections` and `columns` are defined before this excerpt and the
# loop body continues after it.
)

    # The first four columns together identify a correction (its label).
    all_names = corrections[[columns[i] for i in range(4)]]
    labels = np.unique(corrections[[columns[i] for i in range(4)]])
    wrapped_up = {}
    for label in labels:
        # Bin edges per axis: union of the per-row min and max columns.
        etaMins = np.unique(corrections[np.where(all_names == label)][columns[4]])
        etaMaxs = np.unique(corrections[np.where(all_names == label)][columns[5]])
        etaBins = np.union1d(etaMins, etaMaxs).astype(np.double)
        ptMins = np.unique(corrections[np.where(all_names == label)][columns[6]])
        ptMaxs = np.unique(corrections[np.where(all_names == label)][columns[7]])
        ptBins = np.union1d(ptMins, ptMaxs).astype(np.double)
        discrMins = np.unique(corrections[np.where(all_names == label)][columns[8]])
        discrMaxs = np.unique(corrections[np.where(all_names == label)][columns[9]])
        discrBins = np.union1d(discrMins, discrMaxs).astype(np.double)
        # Dense value grid indexed [discr, pt, eta]; note the reversed axis
        # order relative to the loops below.
        vals = np.zeros(shape=(len(discrBins) - 1, len(ptBins) - 1, len(etaBins) - 1),
                        dtype=corrections.dtype[10])
        # NOTE(review): triple nested np.where per cell is O(bins * rows);
        # fine for small tables but slow on large correction files.
        for i, eta_bin in enumerate(etaBins[:-1]):
            for j, pt_bin in enumerate(ptBins[:-1]):
                for k, discr_bin in enumerate(discrBins[:-1]):
                    this_bin = np.where((all_names == label) &
                                        (corrections[columns[4]] == eta_bin) &
                                        (corrections[columns[6]] == pt_bin) &
                                        (corrections[columns[8]] == discr_bin))
                    vals[k, j, i] = corrections[this_bin][columns[10]][0]
        # Decode the label's fields to plain strings for use as a dict key.
        label_decode = []
        for i in range(len(label)):
            label_decode.append(label[i])
            if isinstance(label_decode[i], bytes):
                label_decode[i] = label_decode[i].decode()
            else:
                label_decode[i] = str(label_decode[i])