How to use numpy - 10 common examples

To help you get started, we've selected ten numpy examples based on popular ways the library is used in public projects.


Example 1: Algomorph/LevelSetFusion-Python (archive/warpAKAP2D.py)
import cv2
import numpy as np


def main():
    image = cv2.imread("test_image1.png")
    step_size_px = 10
    vertex_row_count = image.shape[0] // step_size_px
    vertex_col_count = image.shape[1] // step_size_px
    vertex_count = vertex_row_count * vertex_col_count

    face_row_count = vertex_row_count - 1
    face_col_count = vertex_col_count - 1

    print("Grid size: ", vertex_row_count, " x ", vertex_col_count)
    print("Vertex count: ", vertex_count)
    warp_coefficient_count = 2 * vertex_count

    # G = np.zeros((2 * face_col_count * face_row_count, vertex_col_count * vertex_row_count), np.float32)
    G = np.zeros(
        (face_col_count * vertex_row_count + face_row_count * vertex_col_count, vertex_count),
        np.float32)

    ix_G_row = 0
    for ix_dx_row in range(vertex_row_count):
        for ix_dx_col in range(face_col_count):
            col_index0 = vertex_col_count * ix_dx_row + ix_dx_col
            col_index1 = col_index0 + 1
            G[ix_G_row, col_index0] = -1.0
            G[ix_G_row, col_index1] = 1.0
            ix_G_row += 1
    for ix_dy_row in range(face_row_count):
        for ix_dy_col in range(vertex_col_count):
            col_index0 = vertex_col_count * ix_dy_row + ix_dy_col
            col_index1 = col_index0 + vertex_col_count
            G[ix_G_row, col_index0] = -1.0
            G[ix_G_row, col_index1] = 1.0
            ix_G_row += 1
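The pattern to take away: preallocate a dense operator with np.zeros((rows, cols), np.float32), then fill in the -1/+1 finite-difference entries by index. A minimal self-contained sketch of the same idiom on a 1-D signal (names and sizes are illustrative, not from the project):

import numpy as np

# Forward-difference operator for a 1-D signal of length n:
# row i of D computes x[i + 1] - x[i].
n = 5
D = np.zeros((n - 1, n), np.float32)
for i in range(n - 1):
    D[i, i] = -1.0
    D[i, i + 1] = 1.0

x = np.arange(n, dtype=np.float32)
print(D @ x)  # [1. 1. 1. 1.]: consecutive samples differ by 1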
Example 2: robmcmullen/asmgen (asmgen.py)
def split_bit_stream(self, width, bit_stream, high_bits):
    # Split the bit stream into 7-bit chunks, padding the tail with
    # filler bits and reversing the bit order within each chunk.
    byte_width = width // 7
    bit_pos = 0
    filler_bit = "0"
    byte_splits = np.zeros(byte_width, dtype=np.uint8)

    for byte_index in range(byte_width):
        remaining_bits = len(bit_stream) - bit_pos

        if remaining_bits < 0:
            bit_chunk = filler_bit * 7
        elif remaining_bits < 7:
            bit_chunk = bit_stream[bit_pos:] + filler_bit * (7 - remaining_bits)
        else:
            bit_chunk = bit_stream[bit_pos:bit_pos + 7]

        bit_chunk = bit_chunk[::-1]
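The numpy usage here is np.zeros(n, dtype=np.uint8) as a preallocated byte buffer that string-built bit chunks get packed into. A simplified, runnable sketch of the idiom, deriving byte_width from the stream length rather than a pixel width (an approximation of the project's logic, not a copy):

import numpy as np

# Pack a bit string into 7-bit chunks, padding the tail with "0"
# and reversing each chunk as in split_bit_stream above.
bit_stream = "110100101100111"
byte_width = (len(bit_stream) + 6) // 7
packed = np.zeros(byte_width, dtype=np.uint8)
for i in range(byte_width):
    chunk = bit_stream[i * 7:(i + 1) * 7].ljust(7, "0")
    packed[i] = int(chunk[::-1], 2)
print(packed)  # [ 75 102   1]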
Example 3: phulin/rebook (rebook/block.py)
import cv2
import numpy as np
import scipy.spatial
import sys

import algorithm
import binarize
import lib
from lib import GREEN

N_values = np.array([64, 64, 64, 128, 128, 128, 256, 256, 256, 256])
k_values = np.array([5, 4, 3, 5, 4, 3, 5, 4, 3, 2])
s_values = N_values.astype(np.float64) / k_values

theta_values = np.arange(32) / np.pi

# radius of circle for "nearby" CCs projection
radius = 100
radius_sq = radius ** 2

epsilon = 2.8

def pack_label(s, theta):
    return theta * s_values.shape[0] + s

def unpack_label(label):
    s_len = s_values.shape[0]
    return label % s_len, label // s_len

def V_p(nearby_centroids, centroids_rotated, ellipses_sheared):
    result = np.zeros((centroids_rotated[0].shape[0],
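Two numpy patterns stand out in this excerpt: elementwise array division after an astype(np.float64) cast, and encoding an (s, theta) pair into a single integer label with modulo and floor division, as in pack_label/unpack_label. A compact sketch with illustrative values:

import numpy as np

# Elementwise float division, and a reversible (s, theta) -> label encoding.
N_values = np.array([64, 64, 64, 128, 128])
k_values = np.array([5, 4, 3, 5, 4])
s_values = N_values.astype(np.float64) / k_values  # per-entry float division
s_len = s_values.shape[0]

label = 7 * s_len + 3                 # pack_label(3, 7)
print(label % s_len, label // s_len)  # 3 7, the pair recovered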
Example 4: albumentations-team/albumentations (albumentations/augmentations/functional.py)
import cv2
import numpy as np


def _equalize_pil(img, mask=None):
    histogram = cv2.calcHist([img], [0], mask, [256], (0, 256)).ravel()
    h = [_f for _f in histogram if _f]

    if len(h) <= 1:
        return img.copy()

    step = np.sum(h[:-1]) // 255
    if not step:
        return img.copy()

    lut = np.empty(256, dtype=np.uint8)
    n = step // 2
    for i in range(256):
        lut[i] = min(n // step, 255)
        n += histogram[i]

    return cv2.LUT(img, np.array(lut))
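This histogram-equalization helper builds a lookup table in an uninitialized np.empty(256, dtype=np.uint8) buffer and applies it with cv2.LUT. The same table can be applied with plain numpy fancy indexing; here is a toy sketch (the invert mapping is illustrative only):

import numpy as np

# Build a 256-entry lookup table with np.empty and apply it by fancy
# indexing, the pure-numpy counterpart of the cv2.LUT call above.
img = np.array([[0, 64], [128, 255]], dtype=np.uint8)
lut = np.empty(256, dtype=np.uint8)
for i in range(256):
    lut[i] = 255 - i  # toy mapping: invert intensities
print(lut[img])  # [[255 191] [127   0]]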
Example 5: QMCPACK/qmcpack (utils/afqmctools/afqmctools/hamiltonian/mol.py)
    # ERI[:,jl]
    eri_col = mol.intor('int2e_sph',
                         shls_slice=(0,mol.nbas,0,mol.nbas,sj,sj+1,sl,sl+1))
    cj, cl = max(j-dims[sj],0), max(l-dims[sl],0)
    chol_vecs[0] = numpy.copy(eri_col[:,:,cj,cl].reshape(nao*nao)) / delta_max**0.5

    nchol = 0
    while abs(delta_max) > max_error:
        # Update cholesky vector
        start = time.time()
        # M'_ii = \sum_x L_i^x L_i^x
        Mapprox += chol_vecs[nchol] * chol_vecs[nchol]
        # D_ii = M_ii - M'_ii
        delta = diag - Mapprox
        nu = numpy.argmax(numpy.abs(delta))
        delta_max = numpy.abs(delta[nu])
        # Compute ERI chunk.
        # shls_slice computes shells of integrals as determined by the angular
        # momentum of the basis function and the number of contraction
        # coefficients. Need to search for AO index within this shell indexing
        # scheme.
        # AO index.
        j = nu // nao
        l = nu % nao
        # Associated shell index.
        sj = numpy.searchsorted(dims, j)
        sl = numpy.searchsorted(dims, l)
        if dims[sj] != j and j != 0:
            sj -= 1
        if dims[sl] != l and l != 0:
            sl -= 1
        # Compute ERI chunk.
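Note that this file imports numpy unaliased, so calls read numpy.argmax rather than np.argmax. The two calls carrying the algorithm are numpy.argmax over numpy.abs, which picks the largest remaining diagonal error as the next pivot, and numpy.searchsorted, which maps an AO index back to the shell containing it (the sj/sl decrements afterwards adjust for exact matches). Both in isolation, with illustrative arrays:

import numpy as np

# Pivot selection and shell lookup, as in the Cholesky loop above.
delta = np.array([0.1, -2.5, 0.7])
nu = np.argmax(np.abs(delta))    # index of the largest |entry|
print(nu, np.abs(delta[nu]))     # 1 2.5

dims = np.array([0, 3, 7, 12])   # starting AO index of each shell
print(np.searchsorted(dims, 5))  # 2: AO index 5 lies past dims[1] = 3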
Example 6: lucasmaystre/choix (tests/test_utils.py)
import numpy as np

from choix import compare


def test_compare_rankings():
    """``compare`` should work as expected for rankings."""
    params = np.array([0, 100, -100, -100, -100])
    x1 = compare((3, 0), params, rank=True)
    assert np.array_equal(x1, np.array([0, 3]))
    x2 = compare((3, 0, 1), params, rank=True)
    assert np.array_equal(x2, np.array([1, 0, 3]))
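The assertion pattern here is np.array_equal, which compares shape and contents in one call, paired with choix's compare(..., rank=True), which returns the items as a ranked array. The comparison helper on its own:

import numpy as np

# np.array_equal is True only when shapes and all elements match.
print(np.array_equal(np.array([0, 3]), np.array([0, 3])))     # True
print(np.array_equal(np.array([0, 3]), np.array([0, 3, 1])))  # False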
Example 7: simpeg/simpeg (tests/em/new_fdem/forward/test_FDEM_primsec.py)
# secondary mesh
h = [(csz, npadz-4, -pf), (csz, ncz), (csz, npadz-4, pf)]
meshs = Mesh.TensorMesh(3 * [h], x0='CCC')

# mappings
primaryMapping = (
    Maps.ExpMap(meshp) *
    Maps.SurjectFull(meshp) *
    Maps.Projection(nP=8, index=[0])
)

mapping = (
    Maps.ExpMap(meshs) *
    Maps.ParametrizedBlockInLayer(meshs) *
    Maps.Projection(
        nP=8, index=np.hstack([np.r_[0], np.arange(0, 8)])
    )
)

primaryMap2Meshs = (
    Maps.ExpMap(meshs) *
    Maps.SurjectFull(meshs) *
    Maps.Projection(nP=8, index=[0])
)


class PrimSecFDEMTest(object):

    # --------------------- Run some tests! --------------------- #
    def DataTest(self):
        print('\nTesting Data')
        dpred_primsec = self.secondaryProblem.dpred(
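Buried in the mappings is a common index-building idiom: np.r_[0] creates a one-element array (equivalent to np.array([0])), np.arange(0, 8) a range, and np.hstack concatenates them into the index vector passed to Maps.Projection. Reproduced on its own:

import numpy as np

# The index argument from the mapping above, in isolation.
index = np.hstack([np.r_[0], np.arange(0, 8)])
print(index)  # [0 0 1 2 3 4 5 6 7]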
Example 8: robmcmullen/omnivore (test/test_add_file.py)
def test_small(self):
    assert len(self.image.files) == self.num_files_in_sample

    data = np.asarray([0xff, 0xff, 0x00, 0x60, 0x01, 0x60, 1, 2], dtype=np.uint8)
    self.image.write_file("TEST.XEX", None, data)
    assert len(self.image.files) == self.num_files_in_sample + 1

    data2 = np.frombuffer(self.image.find_file("TEST.XEX"), dtype=np.uint8)
    assert np.array_equal(data, data2[0:len(data)])
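The test converts a Python list to a typed array with np.asarray(..., dtype=np.uint8), reads raw bytes back with np.frombuffer, and compares a prefix slice with np.array_equal. A self-contained round trip (tobytes() stands in for the disk-image file here):

import numpy as np

# Round-trip: serialize a uint8 array and read it back with np.frombuffer.
data = np.asarray([0xff, 0xff, 0x00, 0x60, 0x01, 0x60, 1, 2], dtype=np.uint8)
raw = data.tobytes()
data2 = np.frombuffer(raw, dtype=np.uint8)
print(np.array_equal(data, data2[0:len(data)]))  # True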
Example 9: ucbrise/clipper (containers/python/test_sklearn_cifar_container.py)
import numpy as np
import pandas as pd


def load_cifar(cifar_location, cifar_filename="train.data", norm=False):
    cifar_path = cifar_location + "/" + cifar_filename
    print("Source file: %s" % cifar_path)
    df = pd.read_csv(cifar_path, sep=",", header=None)
    data = df.values
    print("Number of image files: %d" % len(data))
    y = data[:, 0]
    X = data[:, 1:]
    Z = X
    if norm:
        mu = np.mean(X.T, 0)
        sigma = np.var(X.T, 0)
        Z = (X.T - mu) / np.array([np.sqrt(z) if z > 0 else 1. for z in sigma])
        Z = Z.T
    return (Z, y)
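The normalization branch computes statistics with np.mean and np.var on the transposed matrix, guarding zero-variance entries with a list comprehension before dividing. The same guard can be written with np.where; a sketch standardizing columns (the per-sample transposition of the original is omitted for brevity):

import numpy as np

# Column-wise standardization with a zero-variance guard, using
# np.where in place of the list comprehension above.
X = np.array([[1.0, 2.0], [3.0, 2.0], [5.0, 2.0]])
mu = np.mean(X, axis=0)
sigma = np.var(X, axis=0)
scale = np.where(sigma > 0, np.sqrt(sigma), 1.0)
Z = (X - mu) / scale
print(Z)  # the constant second column stays zero instead of dividing by zero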
Example 10: oxfordcontrol/osqp-python (tests/qp_problems/utils/data_struct.py)
        # Initialize scaling
        d = np.ones(n + m)
        d_temp = np.ones(n + m)

        # Define reduced KKT matrix to scale
        KKT = spa.vstack([
              spa.hstack([P, A.T]),
              spa.hstack([A, spa.csc_matrix((m, m))])]).tocsc()

        # Iterate Scaling
        for i in range(settings.scaling):
            for j in range(n + m):
                norm_col_j = spa.linalg.norm(KKT[:, j],
                                             np.inf)
                if norm_col_j > SCALING_REG:
                    d_temp[j] = 1./(np.sqrt(norm_col_j))

            S_temp = spa.diags(d_temp)
            d = np.multiply(d, d_temp)
            KKT = S_temp.dot(KKT.dot(S_temp))

        # Obtain Scaler Matrices
        D = spa.diags(d[:n])
        if m == 0:
            # spa.diags() will throw an error if fed with an empty array
            E = spa.csc_matrix((0, 0))
        else:
            E = spa.diags(d[n:])

        # Scale problem Matrices
        P = D.dot(P.dot(D)).tocsc()
        A = E.dot(A.dot(D)).tocsc()
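This is a matrix-equilibration loop: each column of the KKT matrix is scaled by the inverse square root of its infinity norm (computed with spa.linalg.norm from scipy.sparse), the cumulative scaling is tracked with np.multiply, and the scaled matrix is rebuilt via spa.diags. One pass on a tiny matrix (values illustrative):

import numpy as np
import scipy.sparse as spa
import scipy.sparse.linalg  # makes spa.linalg available

# One scaling pass: divide each column by the square root of its
# infinity norm, accumulating the scaler with np.multiply.
KKT = spa.csc_matrix(np.array([[4.0, 1.0], [1.0, 9.0]]))
d = np.ones(2)
d_temp = np.array([1.0 / np.sqrt(spa.linalg.norm(KKT[:, j], np.inf))
                   for j in range(2)])
S = spa.diags(d_temp)
d = np.multiply(d, d_temp)
KKT = S.dot(KKT.dot(S))
print(KKT.toarray())  # unit diagonal after one pass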