How to use the numpy.array function in numpy

To help you get started, we’ve selected a few numpy.array examples, drawn from popular ways the function is used in public projects.

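Before working through the project snippets below, here is a minimal, self-contained sketch of the most common np.array patterns (all values are illustrative):

import numpy as np

# Build a 1-D array from a Python list; the dtype is inferred.
a = np.array([0, 100, -100])

# Force a specific dtype at construction time.
b = np.array([1.0, 2.5, 3.0], dtype=np.float32)

# Nested lists become a 2-D array.
m = np.array([[1, 2], [3, 4]])

# np.array copies an existing array by default; np.asarray avoids the copy when possible.
c = np.array(a)
assert np.array_equal(a, c)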

github lucasmaystre / choix / tests / test_utils.py
import numpy as np
from choix import compare  # assuming the top-level choix API; this is the function under test


def test_compare_rankings():
    """``compare`` should work as expected for rankings."""
    params = np.array([0, 100, -100, -100, -100])
    x1 = compare((3, 0), params, rank=True)
    assert np.array_equal(x1, np.array([0, 3]))
    x2 = compare((3, 0, 1), params, rank=True)
    assert np.array_equal(x2, np.array([1, 0, 3]))

github imatge-upc / retrieval-2016-deepvision / rerank.py
def rerank_top_n(self,query_feats,ranking,query_name):

        distances = []
        locations = []
        frames = []
        class_ids = []
        #query_feats = query_feats.T

        # query class (+1 because class 0 is the background)
        cls_ind = np.where(np.array(self.queries) == str(query_name))[0][0] + 1

        for im_ in ranking[0:self.top_n]:

            if self.dataset == 'paris':
                frame_to_read = os.path.join(self.image_path,im_.split('_')[1],im_ + '.jpg')
            elif self.dataset == 'oxford':
                frame_to_read = os.path.join(self.image_path,im_ + '.jpg')

            frames.append(frame_to_read)
            # Get features of current element
            feats,boxes,scores = self.extract_feat_image(frame_to_read)

            # we rank based on class scores
            if self.class_scores:

                scores = feats[:,cls_ind]
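
The class lookup above uses a common idiom: convert a Python list to an array so that an elementwise comparison yields a boolean mask, then recover the matching index with np.where. A small, hypothetical sketch of the same pattern:

import numpy as np

queries = ["defense", "eiffel", "louvre"]   # illustrative query names
query_name = "eiffel"

# Elementwise comparison gives a boolean array; np.where returns the matching indices.
cls_ind = np.where(np.array(queries) == query_name)[0][0] + 1   # +1 because class 0 is the background
print(cls_ind)   # 2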

github joshspeagle / dynesty / dynesty / dynamicsampler.py
        saved_n = np.array(self.saved_n)
        saved_bounditer = np.array(self.saved_bounditer)
        saved_scale = np.array(self.saved_scale)
        saved_batch = np.array(self.saved_batch)
        nsaved = len(saved_n)

        # Grab results from new run.
        new_id = np.array(self.new_id) + max(saved_id) + 1
        new_u = np.array(self.new_u)
        new_v = np.array(self.new_v)
        new_logl = np.array(self.new_logl)
        new_nc = np.array(self.new_nc)
        new_boundidx = np.array(self.new_boundidx)
        new_it = np.array(self.new_it)
        new_n = np.array(self.new_n)
        new_bounditer = np.array(self.new_bounditer)
        new_scale = np.array(self.new_scale)
        nnew = len(new_n)
        llmin, llmax = self.new_logl_min, self.new_logl_max

        # Reset saved results.
        self.saved_id = []
        self.saved_u = []
        self.saved_v = []
        self.saved_logl = []
        self.saved_logvol = []
        self.saved_logwt = []
        self.saved_logz = []
        self.saved_logzvar = []
        self.saved_h = []
        self.saved_nc = []
        self.saved_boundidx = []
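
The bookkeeping above follows a standard pattern: accumulate results in plain Python lists while sampling, then convert them with np.array only when vectorised operations are needed. A generic sketch of that idea (the names below are illustrative, not dynesty's API):

import numpy as np

saved_logl = [-0.5 * i for i in range(100)]    # results kept from an earlier run
new_logl = [-0.25 * i for i in range(50)]      # results from a new run

# Convert once, then merge the two runs and process them as arrays.
combined = np.concatenate([np.array(saved_logl), np.array(new_logl)])
combined = combined[np.argsort(combined)]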

github polmorenoc / opendr / chumpy / ch_ops.py
            a2 = np.arange(a.size).reshape(a.shape) if wrt is a else np.zeros(a.shape)
            b2 = np.arange(b.size).reshape(b.shape) if (wrt is b and wrt is not a) else np.zeros(b.shape)
            IS = np.arange(output_sz)
            JS = np.asarray((np.add(a2,b2)).ravel(), np.uint32)

            _bs_setup_data2[uid] = sp.csc_matrix((np.arange(IS.size), (IS, JS)), shape=(output_sz, input_sz))

        result = copy_copy(_bs_setup_data2[uid])
        if isinstance(data, np.ndarray):
            result.data = data[result.data]
        else: # assumed scalar
            result.data = np.empty(result.nnz)
            result.data.fill(data)

    if np.prod(result.shape) == 1:
        return np.array(data)
    else:
        return result
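
The size-1 special case above uses two handy behaviours: np.array wraps a bare scalar into a 0-d array, and np.prod of a shape tuple gives the total element count (the product over an empty shape is 1):

import numpy as np

x = np.array(3.0)           # 0-d array wrapping a scalar
print(x.shape)              # ()
print(np.prod(x.shape))     # 1.0
print(x.item())             # 3.0, back to a plain Python float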

github sibyjackgrove / SolarPV-DER-simulation-utility / pvder / dynamic_simulation.py
        self.va_t = self._va_t = np.array(self.PV_model.va)
        
        self.ma_absolute_t = self._ma_absolute_t = np.array(abs(self.PV_model.ma))
        self.Varms_t  = self._Varms_t = np.array(abs(self.PV_model.va)/math.sqrt(2))
        
        if type(self.PV_model).__name__ == 'SolarPV_DER_ThreePhase':
            self.mb_absolute_t = self._mb_absolute_t = np.array(abs(self.PV_model.mb))
            self.mc_absolute_t = self._mc_absolute_t = np.array(abs(self.PV_model.mc))
            
            self.Vbrms_t  = self._Vbrms_t = np.array(abs(self.PV_model.vb)/math.sqrt(2))
            self.Vcrms_t  = self._Vcrms_t = np.array(abs(self.PV_model.vc)/math.sqrt(2))
            
            self.ib_t = self._ib_t = np.array(self.PV_model.ib)
            self.mb_t = self._mb_t = np.array(self.PV_model.mb)
            self.vtb_t = self._vtb_t = np.array(self.PV_model.vtb)
            self.vb_t = self._vb_t = np.array(self.PV_model.vb)
            
            self.ic_t = self._ic_t = np.array(self.PV_model.ic)
            self.mc_t = self._mc_t = np.array(self.PV_model.mc)
            self.vtc_t = self._vtc_t = np.array(self.PV_model.vtc)
            self.vc_t = self._vc_t = np.array(self.PV_model.vc)
        
        self.Irms_t = self._Irms_t = np.array(self.PV_model.Irms)
        self.Ppv_t = self._Ppv_t = np.array(self.PV_model.Ppv)
        self.S_PCC_t = self._S_PCC_t = np.array(self.PV_model.S_PCC)
        self.S_t = self._S_t = np.array(self.PV_model.S)
        self.Vtrms_t = self._Vtrms_t = np.array(self.PV_model.Vtrms)
        self.Vrms_t = self._Vrms_t = np.array(self.PV_model.Vrms)
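
Throughout this block, np.array wraps scalar (often complex) phasor values so they can later be stacked into time-series arrays; the RMS quantities are simply abs(phasor)/sqrt(2). A rough, standalone sketch of that idea with a made-up phase-A voltage phasor:

import math
import numpy as np

va = 120.0 * math.sqrt(2) * complex(math.cos(0.1), math.sin(0.1))   # illustrative phasor, not real model output

va_t = np.array(va)                          # 0-d complex array, ready to be stacked later
Varms_t = np.array(abs(va) / math.sqrt(2))   # RMS magnitude, approximately 120.0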

github uwrobotics / MarsRover2019 / Workspace / src / tennis_ball_tracker / src / Calibration Utility / calibration_utility.py
                self.upperImageCanvas.create_image(0, 0, image=photo,
                                                   anchor=tk.NW)

                # run tracking
                frame = imutils.resize(frame, height=400)
                height, width = frame.shape[:2]
                frame = frame[:, 0:int(width / self.resizeFactor)]

                # apply bilateral filter to preserve edges
                blurred = cv2.bilateralFilter(frame, 9, 75, 75)

                # convert to hsv colour space
                hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

                # threshold image to find green tennis ball
                mask = cv2.inRange(hsv, np.array(
                    [self.slider_pos[3], self.slider_pos[4],
                     self.slider_pos[5]]),
                                   np.array(
                                       [self.slider_pos[0], self.slider_pos[1],
                                        self.slider_pos[2]]))

                # perform a series of dilations and erosions to remove noise
                mask = cv2.erode(mask, None, iterations=10)
                mask = cv2.dilate(mask, None, iterations=10)

                # find contours in thresholded image
                cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)
                cnts = imutils.grab_contours(cnts)
                center = None
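
In the thresholding step above, np.array turns the six slider values into the lower and upper HSV bounds that cv2.inRange expects. A minimal, hypothetical sketch with made-up bounds for a green ball:

import cv2
import numpy as np

hsv = np.zeros((400, 600, 3), dtype=np.uint8)           # stand-in for a converted frame
lower_green = np.array([29, 86, 6], dtype=np.uint8)     # illustrative lower HSV bound
upper_green = np.array([64, 255, 255], dtype=np.uint8)  # illustrative upper HSV bound
mask = cv2.inRange(hsv, lower_green, upper_green)       # 255 where the pixel is in range, 0 elsewhere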

github DO-CV / sara / python / do / sara / sfm / essential_matrix_estimation.py
def poly_hartley_sturm(a, b, c, d, fl, fr):
    r_coeff = np.array([
        -a*b*d**2 + b**2*c*d,
        # t
        -a**2*d**2 + b**4 + b**2*c**2 + 2*b**2*d**2*fr**2 + d**4*fr**4,
        # t ** 2
        -a**2*c*d + 4*a*b**3 + a*b*c**2 - 2*a*b*d**2*fl**2 + 4*a*b*d**2*fr**2
        + 2*b**2*c*d*fl**2 + 4*b**2*c*d*fr**2 + 4*c*d**3*fr**4,
        # t ** 3
        6*a**2*b**2 - 2*a**2*d**2*fl**2 + 2*a**2*d**2*fr**2 + 8*a*b*c*d*fr**2
        + 2*b**2*c**2*fl**2 + 2*b**2*c**2*fr**2 + 6*c**2*d**2*fr**4,
        # t ** 4
        4*a**3*b - 2*a**2*c*d*fl**2 + 4*a**2*c*d*fr**2 + 2*a*b*c**2*fl**2 +
        4*a*b*c**2*fr**2 - a*b*d**2*fl**4 + b**2*c*d*fl**4 + 4*c**3*d*fr**4,
        # t ** 5
        a**4 + 2*a**2*c**2*fr**2 - a**2*d**2*fl**4 + b**2*c**2*fl**4 +
        c**4*fr**4,
        # t ** 6
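
Here np.array collects the coefficients of the Hartley-Sturm polynomial into a single vector for a polynomial root finder (the snippet is truncated before the last coefficient). Note that the coefficients above are listed from the constant term upward, whereas np.roots expects the highest degree first; a tiny unrelated example of that convention:

import numpy as np

# Coefficients of t**2 - 3*t + 2, highest degree first, as np.roots expects.
coeffs = np.array([1.0, -3.0, 2.0])
print(np.roots(coeffs))   # [2. 1.]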

github FonzieTree / Attention-is-all-you-need / module.py
                        keys,
                        attention_w):
    Q = np.dot(queries, attention_w[0,:,:])
    K = np.dot(keys, attention_w[1,:,:])
    V = np.dot(keys, attention_w[2,:,:])
    # Multiplication
    outputs1 = np.array([np.dot(Q[i,:,:], K[i,:,:].T) for i in range(batch_size)])
    # Scale
    outputs2 = outputs1/(K.shape[2]** 0.5)
    outputs2[outputs2==0] = -2**32 + 1
    # SoftMax
    outputs3 = np.exp(outputs2)
    outputs4 = np.sum(outputs3,axis=2)
    outputs5 = np.array([outputs3[i,:,:]/outputs4[i,:].reshape(10,1) for i in range(batch_size)])
    outputs6 = np.array([np.dot(outputs5[i,:,:], V[i,:,:]) for i in range(batch_size)])
    outputs7 = np.array([np.dot(outputs6[i,:,:], attention_w[3,:,:]) for i in range(batch_size)])
    # Add residual connections
    outputs8 = outputs7 + queries
    return [outputs1, outputs2, outputs5,  outputs6, outputs7, outputs8, Q, K, V, queries, keys]
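
Several of the lines above use np.array over a list comprehension to stack per-example results into one batched array, where it effectively plays the role of np.stack. A small standalone sketch (shapes are illustrative):

import numpy as np

batch_size, seq_len, dim = 4, 10, 8
Q = np.random.rand(batch_size, seq_len, dim)
K = np.random.rand(batch_size, seq_len, dim)

# Stack the per-example score matrices into a (batch, seq, seq) array.
scores = np.array([np.dot(Q[i], K[i].T) for i in range(batch_size)])
print(scores.shape)   # (4, 10, 10)

# The same result without the Python loop.
assert np.allclose(scores, np.einsum('bqd,bkd->bqk', Q, K))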

github selective-inference / Python-software / selectinf / sandbox / bayesian / dual_lasso.py
    def posterior_samples(self, ndraw=1500, burnin=50):
        state = self.initial_state
        gradient_map = lambda x: -self.smooth_objective(x, 'grad')
        projection_map = lambda x: x
        stepsize = 1. / self.E
        sampler = projected_langevin(state, gradient_map, projection_map, stepsize)

        samples = []

        for i in range(ndraw + burnin):
            next(sampler)  # advance the projected Langevin chain by one step
            if i >= burnin:
                samples.append(sampler.state.copy())

        samples = np.array(samples)
        return samples
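
Appending each draw to a Python list and converting once at the end with np.array, as done above, is cheaper than growing an array inside the loop. A generic sketch of the same pattern:

import numpy as np

rng = np.random.default_rng(0)
samples = []
for _ in range(1000):
    samples.append(rng.normal(size=3))   # one 3-dimensional draw per iteration

samples = np.array(samples)              # shape (1000, 3)
print(samples.mean(axis=0))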

github tmontaigu / pylas / pylas / lasdatas / base.py
import numpy as np

def unscale_dimension(array_dim, scale, offset):
    return np.round((np.array(array_dim) - offset) / scale)
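
For completeness, here is how the helper above round-trips with the forward scaling (scale, offset and coordinates are illustrative):

import numpy as np

scale, offset = 0.01, 100.0
x = np.array([100.25, 101.37, 99.89])        # real-world (scaled) coordinates

raw = unscale_dimension(x, scale, offset)    # integer-valued stored representation
assert np.allclose(raw * scale + offset, x)  # re-applying scale and offset recovers the input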