How to use the numpy.sqrt function in NumPy

To help you get started, we've selected a few numpy.sqrt examples based on popular ways it is used in public projects.

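Before the project examples, here is a minimal sketch of the basic behaviour: numpy.sqrt returns the element-wise, non-negative square root of its input, gives nan (with a RuntimeWarning) for negative real values, and only returns complex results when the input itself is complex.

import numpy as np

x = np.array([1.0, 4.0, 9.0])
print(np.sqrt(x))          # [1. 2. 3.]

print(np.sqrt(-1.0))       # nan, and numpy emits a RuntimeWarning
print(np.sqrt(-1 + 0j))    # 1j, because the input is complex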

github oxfordcontrol / osqp-python / tests / qp_problems / utils / data_struct.py View on Github
# Initialize scaling
        d = np.ones(n + m)
        d_temp = np.ones(n + m)

        # Define reduced KKT matrix to scale
        KKT = spa.vstack([
              spa.hstack([P, A.T]),
              spa.hstack([A, spa.csc_matrix((m, m))])]).tocsc()

        # Iterate Scaling
        for i in range(settings.scaling):
            for j in range(n + m):
                norm_col_j = spa.linalg.norm(KKT[:, j],
                                             np.inf)
                if norm_col_j > SCALING_REG:
                    d_temp[j] = 1./(np.sqrt(norm_col_j))

            S_temp = spa.diags(d_temp)
            d = np.multiply(d, d_temp)
            KKT = S_temp.dot(KKT.dot(S_temp))

        # Obtain Scaler Matrices
        D = spa.diags(d[:n])
        if m == 0:
            # spa.diags() will throw an error if fed with an empty array
            E = spa.csc_matrix((0, 0))
        else:
            E = spa.diags(d[n:])

        # Scale problem Matrices
        P = D.dot(P.dot(D)).tocsc()
        A = E.dot(A.dot(D)).tocsc()
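
In the OSQP snippet above, each column of the KKT matrix is scaled by 1.0/np.sqrt of its infinity norm, and the resulting diagonal matrix is applied on both sides of the symmetric matrix, a Ruiz-style equilibration pass. A minimal dense sketch of one such pass on a made-up 2x2 matrix:

import numpy as np

K = np.array([[4.0, 1.0],
              [1.0, 9.0]])

d = 1.0 / np.sqrt(np.abs(K).max(axis=0))   # 1/sqrt of each column's infinity norm
S = np.diag(d)

K_scaled = S @ K @ S                       # symmetric two-sided scaling
print(np.abs(K_scaled).max(axis=0))        # column norms are pushed toward 1 -> [1. 1.]
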
github armandmcqueen / tensorpack-mask-rcnn / MaskRCNN / eval.py View on Github
    """
    Run detection on one image, using the TF callable.
    This function should handle the preprocessing internally.

    Args:
        img: an image
        model_func: a callable from the TF model.
            It takes image and returns (boxes, probs, labels, [masks])

    Returns:
        [DetectionResult]
    """

    orig_shape = img.shape[:2]
    resizer = CustomResize(cfg.PREPROC.TEST_SHORT_EDGE_SIZE, cfg.PREPROC.MAX_SIZE)
    resized_img = resizer.augment(img)
    scale = np.sqrt(resized_img.shape[0] * 1.0 / img.shape[0] * resized_img.shape[1] / img.shape[1])
    boxes, probs, labels, *masks = model_func(resized_img)
    boxes = boxes / scale
    # boxes are already clipped inside the graph, but after the floating point scaling, this may not be true any more.
    boxes = clip_boxes(boxes, orig_shape)

    if masks:
        # has mask
        full_masks = [_paste_mask(box, mask, orig_shape)
                      for box, mask in zip(boxes, masks[0])]
        masks = full_masks
    else:
        # fill with none
        masks = [None] * len(boxes)

    results = [DetectionResult(*args) for args in zip(boxes, probs, labels, masks)]
    return results
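
In the detection example above, np.sqrt collapses the two per-axis resize ratios into a single scale factor (their geometric mean), so dividing the predicted boxes by it maps them back to the original image. A small sketch with made-up image sizes:

import numpy as np

orig_h, orig_w = 480, 640    # hypothetical original size
new_h, new_w = 600, 800      # hypothetical resized size

scale = np.sqrt((new_h / orig_h) * (new_w / orig_w))
print(scale)                 # 1.25 here, since the aspect ratio is preserved
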
github HERA-Team / aipy / src / phs.py View on Github
def resolve_src(self, u, v, srcshape=(0,0,0)):
        """Adjust amplitudes to reflect resolution effects for a uniform 
        elliptical disk characterized by srcshape:
        srcshape = (a1,a2,th) where a1,a2 are angular sizes along the 
            semimajor, semiminor axes, and th is the angle (in radians) of
            the semimajor axis from E."""
        a1,a2,th = srcshape
        try:
            if len(u.shape) > len(a1.shape): 
                a1.shape += (1,); a2.shape += (1,); th.shape += (1,)
        except(AttributeError): pass
        ru = a1 * (u*np.cos(th) - v*np.sin(th))
        rv = a2 * (u*np.sin(th) + v*np.cos(th))
        x = 2 * np.pi * np.sqrt(ru**2 + rv**2)
        # Use first Bessel function of the first kind (J_1)
        return np.where(x == 0, 1, 2 * ss.jv(1,x)/x).squeeze()
    def refract(self, u_sf, v_sf, mfreq=.150, ionref=(0.,0.)):
github robotlearn / pyrobolearn / examples / kinematics / inverse / ik_libraries.py View on Github
def position_pd(x, xd, v, vd=np.zeros(3), ad=np.zeros(3), kp=100, kd=None):
        # if damping is not specified, make it critically damped
        if kd is None:
            kd = 2.0 * np.sqrt(kp)

        # return PD error
        return kp * (xd - x) + kd * (vd - v) + ad
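
The kd = 2.0 * np.sqrt(kp) default above is the usual critical-damping choice: for unit-mass error dynamics e'' + kd*e' + kp*e = 0 the damping ratio is kd / (2*sqrt(kp)), so this gain makes it exactly 1. A tiny sketch with an arbitrary kp:

import numpy as np

kp = 100.0
kd = 2.0 * np.sqrt(kp)                 # critically damped gain
print(kd, kd / (2.0 * np.sqrt(kp)))    # 20.0, damping ratio 1.0
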
github nexpy / nexpy / src / nexpy / examples / plugins / chopper / convert_qe.py View on Github
def convert_tof(self, tof):
        ki = np.sqrt(self.Ei / 2.0721)
        ts = self.t_m1 + 1588.254 * (self.L1 - self.d_m1) / ki
        kf = 1588.254 * self.L2 / (tof - ts)
        eps = self.Ei - 2.0721*kf**2
        return eps
github LCAV / pyroomacoustics / pyroomacoustics / experimental / point_cloud.py View on Github
def trilateration(self, D):
        '''
        Find the location of points based on their distance matrix using trilateration

        Parameters
        ----------
        D : square 2D ndarray
            Euclidean Distance Matrix (matrix containing the squared distances between points)
        '''

        dist = np.sqrt(D)

        # Simpler algorithm (no denoising)
        self.X = np.zeros((self.dim, self.m))

        self.X[:,1] = np.array([0, dist[0,1]])
        for i in range(2, self.m):
            self.X[:,i] = self.trilateration_single_point(self.X[1,1],
                    dist[0,i], dist[1,i])
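
The trilateration method above first turns the matrix of squared distances into actual distances with an element-wise np.sqrt. A minimal sketch of that step, using a made-up squared-distance matrix for three collinear points at 0, 3 and 5:

import numpy as np

D = np.array([[ 0.0,  9.0, 25.0],
              [ 9.0,  0.0,  4.0],
              [25.0,  4.0,  0.0]])

dist = np.sqrt(D)                           # element-wise square root
print(dist[0, 1], dist[1, 2], dist[0, 2])   # 3.0 2.0 5.0
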
github suavecode / SUAVE / trunk / SUAVE / Methods / Missions / Segments / Climb / Constant_Throttle_Constant_Dynamic_Pressure.py View on Github
def update_velocity_vector_from_wind_angle(segment,state):
    
    # unpack
    conditions = state.conditions 
    q          = segment.dynamic_pressure
    alpha      = state.unknowns.wind_angle[:,0][:,None]
    theta      = state.unknowns.body_angle[:,0][:,None]
    
    # Update freestream to get density
    SUAVE.Methods.Missions.Segments.Common.Aerodynamics.update_atmosphere(segment,state)
    rho        = conditions.freestream.density[:,0]    
    
    v_mag  = np.sqrt(q/rho)    
    
    # Flight path angle
    gamma = theta-alpha    

    # process
    v_x =  v_mag[:,None] * np.cos(gamma)
    v_z = -v_mag[:,None] * np.sin(gamma) # z points down

    # pack
    conditions.frames.inertial.velocity_vector[:,0] = v_x[:,0]
    conditions.frames.inertial.velocity_vector[:,2] = v_z[:,0]

    return conditions
github aerospaceresearch / orbitdeterminator / orbitdeterminator / kep_determination / lamberts_method.py View on Github
if z == 0:
        C_z_i = c2(0)
        S_z_i = c3(0)
        y_0 = r1 + r2 + A * (0 * S_z_i - 1.0) / np.sqrt(C_z_i)

        dF = np.sqrt(2) / 40.0 * y_0**1.5 + A / 8.0 * (np.sqrt(y_0) + A * np.sqrt(1 / 2 / y_0))

    else:
        C_z_i = c2(z)
        S_z_i = c3(z)
        y_z = r1 + r2 + A * (z * S_z_i - 1.0) / np.sqrt(C_z_i)

        dF = (y_z / C_z_i)**1.5 * \
             (1.0 / 2.0 / z * (C_z_i - 3.0 * S_z_i / 2.0 / C_z_i) + 3.0 * S_z_i**2.0 / 4.0 / C_z_i) +\
             A / 8.0 * (3.0 * S_z_i / C_z_i * np.sqrt(y_z) +  A * np.sqrt(C_z_i / y_z))

    return dF
github DTMilodowski / LiDAR_canopy / src / structural_sensitivity_analysis_revised_resolution.py View on Github
temp_shots = np.delete(temp_shots,idx_start)

        #pts_iter=pts.copy()
        #-------------------
        starting_ids, trees = io.create_KDTree(pts_iter)
        # loop through each sampling resolution
        for ss in range(0,sample_res.size):
            print('\t - sample res = ', keys[ss])
            n_subplots = len(subplots[keys[ss]])
            # for each of the subplots, clip point cloud and model PAD and get the metrics
            for pp in range(0,n_subplots):
                # query the tree to locate points of interest
                # note that we will only have one tree for the number of points in sensitivity analysis
                centre_x = np.mean(subplots[keys[ss]][pp][0:4,0])
                centre_y = np.mean(subplots[keys[ss]][pp][0:4,1])
                radius = np.sqrt(sample_res[ss]**2/2.)
                ids = trees[0].query_ball_point([centre_x,centre_y], radius)
                sp_pts = lidar.filter_lidar_data_by_polygon(pts_iter[ids],subplots[keys[ss]][pp],
                                                                filter_by_first_return_location=True)
                #------
                if np.sum(sp_pts[:,3]==1)>0: # check for returns within this column
                    heights,first_return_profile,n_ground_returns = LAD1.bin_returns(sp_pts, max_height, layer_thickness)
                    mh_profile = LAD1.estimate_LAD_MacArthurHorn(first_return_profile, n_ground_returns, layer_thickness, kappa)
                    pen_limit = np.cumsum(first_return_profile)==0
                    #------
                    heights,weighted_return_profile,weighted_n_ground_returns = LAD1.bin_returns_weighted_by_num_returns(sp_pts, max_height, layer_thickness)
                    mh_wt_profile = LAD1.estimate_LAD_MacArthurHorn(weighted_return_profile,n_ground_returns,layer_thickness,kappa,zero_nodata=False)
                    #------
                    u,n,I,U = LAD2.calculate_LAD(sp_pts,heights_rad,max_k,'spherical',test_sensitivity=True)
                    rad1_profile=u[::-1][1:].copy()
                    #------
                    u,n,I,U = LAD2.calculate_LAD_DTM(sp_pts,heights_rad,max_k,'spherical',test_sensitivity=True)
github kiyukuta / lencon / code / models / len_init.py View on Github
def __init__(self,
                 src_vcb_num,
                 trg_vcb_num,
                 dim_emb,
                 dim_hid):

        super(LenInitEarlyAttn, self).__init__(src_vcb_num,
                                               trg_vcb_num,
                                               dim_emb,
                                               dim_hid)
        # length
        self.encoder.add_param("c0", (1, dim_hid))
        param = np.random.normal(0, np.sqrt(1. / dim_hid), (1, dim_hid))
        self.encoder.c0.data[...] = param
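
The last example uses np.sqrt to set the standard deviation of a Gaussian initializer to sqrt(1/dim_hid), a common way to keep the scale of the initial state roughly independent of the layer width. A minimal sketch of the same initialization with an arbitrary dim_hid:

import numpy as np

dim_hid = 256
c0 = np.random.normal(0, np.sqrt(1.0 / dim_hid), (1, dim_hid))
print(c0.shape, round(c0.std(), 3))   # (1, 256) and a sample std near 0.0625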