How to use the cupy.matmul function in cupy

To help you get started, we’ve selected a few cupy.matmul examples based on popular ways it is used in public projects.
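
Before the project examples, here is a minimal sketch of cp.matmul itself (array names are illustrative): like numpy.matmul, it performs ordinary 2-D matrix multiplication, broadcasts leading batch dimensions, and runs on the GPU.

import cupy as cp

# 2-D case: an ordinary matrix product, computed on the GPU
a = cp.random.rand(3, 4)
b = cp.random.rand(4, 5)
c = cp.matmul(a, b)        # shape (3, 5), equivalent to a @ b

# batched case: leading dimensions broadcast, so a single (4, 4) matrix
# multiplies a whole stack of (4, 4) matrices without a Python loop
stack = cp.random.rand(10, 4, 4)
t = cp.random.rand(4, 4)
out = cp.matmul(t, stack)  # shape (10, 4, 4)

print(c.shape, out.shape)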


github kwj2104 / Simple-Variational-Autoencoder / vae.py (View on GitHub)
        db0_e_r = d_b_mu_r.dot(self.e_W_mu.T) * lrelu(self.e_h0_l, derivative=True)
        dW0_e_r = np.matmul(np.expand_dims(y, axis=-1), np.expand_dims(db0_e_r, axis=1)) 
        
        d_b_logvar_r = d_b_mu_r * np.exp(self.e_logvar * .5) * .5 * self.rand_sample
        d_W_logvar_r = np.matmul(np.expand_dims(self.e_h0_a, axis=-1), np.expand_dims(d_b_logvar_r, axis=1))
        
        db0_e_r_2 = d_b_logvar_r.dot(self.e_W_logvar.T) * lrelu(self.e_h0_l, derivative=True)
        dW0_e_r_2 = np.matmul(np.expand_dims(y, axis=-1), np.expand_dims(db0_e_r_2, axis=1))
        
        ########################################
        #Calculate encoder gradients from K-L
        ########################################
    
        #logvar terms
        dKL_b_log = -.5 * (1 - np.exp(self.e_logvar))
        dKL_W_log = np.matmul(np.expand_dims(self.e_h0_a, axis= -1), np.expand_dims(dKL_b_log, axis= 1))
        
        #Heaviside step function
        dlrelu = lrelu(self.e_h0_l, derivative=True)  

        dKL_e_b0_1 = .5 * dlrelu * (np.exp(self.e_logvar) - 1).dot(self.e_W_logvar.T)
        dKL_e_W0_1 = np.matmul(np.expand_dims(y, axis= -1), np.expand_dims(dKL_e_b0_1, axis= 1))
        
        #m^2 term
        dKL_W_m = .5 * (2 * np.matmul(np.expand_dims(self.e_h0_a, axis=-1), np.expand_dims(self.e_mu, axis=1)))
        dKL_b_m = .5 * (2 * self.e_mu)
        
        dKL_e_b0_2 = .5 * dlrelu * (2 * self.e_mu).dot(self.e_W_mu.T)
        dKL_e_W0_2 = np.matmul(np.expand_dims(y, axis= -1), np.expand_dims(dKL_e_b0_2, axis= 1))
        
        # Combine gradients for encoder from recon and KL
        grad_b_logvar = dKL_b_log + d_b_logvar_l + d_b_logvar_r
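
A note on the snippet above: this project appears to import CuPy under the alias np, which is why its np.matmul calls are indexed under cupy.matmul. The recurring pattern np.matmul(np.expand_dims(a, axis=-1), np.expand_dims(b, axis=1)) computes a batch of outer products, one (m, n) gradient matrix per sample. A minimal sketch with hypothetical shapes:

import cupy as np  # the VAE code aliases CuPy as np; shapes below are illustrative

batch, m, n = 32, 128, 64
a = np.random.rand(batch, m)  # e.g. layer activations
b = np.random.rand(batch, n)  # e.g. upstream error terms

# expand to (batch, m, 1) and (batch, 1, n); matmul then yields a
# (batch, m, n) stack of outer products a[i] b[i]^T
grads = np.matmul(np.expand_dims(a, axis=-1), np.expand_dims(b, axis=1))
print(grads.shape)  # (32, 128, 64)
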
github BorisMuzellec / EllipticalEmbeddings / embeddings.py (View on GitHub)
    def compose(self, from_word, to_word, test_word, k=10, metric='bures_distance'):
        n = len(self.means)
        t, T = self.get_push_forward(from_word, to_word)

        test_sigma = cp.array(self.c_vars[self.words_to_idxs[test_word]])

        mu = cp.array(self.c_means[self.words_to_idxs[test_word]]) + t
        sigma = cp.matmul(T, cp.matmul(test_sigma.reshape(-1, test_sigma.shape[0], test_sigma.shape[1]), T))

        if metric == 'bures_distance':
            dists = wr.batch_W2(mu.reshape(1, -1).repeat(n, axis=0), cp.array(self.c_means), sigma.repeat(n, axis=0), cp.array(self.c_vars), numIters=20)[0]
        elif metric == 'product':
            dists = wr.batch_W2(mu.reshape(1, -1).repeat(n, axis=0), cp.array(self.c_means), sigma.repeat(n, axis=0), cp.array(self.c_vars), numIters=20, prod=True)[0]
            dists = -dists
        elif metric == 'bures_cosine':
            dists = wr.bures_cosine(mu.reshape(1, -1).repeat(n, axis=0), cp.array(self.c_means), sigma.repeat(n, axis=0), cp.array(self.c_vars), numIters=20)
            dists = -dists

        idxs = np.argsort(cp.asnumpy(dists))[:k]

        for i in range(k):
            print(self.vocab_words[idxs[i]])
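
In compose above, cp.matmul broadcasts the (d, d) push-forward matrix T against the covariance reshaped to (1, d, d), producing the transported covariance T @ sigma @ T. A small sketch of that broadcasting behaviour (names and sizes hypothetical):

import cupy as cp

d = 4
T = cp.random.rand(d, d)          # a single (d, d) transport matrix
sigmas = cp.random.rand(5, d, d)  # a batch of covariance matrices

# T broadcasts over the leading batch axis, so this computes
# T @ sigmas[i] @ T for every i without an explicit loop
pushed = cp.matmul(T, cp.matmul(sigmas, T))
print(pushed.shape)  # (5, 4, 4)
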
github rossant / pykilosort / pykilosort / postprocess.py (View on GitHub)
        Q12 = (Qi / max(Q00, Q01)).min()  # refractoriness metric 1
        R = rir.min()  # refractoriness metric 2

        # if the CCG has a dip, don't do the split.
        # These thresholds are consistent with the ones from merges.
        if (Q12 < 0.25) and (R < 0.05):  # if both metrics are below threshold.
            nccg += 1  # keep track of how many splits were voided by the CCG criterion
            continue

        # now decide if the split would result in waveforms that are too similar
        # the reconstructed mean waveforms for putative cluster 1
        # c1 = cp.matmul(wPCA, cp.reshape((mean(clp0[ilow, :], 0), 3, -1), order='F'))
        c1 = cp.matmul(wPCA, mean(clp0[ilow, :], 0).reshape((3, -1), order='F'))
        # the reconstructed mean waveforms for putative cluster 2
        # c2 = cp.matmul(wPCA, cp.reshape((mean(clp0[~ilow, :], 0), 3, -1), order='F'))
        c2 = cp.matmul(wPCA, mean(clp0[~ilow, :], 0).reshape((3, -1), order='F'))

        cc = cp.corrcoef(c1.ravel(), c2.ravel())  # correlation of mean waveforms
        n1 = sqrt(cp.sum(c1 ** 2))  # the amplitude estimate 1
        n2 = sqrt(cp.sum(c2 ** 2))  # the amplitude estimate 2

        r0 = 2 * abs((n1 - n2) / (n1 + n2))

        # if the templates are correlated, and their amplitudes are similar, stop the split!!!

        if (cc[0, 1] > 0.9) and (r0 < 0.2):
            continue

        # final criteria to continue with the split: the split piece is more than 5% of all
        # spikes, the split piece is more than 300 spikes, and the confidences for
        # assigning spikes to both clusters exceed a preset criterion ccsplit
        if (nremove > 0.05) and (min(plow, phigh) > ccsplit) and (
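
In the split test above, cp.matmul projects each putative cluster's mean PCA coefficients back through the temporal basis wPCA to reconstruct mean waveforms. A shape-only sketch of that pattern (dimensions are illustrative, not pykilosort's actual sizes):

import cupy as cp

nt, npc, nchan = 61, 3, 32            # timepoints, PCs per channel, channels
wPCA = cp.random.rand(nt, npc)        # temporal PCA basis, one column per component
coeffs = cp.random.rand(npc * nchan)  # flattened per-channel PC coefficients

# reshape column-by-column (Fortran order, as in the snippet), then project
# back into the time domain: (nt, npc) @ (npc, nchan) -> (nt, nchan)
waveform = cp.matmul(wPCA, coeffs.reshape((npc, nchan), order='F'))
print(waveform.shape)  # (61, 32)
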
github BorisMuzellec / EllipticalEmbeddings / utils.py (View on GitHub)
def batch_Tuv2(U, V, mid=None, inv_sU=None, numIters=2):
    """
    Returns the transportation matrix from N(U) to N(V):
    U^{-1/2}[U^{1/2}VU^{1/2}]^{1/2}U^{-1/2}
    """
    if (inv_sU is None) or (mid is None):
        sU, inv_sU = batch_sqrtm(U, numIters=numIters)
    if mid is None:
        mid, _ = batch_sqrtm(cp.matmul(cp.matmul(sU, V), sU), numIters=numIters)
    return cp.matmul(inv_sU, cp.matmul(mid, inv_sU))
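
A quick property check for batch_Tuv2: the returned map T should push the covariance U onto V, i.e. T @ U @ T ≈ V. The sketch below substitutes an eigendecomposition square root for batch_sqrtm so it is self-contained, and works on single matrices rather than batches:

import cupy as cp

def spd_sqrtm(M):
    # square root of one SPD matrix via eigendecomposition (sketch helper)
    w, Q = cp.linalg.eigh(M)
    return (Q * cp.sqrt(w)) @ Q.T

d = 4
A, B = cp.random.rand(d, d), cp.random.rand(d, d)
U = A @ A.T + d * cp.eye(d)  # two well-conditioned SPD matrices
V = B @ B.T + d * cp.eye(d)

sU = spd_sqrtm(U)
inv_sU = cp.linalg.inv(sU)
mid = spd_sqrtm(cp.matmul(cp.matmul(sU, V), sU))
T = cp.matmul(inv_sU, cp.matmul(mid, inv_sU))

print(cp.allclose(cp.matmul(T, cp.matmul(U, T)), V, atol=1e-5))  # True
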
github BorisMuzellec / EllipticalEmbeddings / utils.py (View on GitHub)
    batchSize = A.shape[0]
    dim = A.shape[1]
    # Renormalize so that each matrix has norm less than 1/reg, but only when necessary
    normA = reg * cp.linalg.norm(A, axis=(1, 2))
    renorm_factor = cp.ones_like(normA)
    renorm_factor[cp.where(normA > 1.0)] = normA[cp.where(normA > 1.0)]
    renorm_factor = renorm_factor.reshape(batchSize, 1, 1)

    Y = cp.divide(A, renorm_factor)
    I = cp.eye(dim).reshape(1, dim, dim).repeat(batchSize, axis=0)
    Z = cp.eye(dim).reshape(1, dim, dim).repeat(batchSize, axis=0)
    for i in range(numIters):
        T = 0.5 * (3.0 * I - cp.matmul(Z, Y))
        Y = cp.matmul(Y, T)
        Z = cp.matmul(T, Z)
    sA = Y * cp.sqrt(renorm_factor)
    sAinv = Z / cp.sqrt(renorm_factor)
    return sA, sAinv
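
The loop above is the Newton-Schulz iteration: it converges to both A^{1/2} (in Y) and A^{-1/2} (in Z) using only cp.matmul, which keeps the whole batch on the GPU with no per-matrix eigendecompositions. A self-contained sketch wrapping it in a hypothetical function and verifying the result:

import cupy as cp

def batch_sqrtm_ns(A, numIters=20, reg=1.0):
    # hypothetical wrapper around the Newton-Schulz loop shown above
    batchSize, dim = A.shape[0], A.shape[1]
    normA = reg * cp.linalg.norm(A, axis=(1, 2))
    renorm = cp.maximum(normA, 1.0).reshape(batchSize, 1, 1)
    Y = A / renorm
    I = cp.eye(dim).reshape(1, dim, dim).repeat(batchSize, axis=0)
    Z = I.copy()
    for _ in range(numIters):
        T = 0.5 * (3.0 * I - cp.matmul(Z, Y))
        Y = cp.matmul(Y, T)
        Z = cp.matmul(T, Z)
    return Y * cp.sqrt(renorm), Z / cp.sqrt(renorm)

X = cp.random.rand(8, 4, 4)
A = cp.matmul(X, X.transpose(0, 2, 1)) + 4 * cp.eye(4)  # a batch of SPD matrices
sA, sAinv = batch_sqrtm_ns(A)
print(cp.allclose(cp.matmul(sA, sA), A, atol=1e-4))  # sA @ sA ~= A
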
github kwj2104 / Simple-Variational-Autoencoder / vae.py (View on GitHub)
        dW1_d_l = np.matmul(np.expand_dims(self.d_h0_a, axis=-1), np.expand_dims(dL_dsig_l, axis=1))
        db1_d_l = dL_dsig_l 
        
        db0_d_l = dL_dsig_l.dot(self.d_W1.T) * drelu
        dW0_d_l = np.matmul(np.expand_dims(self.sample_z, axis=-1), np.expand_dims(db0_d_l, axis=1))
        
        #Right side term
        dL_r = (1 - y) * (1 / (1 - out))
        dL_dsig_r = dL_r * dsig
        
        dW1_d_r = np.matmul(np.expand_dims(self.d_h0_a, axis=-1), np.expand_dims(dL_dsig_r, axis=1))
        db1_d_r = dL_dsig_r
        
        db0_d_r = dL_dsig_r.dot(self.d_W1.T) * drelu
        dW0_d_r = np.matmul(np.expand_dims(self.sample_z, axis=-1), np.expand_dims(db0_d_r, axis=1))
        
        # Combine gradients for decoder
        grad_d_W0 = dW0_d_l + dW0_d_r
        grad_d_b0 = db0_d_l + db0_d_r
        grad_d_W1 = dW1_d_l + dW1_d_r
        grad_d_b1 = db1_d_l + db1_d_r
         
        #Calculate encoder gradients from reconstruction
        #Left side term
        d_b_mu_l  = db0_d_l.dot(self.d_W0.T)
        d_W_mu_l = np.matmul(np.expand_dims(self.e_h0_a, axis=-1), np.expand_dims(d_b_mu_l, axis=1))
        
        db0_e_l = d_b_mu_l.dot(self.e_W_mu.T) * lrelu(self.e_h0_l, derivative=True)
        dW0_e_l = np.matmul(np.expand_dims(y, axis=-1), np.expand_dims(db0_e_l, axis=1)) 
        
        d_b_logvar_l = d_b_mu_l * np.exp(self.e_logvar * .5) * .5 * self.rand_sample
github kwj2104 / Simple-Variational-Autoencoder / vae.py (View on GitHub)
        db0_d_r = dL_dsig_r.dot(self.d_W1.T) * drelu
        dW0_d_r = np.matmul(np.expand_dims(self.sample_z, axis=-1), np.expand_dims(db0_d_r, axis=1))
        
        # Combine gradients for decoder
        grad_d_W0 = dW0_d_l + dW0_d_r
        grad_d_b0 = db0_d_l + db0_d_r
        grad_d_W1 = dW1_d_l + dW1_d_r
        grad_d_b1 = db1_d_l + db1_d_r
         
        #Calculate encoder gradients from reconstruction
        #Left side term
        d_b_mu_l  = db0_d_l.dot(self.d_W0.T)
        d_W_mu_l = np.matmul(np.expand_dims(self.e_h0_a, axis=-1), np.expand_dims(d_b_mu_l, axis=1))
        
        db0_e_l = d_b_mu_l.dot(self.e_W_mu.T) * lrelu(self.e_h0_l, derivative=True)
        dW0_e_l = np.matmul(np.expand_dims(y, axis=-1), np.expand_dims(db0_e_l, axis=1)) 
        
        d_b_logvar_l = d_b_mu_l * np.exp(self.e_logvar * .5) * .5 * self.rand_sample
        d_W_logvar_l = np.matmul(np.expand_dims(self.e_h0_a, axis=-1), np.expand_dims(d_b_logvar_l, axis=1))
        
        db0_e_l_2 = d_b_logvar_l.dot(self.e_W_logvar.T) * lrelu(self.e_h0_l, derivative=True)
        dW0_e_l_2 = np.matmul(np.expand_dims(y, axis=-1), np.expand_dims(db0_e_l_2, axis=1)) 
        
        #Right side term
        d_b_mu_r  = db0_d_r.dot(self.d_W0.T)
        d_W_mu_r = np.matmul(np.expand_dims(self.e_h0_a, axis=-1), np.expand_dims(d_b_mu_r, axis=1))
        
        db0_e_r = d_b_mu_r.dot(self.e_W_mu.T) * lrelu(self.e_h0_l, derivative=True)
        dW0_e_r = np.matmul(np.expand_dims(y, axis=-1), np.expand_dims(db0_e_r, axis=1)) 
        
        d_b_logvar_r = d_b_mu_r * np.exp(self.e_logvar * .5) * .5 * self.rand_sample
        d_W_logvar_r = np.matmul(np.expand_dims(self.e_h0_a, axis=-1), np.expand_dims(d_b_logvar_r, axis=1))
github kwj2104 / Simple-Variational-Autoencoder / vae.py (View on GitHub)
        db0_e_r_2 = d_b_logvar_r.dot(self.e_W_logvar.T) * lrelu(self.e_h0_l, derivative=True)
        dW0_e_r_2 = np.matmul(np.expand_dims(y, axis=-1), np.expand_dims(db0_e_r_2, axis=1))
        
        ########################################
        #Calculate encoder gradients from K-L
        ########################################
    
        #logvar terms
        dKL_b_log = -.5 * (1 - np.exp(self.e_logvar))
        dKL_W_log = np.matmul(np.expand_dims(self.e_h0_a, axis= -1), np.expand_dims(dKL_b_log, axis= 1))
        
        #Heaviside step function
        dlrelu = lrelu(self.e_h0_l, derivative=True)  

        dKL_e_b0_1 = .5 * dlrelu * (np.exp(self.e_logvar) - 1).dot(self.e_W_logvar.T)
        dKL_e_W0_1 = np.matmul(np.expand_dims(y, axis= -1), np.expand_dims(dKL_e_b0_1, axis= 1))
        
        #m^2 term
        dKL_W_m = .5 * (2 * np.matmul(np.expand_dims(self.e_h0_a, axis=-1), np.expand_dims(self.e_mu, axis=1)))
        dKL_b_m = .5 * (2 * self.e_mu)
        
        dKL_e_b0_2 = .5 * dlrelu * (2 * self.e_mu).dot(self.e_W_mu.T)
        dKL_e_W0_2 = np.matmul(np.expand_dims(y, axis= -1), np.expand_dims(dKL_e_b0_2, axis= 1))
        
        # Combine gradients for encoder from recon and KL
        grad_b_logvar = dKL_b_log + d_b_logvar_l + d_b_logvar_r
        grad_W_logvar = dKL_W_log + d_W_logvar_l + d_W_logvar_r
        grad_b_mu = dKL_b_m + d_b_mu_l + d_b_mu_r
        grad_W_mu = dKL_W_m + d_W_mu_l + d_W_mu_r
        grad_e_b0 = dKL_e_b0_1 + dKL_e_b0_2 + db0_e_l + db0_e_l_2 + db0_e_r + db0_e_r_2
        grad_e_W0 = dKL_e_W0_1 + dKL_e_W0_2 + dW0_e_l + dW0_e_l_2 + dW0_e_r + dW0_e_r_2
github kwj2104 / Simple-Variational-Autoencoder / vae.py (View on GitHub)
        ########################################
        #Calculate encoder gradients from K-L
        ########################################
    
        #logvar terms
        dKL_b_log = -.5 * (1 - np.exp(self.e_logvar))
        dKL_W_log = np.matmul(np.expand_dims(self.e_h0_a, axis= -1), np.expand_dims(dKL_b_log, axis= 1))
        
        #Heaviside step function
        dlrelu = lrelu(self.e_h0_l, derivative=True)  

        dKL_e_b0_1 = .5 * dlrelu * (np.exp(self.e_logvar) - 1).dot(self.e_W_logvar.T)
        dKL_e_W0_1 = np.matmul(np.expand_dims(y, axis= -1), np.expand_dims(dKL_e_b0_1, axis= 1))
        
        #m^2 term
        dKL_W_m = .5 * (2 * np.matmul(np.expand_dims(self.e_h0_a, axis=-1), np.expand_dims(self.e_mu, axis=1)))
        dKL_b_m = .5 * (2 * self.e_mu)
        
        dKL_e_b0_2 = .5 * dlrelu * (2 * self.e_mu).dot(self.e_W_mu.T)
        dKL_e_W0_2 = np.matmul(np.expand_dims(y, axis= -1), np.expand_dims(dKL_e_b0_2, axis= 1))
        
        # Combine gradients for encoder from recon and KL
        grad_b_logvar = dKL_b_log + d_b_logvar_l + d_b_logvar_r
        grad_W_logvar = dKL_W_log + d_W_logvar_l + d_W_logvar_r
        grad_b_mu = dKL_b_m + d_b_mu_l + d_b_mu_r
        grad_W_mu = dKL_W_m + d_W_mu_l + d_W_mu_r
        grad_e_b0 = dKL_e_b0_1 + dKL_e_b0_2 + db0_e_l + db0_e_l_2 + db0_e_r + db0_e_r_2
        grad_e_W0 = dKL_e_W0_1 + dKL_e_W0_2 + dW0_e_l + dW0_e_l_2 + dW0_e_r + dW0_e_r_2
        
        
        grad_list = [grad_e_W0, grad_e_b0, grad_W_mu, grad_b_mu, grad_W_logvar, grad_b_logvar,
                     grad_d_W0, grad_d_b0, grad_d_W1, grad_d_b1]