How to use the pyglm.utils.utils.logistic function in pyglm

To help you get started, we’ve selected a few pyglm examples based on popular ways the logistic function is used in public projects.

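pyglm.utils.utils.logistic is the standard logistic (sigmoid) function, logistic(x) = 1 / (1 + exp(-x)). It maps a real-valued activation to a probability in (0, 1), which the examples below use to parameterize Bernoulli and negative-binomial observations. A minimal sketch of the typical pattern (the array values are illustrative, not taken from the project):

import numpy as np
from pyglm.utils.utils import logistic

# Real-valued activations, e.g. latent states projected through weights
psi = np.array([-2.0, 0.0, 3.0])

# Map activations to probabilities in (0, 1)
p = logistic(psi)                        # approx [0.12, 0.50, 0.95]

# Draw binary observations (spikes) with those probabilities
spikes = np.random.rand(*p.shape) < p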

github slinderman / pyglm / test / debug_collapsed_resample_with_temp.py
def resample_x(z):
    # Resample x from its (temperature-scaled) Bernoulli likelihood.
    # T is a tempering constant defined elsewhere in this test script.
    p = logistic(z / T)
    x_smpl = np.random.rand() < p
    return x_smpl
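Dividing the activation by a temperature T flattens the Bernoulli likelihood: as T grows, logistic(z / T) moves toward 0.5 regardless of z, which (judging by the file name) is what this debugging script uses to temper the collapsed resampling. A quick illustration with arbitrary values:

from pyglm.utils.utils import logistic

z = 2.0
for T in [0.5, 1.0, 10.0]:
    # Probabilities shrink toward 0.5 as the temperature increases
    print(T, logistic(z / T))            # approx 0.98, 0.88, 0.55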
github slinderman / pyglm / test / test_lds.py
def update(model):
    # One Gibbs sweep over the model, then map the inferred latent states
    # through the emission matrix C and the logistic link to get the
    # implied observation probabilities.
    model.resample_model()
    z_inf = model.states_list[0].stateseq
    C_inf = model.C
    psi_inf = z_inf.dot(C_inf.T)
    p_inf = logistic(psi_inf)
    return model.log_likelihood(), p_inf
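update returns both the model log likelihood and the inferred probabilities, so the test can call it in a loop and watch for convergence. A hedged sketch of such a loop, assuming model has been constructed as elsewhere in test_lds.py:

# Illustrative Gibbs loop; the iteration count is arbitrary
lls = []
for _ in range(100):
    ll, p_inf = update(model)
    lls.append(ll)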
github slinderman / pyglm / pyglm / internals / weights.py
        # Posterior log-odds of including the edge: prior logit plus the
        # marginal likelihood ratio of the Gaussian (slab) component.
        logdet_prior_cov = 0.5*np.log(prior_sigmasq).sum()
        logdet_post_cov  = 0.5*np.log(post_sigmasq).sum()
        logit_rho_post   = logit(rho) \
                           + 0.5 * (logdet_post_cov - logdet_prior_cov) \
                           + 0.5 * post_mu.dot(post_prec).dot(post_mu) \
                           - 0.5 * prior_mu.dot(np.linalg.solve(prior_cov, prior_mu))

        # The truncated normal prior introduces another term,
        # the ratio of the normalizers of the truncated distributions
        logit_rho_post += np.log(normal_cdf((self.ub-post_mu) / np.sqrt(post_sigmasq)) -
                                 normal_cdf((self.lb-post_mu) / np.sqrt(post_sigmasq))).sum()

        logit_rho_post -= np.log(normal_cdf((self.ub-prior_mu) / np.sqrt(prior_sigmasq)) -
                                 normal_cdf((self.lb-prior_mu) / np.sqrt(prior_sigmasq))).sum()

        rho_post = logistic(logit_rho_post)

        # Sample the binary indicator of an edge
        self.A[n_pre, n_post] = np.random.rand() < rho_post
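This block is the spike-and-slab update for the binary adjacency matrix A: it starts from the prior log-odds logit(rho), adds the marginal likelihood ratio of including the weight (the Gaussian and truncated-normal terms), and maps the result back through logistic to get the posterior inclusion probability rho_post. The identity being exploited is that logistic and logit are inverses, so adding a log-likelihood ratio in logit space is exactly a Bayesian odds update. A small numeric sketch of that identity (the values are made up):

import numpy as np
from pyglm.utils.utils import logistic

rho = 0.2                              # prior probability of an edge
llr = 1.5                              # log-likelihood ratio favoring the edge (illustrative)

logit_rho = np.log(rho / (1 - rho))    # logit is the inverse of logistic
rho_post = logistic(logit_rho + llr)   # the log-odds update used above

# The same result written as a direct Bayesian odds calculation
rho_post_direct = rho * np.exp(llr) / (rho * np.exp(llr) + (1 - rho))
assert np.isclose(rho_post, rho_post_direct)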
github slinderman / pyglm / pyglm / simple_models.py
def log_likelihood(self, augmented_data=None, n=None):
        """
        Compute the log likelihood of the augmented data
        :return:
        """
        if augmented_data is None:
            datas = self.data_list
        else:
            datas = [augmented_data]

        ll = 0
        for data in datas:
            S = data["S"][:,n] if n is not None else data["S"]
            Z = self.log_normalizer(S)
            X = self.compute_activation(data, n=n)
            P = logistic(X)
            P = np.clip(P, 1e-32, 1-1e-32)
            ll += (Z + S * np.log(P) + self.xi * np.log(1-P)).sum()

        return ll
github slinderman / pyglm / pyglm / internals / observations.py
def rvs(self, Psi):
    # Map activations to probabilities and draw independent Bernoulli samples,
    # one per entry of Psi.
    p = logistic(Psi)
    return np.random.rand(*p.shape) < p
github slinderman / pyglm / pyglm / internals / observations.py
def _resample_xi_discrete(self, augmented_data_list, xi_max=20):
        from pybasicbayes.util.stats import sample_discrete_from_log
        from pyglm.internals.negbin import nb_likelihood_xi

        # Resample xi with uniform prior over discrete set
        # p(\xi | \psi, s) \propto p(\xi) * p(s | \xi, \psi)
        lp_xis = np.zeros((self.N, xi_max))
        for d in augmented_data_list:
            psi = self.activation.compute_psi(d)
            for n in range(self.N):
                Sn   = d["S"][:,n].copy()
                psin = psi[:,n].copy()
                pn   = logistic(psin)
                pn   = np.clip(pn, 1e-32, 1-1e-32)

                xis = np.arange(1, xi_max+1).astype(float)
                lp_xi = np.zeros(xi_max)
                nb_likelihood_xi(Sn, pn, xis, lp_xi)
                lp_xis[n] += lp_xi

                # lp_xi2 = (gammaln(Sn[:,None]+xis[None,:]) - gammaln(xis[None,:])).sum(0)
                # lp_xi2 += (xis[None,:] * np.log(1-pn)[:,None]).sum(0)
                #
                # assert np.allclose(lp_xi, lp_xi2)

        for n in range(self.N):
            self.xi[0,n] = xis[sample_discrete_from_log(lp_xis[n])]
github slinderman / pyglm / pyglmdos / internals / observations.py
def get_vlb(self, augmented_data):
        # 1. E[ \ln p(s | \psi) ]
        # Compute this with Monte Carlo integration over \psi
        Psis = self.activation.mf_sample_activation(augmented_data, N_samples=10)
        ps = logistic(Psis)
        E_lnp = np.log(ps).mean(axis=0)
        E_ln_notp = np.log(1-ps).mean(axis=0)

        vlb = self.expected_log_likelihood(augmented_data,
                                           (E_lnp, E_ln_notp)).sum()
        return vlb
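The expected log likelihood under the mean-field posterior has no closed form here, so get_vlb approximates it by Monte Carlo: draw several activations psi from the variational posterior, push them through logistic, and average the log probabilities. A stripped-down sketch of the same pattern in plain numpy (the Gaussian parameters below are stand-ins for whatever mf_sample_activation would produce):

import numpy as np
from pyglm.utils.utils import logistic

mu, sigma = 0.5, 1.0                        # assumed mean-field posterior over psi
psis = mu + sigma * np.random.randn(10)     # N_samples = 10 draws, as in get_vlb

E_lnp = np.log(logistic(psis)).mean()          # Monte Carlo estimate of E[ln p]
E_ln_notp = np.log(1 - logistic(psis)).mean()  # ... and of E[ln(1 - p)]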