How to use the pyglm.inference.log_sum_exp.log_sum_exp_sample function in PyGLM

To help you get started, we’ve selected a few PyGLM examples, based on popular ways it is used in public projects.
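
The examples below all follow the same pattern: build a vector of unnormalized log probabilities and let log_sum_exp_sample pick an index with probability proportional to the exponentiated values, using the log-sum-exp trick for numerical stability. A minimal sketch of that behavior (an illustration of the idea, not the library's exact implementation):

import numpy as np

def log_sum_exp_sample_sketch(lnp):
    """Sample an index i with probability proportional to exp(lnp[i]),
    computed stably by subtracting the max before exponentiating."""
    lnp = np.asarray(lnp, dtype=float)
    p = np.exp(lnp - lnp.max())  # shift so the largest term is exp(0) = 1
    p /= p.sum()                 # normalize to a proper distribution
    return np.random.choice(len(p), p=p)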

github slinderman / theano_pyglm / pyglm / inference / gibbs.py (view on GitHub)
        log_G = logsumexp(weighted_log_L)
        if not np.isfinite(log_G):
            print(weighted_log_L)
            raise Exception("log_G not finite")

        # Compute log Pr(A_nn=1) given prior and estimate of log lkhd after integrating out W
        log_pr_A = prior_lp_A + log_G
        # Compute log Pr(A_nn = 0 | {s,c}) = log Pr({s,c} | A_nn = 0) + log Pr(A_nn = 0)
        log_pr_noA = prior_lp_noA + \
                     self._glm_ll(n_pre, n_post, 0.0, x,
                                      I_bias, I_stim, I_imp, I_other)

        if np.isnan(log_pr_noA):
            log_pr_noA = -np.Inf

        # Sample A
        try:
            A[n_pre, n_post] = log_sum_exp_sample([log_pr_noA, log_pr_A])
            if np.allclose(p_A[n_pre, n_post], 1.0) and not A[n_pre, n_post]:
                print(log_pr_noA)
                print(log_pr_A)
                raise Exception("Sampled no self edge")
        except Exception as e:

            raise e
            # import pdb; pdb.set_trace()
        set_vars('A', x['net']['graph'], A)

        # Sample W from its posterior, i.e. log_L with denominator log_G
        # If A_nn = 0, we don't actually need to resample W since it has no effect
        if A[n_pre,n_post] == 1:
            # W[n_pre, n_post] = self._inverse_cdf_sample_w(mu_w, sigma_w, W_nns, log_L)
            W[n_pre, n_post] = self._adaptive_rejection_sample_w(n_pre, n_post, x, mu_w, sigma_w,
                                                                 W_nns, log_L, I_bias, I_stim, I_imp, I_other)
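
In this first example, log_sum_exp_sample is called with a two-element list, so it effectively draws a Bernoulli variable for the adjacency entry: it returns 0 (no edge) or 1 (edge) with probability proportional to exp(log_pr_noA) and exp(log_pr_A). Stripped of the model-specific pieces, the usage pattern is just the following (the log probabilities here are toy values, not the repository's):

import numpy as np
from pyglm.inference.log_sum_exp import log_sum_exp_sample

# Toy unnormalized log probabilities for "no edge" vs. "edge"
log_pr_noA = np.log(0.3)
log_pr_A = np.log(0.7)

# Returns 0 or 1 with probability proportional to exp() of each entry
sampled_edge = log_sum_exp_sample([log_pr_noA, log_pr_A])
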
github slinderman / theano_pyglm / pyglm / inference / kayak_gibbs.py (view on GitHub)
        log_G = logsumexp(weighted_log_L)
        if not np.isfinite(log_G):
            print(weighted_log_L)
            raise Exception("log_G not finite")

        # Compute log Pr(A_nn=1) given prior and estimate of log lkhd after integrating out W
        log_pr_A = prior_lp_A + log_G
        # Compute log Pr(A_nn = 0 | {s,c}) = log Pr({s,c} | A_nn = 0) + log Pr(A_nn = 0)
        log_pr_noA = prior_lp_noA + self._glm_ll_noA(n_pre, n_post)

        if np.isnan(log_pr_noA):
            log_pr_noA = -np.Inf

        # Sample A
        try:
            A[n_pre, n_post] = log_sum_exp_sample([log_pr_noA, log_pr_A])
            if np.allclose(p_A[n_pre, n_post], 1.0) and not A[n_pre, n_post]:
                print "Sampled no self edge"
                print log_pr_noA
                print log_pr_A
                # raise Exception("Sampled no self edge")
        except Exception as e:

            raise e

        x['net']['graph']['A'] = A
        self.population.network.graph.A.value = A

        # Sample W from its posterior, i.e. log_L with denominator log_G
        # If A_nn = 0, we don't actually need to resample W since it has no effect
        if A[n_pre,n_post] == 1:
            W[n_pre, n_post] = self._adaptive_rejection_sample_w(n_pre, n_post, mu_w, sigma_w, W_nns, log_L)
github slinderman / theano_pyglm / pyglm / inference / gibbs.py (view on GitHub)
            N = A.shape[0]

            # Sample coupling filters from other neurons
            for n_pre in np.arange(N):
                # print "Sampling A[%d,%d]" % (n_pre,n_post)
                # WARNING Setting A is somewhat of a hack. It only works
                # because nvars copies x's pointer to A rather than making
                # a deep copy of the adjacency matrix.
                A[n_pre,n_post] = 0
                log_pr_noA = self._lp_A(A, x, n_post, I_bias, I_stim, I_imp)

                A[n_pre,n_post] = 1
                log_pr_A = self._lp_A(A, x, n_post, I_bias, I_stim, I_imp)

                # Sample A[n_pre,n_post]
                A[n_pre,n_post] = log_sum_exp_sample([log_pr_noA, log_pr_A])

                if not np.isfinite(log_pr_noA) or not np.isfinite(log_pr_A):
                    import pdb; pdb.set_trace()

                if n_pre == n_post and not A[n_pre, n_post]:
                    import pdb; pdb.set_trace()
github slinderman / theano_pyglm / pyglm / inference / kayak_gibbs.py (view on GitHub)
            # Precompute currents
            I_bias, I_stim_xt, I_net = self._precompute_vars(x, n)

            # Compute the probability of each neighboring location
            lnp_cache = {}
            curr_loc = L[n,:]
            curr_neighbors = self._get_neighbors(L[n,:])
            curr_lnps = []
            for ne in curr_neighbors:
                L[n,:] = np.array(ne)
                lnp_ne = self._lp_L(L, x, n, I_bias, I_stim_xt, I_net)
                lnp_cache[ne] = lnp_ne
                curr_lnps.append(lnp_ne)

            # Propose a neighbor according to its relative probability
            prop_loc = curr_neighbors[log_sum_exp_sample(curr_lnps)]

            # Compute acceptance probability
            prop_neighbors = self._get_neighbors(prop_loc)
            prop_lnps = []
            for ne in prop_neighbors:
                if ne in lnp_cache:
                    prop_lnps.append(lnp_cache[ne])
                else:
                    L[n,:] = np.array(ne)
                    lnp_ne = self._lp_L(L, x, n, I_bias, I_stim_xt, I_net)
                    lnp_cache[ne] = lnp_ne
                    prop_lnps.append(lnp_ne)

            # Acceptance probability is the ratio of normalizing constants
            lnp_accept = logsumexp(curr_lnps) - logsumexp(prop_lnps)
github slinderman / theano_pyglm / pyglm / inference / kayak_gibbs.py (view on GitHub)
        """
        from pyglm.inference.log_sum_exp import log_sum_exp_sample
        for latent_type in self.latent_types:
            # Update the latent types
            R = latent_type.R
            Y = x['latent'][latent_type.name]['Y']
            print "Y: ", Y

            for n in range(self.N):
                print "Sampling latent type of neuron ", n
                lpr = np.zeros(R)
                for r in range(R):
                    Y[n] = r
                    lpr[r] = self._lp_L(latent_type, Y, x, n)

                Y[n] = log_sum_exp_sample(lpr)

            x['latent'][latent_type.name]['Y'] = Y

            # Update alpha with the conjugate dirichlet prior
            from pyglm.components.priors import Dirichlet
            if isinstance(latent_type.alpha_prior, Dirichlet):
                suffstats = latent_type.alpha_prior.alpha0.get_value()
                suffstats += np.bincount(Y, minlength=R)
                alpha = np.random.dirichlet(suffstats)
                x['latent'][latent_type.name]['alpha'] = alpha
            else:
                raise Warning('Cannot update alpha prior!')

        return x
github slinderman / theano_pyglm / pyglm / inference / gibbs.py (view on GitHub)
                L[n] = prior.min + log_sum_exp_sample(lnp)

            elif isinstance(prior, JointCategorical):
                d1 = prior.max0-prior.min0+1
                d2 = prior.max1-prior.min1+1
                lnp = np.zeros((d1,d2))
                for i,l1 in enumerate(range(prior.min0, prior.max0+1)):
                    for j,l2 in enumerate(range(prior.min1, prior.max1+1)):
                        L[n,0] = l1
                        L[n,1] = l2
                        lnp[i,j] = self._lp_L(L, x, n)

                # import pdb; pdb.set_trace()
                # Gibbs sample from the 2d distribution
                ij = log_sum_exp_sample(lnp.ravel(order='C'))
                i,j = np.unravel_index(ij, (d1,d2), order='C')
                L[n,0] = prior.min0 + i
                L[n,1] = prior.min1 + j

            else:
                raise Exception('Only supporting Categorical and JointCategorical location priors')
        # Update current L
        if not self._check_bounds(L):
            import pdb; pdb.set_trace()
        x['latent'][self.location.name]['L'] = L.ravel()


        return x
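
The JointCategorical branch above shows how to use log_sum_exp_sample for a discrete variable that lives on a 2D grid: flatten the grid of log probabilities, sample a flat index, then map it back to grid coordinates with np.unravel_index. The same pattern in isolation (dummy log probabilities, not the repository's code):

import numpy as np
from pyglm.inference.log_sum_exp import log_sum_exp_sample

d1, d2 = 3, 4
lnp = np.random.randn(d1, d2)  # dummy grid of unnormalized log probabilities

ij = log_sum_exp_sample(lnp.ravel(order='C'))     # sample a flattened index
i, j = np.unravel_index(ij, (d1, d2), order='C')  # recover grid coordinates
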
github slinderman / theano_pyglm / pyglm / inference / kayak_gibbs.py (view on GitHub)
        """
        Sample each entry in L using Metropolis Hastings
        """
        from pyglm.components.priors import Categorical, JointCategorical
        prior = self.location.location_prior
        L = seval(self.location.Lmatrix, self.syms['latent'], x['latent'])
        # print "L: ", L
        for n in range(self.N):
            # Compute the probability of each possible location
            if isinstance(prior, Categorical):
                lnp = np.zeros(prior.max-prior.min + 1)
                for i,l in enumerate(range(prior.min, prior.max+1)):
                    L[n,0] = l
                    lnp[i] = self._lp_L(L, x, n)

                L[n] = prior.min + log_sum_exp_sample(lnp)

            elif isinstance(prior, JointCategorical):
                d1 = prior.max0-prior.min0+1
                d2 = prior.max1-prior.min1+1
                lnp = np.zeros((d1,d2))
                for i,l1 in enumerate(range(prior.min0, prior.max0+1)):
                    for j,l2 in enumerate(range(prior.min1, prior.max1+1)):
                        L[n,0] = l1
                        L[n,1] = l2
                        lnp[i,j] = self._lp_L(L, x, n)

                # import pdb; pdb.set_trace()
                # Gibbs sample from the 2d distribution
                ij = log_sum_exp_sample(lnp.ravel(order='C'))
                i,j = np.unravel_index(ij, (d1,d2), order='C')
                L[n,0] = prior.min0 + i
github slinderman / theano_pyglm / pyglm / inference / gibbs.py (view on GitHub)
        """
        from pyglm.inference.log_sum_exp import log_sum_exp_sample
        for latent_type in self.latent_types:
            # Update the latent types
            R = latent_type.R
            Y = x['latent'][latent_type.name]['Y']
            print "Y: ", Y

            for n in range(self.N):
                print "Sampling latent type of neuron ", n
                lpr = np.zeros(R)
                for r in range(R):
                    Y[n] = r
                    lpr[r] = self._lp_L(latent_type, Y, x, n)

                Y[n] = log_sum_exp_sample(lpr)

            x['latent'][latent_type.name]['Y'] = Y

            # Update alpha with the conjugate dirichlet prior
            from pyglm.components.priors import Dirichlet
            if isinstance(latent_type.alpha_prior, Dirichlet):
                suffstats = latent_type.alpha_prior.alpha0.get_value()
                suffstats += np.bincount(Y, minlength=R)
                alpha = np.random.dirichlet(suffstats)
                x['latent'][latent_type.name]['alpha'] = alpha
            else:
                raise Warning('Cannot update alpha prior!')

        return x
github slinderman / theano_pyglm / pyglm / inference / gibbs.py (view on GitHub)
            # Precompute currents
            I_bias, I_stim_xt, I_net = self._precompute_vars(x, n)

            # Compute the probability of each neighboring location
            lnp_cache = {}
            curr_loc = L[n,:]
            curr_neighbors = self._get_neighbors(L[n,:])
            curr_lnps = []
            for ne in curr_neighbors:
                L[n,:] = np.array(ne)
                lnp_ne = self._lp_L(L, x, n, I_bias, I_stim_xt, I_net)
                lnp_cache[ne] = lnp_ne
                curr_lnps.append(lnp_ne)

            # Propose a neighbor according to its relative probability
            prop_loc = curr_neighbors[log_sum_exp_sample(curr_lnps)]

            # Compute acceptance probability
            prop_neighbors = self._get_neighbors(prop_loc)
            prop_lnps = []
            for ne in prop_neighbors:
                if ne in lnp_cache:
                    prop_lnps.append(lnp_cache[ne])
                else:
                    L[n,:] = np.array(ne)
                    lnp_ne = self._lp_L(L, x, n, I_bias, I_stim_xt, I_net)
                    lnp_cache[ne] = lnp_ne
                    prop_lnps.append(lnp_ne)

            # Acceptance probability is the ratio of normalizing constants
            lnp_accept = logsumexp(curr_lnps) - logsumexp(prop_lnps)
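
Both location-sampling excerpts end right after computing lnp_accept, the log ratio of the two neighborhoods' normalizing constants. The accept/reject step is not shown above; in a standard Metropolis-Hastings update it would look roughly like this, reusing the variable names from the excerpt (a sketch under that assumption, not the repository's code):

import numpy as np

# Accept the proposed location with probability min(1, exp(lnp_accept))
if np.log(np.random.rand()) < lnp_accept:
    L[n, :] = np.array(prop_loc)  # move to the proposed neighbor
else:
    L[n, :] = curr_loc            # stay at the current location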