How to use the pyglm.utils.theano_func_wrapper.seval function in pyglm

To help you get started, we've selected a few examples of pyglm.utils.theano_func_wrapper.seval, based on popular ways it is used in public projects.

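Every example below calls seval(expr, syms, values): a symbolic Theano expression, a dictionary mapping names to the symbolic variables the expression depends on, and a dictionary mapping the same names to concrete values. As a quick orientation before the real excerpts, here is a toy call in that style. The expression and variable names are made up for illustration, and the pair-by-key behavior is inferred from the examples below rather than taken from pyglm's documentation:

import theano.tensor as T
from pyglm.utils.theano_func_wrapper import seval

# Hypothetical toy expression: seval should evaluate b * n at the
# supplied values, pairing symbols and values by dictionary key.
b = T.dscalar('bias')
n = T.dscalar('n')

syms = {'bias': b,   'n': n}    # names -> symbolic variables
vals = {'bias': 1.5, 'n': 2.0}  # the same names -> concrete values

print(seval(b * n, syms, vals))  # expected: 3.0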

slinderman/theano_pyglm · pyglm/inference/gibbs.py (view on GitHub)
def _glm_logp(self, x_vec, x_all, I_stim, I_net):
        """
        Compute the log probability (or gradients and Hessians thereof)
        of the given GLM variables. We also need the rest of the population variables,
        i.e. those that are not being sampled currently, in order to evaluate the log
        probability.
        """
        # Extract the glm parameters
        x_bias = unpackdict(x_vec, self.glm_shapes)
        set_vars(self.bias_syms, x_all['glm']['bias'], x_bias)
        lp = seval(self.bias_model.log_p,
                   self.syms,
                   x_all)

        # Compute the log likelihood for each data sequence
        for data in self.population.data_sequences:
            self.population.set_data(data)
            lp += seval(self.glm.ll,
                        {'I_stim' : self.glm.bkgd_model.I_stim,
                         'I_net' : self.glm.I_net,
                         'bias' : self.bias_model.bias,
                         'n' : self.glm.n
                        },
                        {'I_stim' : I_stim,
                         'I_net' : I_net,
                         'bias' : x_vec,
                         'n' : x_all['glm']['n']
                        })

        return lp
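
For a mental model of what such a helper has to do, the sketch below flattens the nested symbol and value dictionaries (the excerpts pass nested dicts like x_all['glm']), compiles the expression with theano.function, and caches the compiled function. This is an illustrative reimplementation under those assumptions, not pyglm's actual code:

import theano

_compiled = {}

def seval_sketch(expr, syms, values):
    # Flatten nested dicts such as {'glm': {'bias': ...}} so that
    # symbols and values can be matched up by leaf key.
    def flatten(d, out):
        for k, v in d.items():
            if isinstance(v, dict):
                flatten(v, out)
            else:
                out[k] = v
        return out

    flat_syms = flatten(syms, {})
    flat_vals = flatten(values, {})
    keys = sorted(flat_syms)

    # Compile once per expression; Theano compilation is expensive,
    # so reuse matters inside Gibbs sampling loops.
    if expr not in _compiled:
        _compiled[expr] = theano.function([flat_syms[k] for k in keys],
                                          expr,
                                          on_unused_input='ignore')
    return _compiled[expr](*[flat_vals[k] for k in keys])
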
slinderman/theano_pyglm · pyglm/inference/kayak_gibbs.py (view on GitHub)
def _lp_L(self, latent_type, Y, x, n):
        # Set Y in state dict x
        xn = self.population.extract_vars(x, n)
        set_vars('Y', xn['latent'][latent_type.name], Y.ravel())
        lp = seval(latent_type.log_p, self.syms, xn)
        lp += seval(self.net_log_lkhd, self.syms, xn)

        # Compute the log likelihood for each data sequence
        for data in self.population.data_sequences:
            self.population.set_data(data)
            lp += seval(self.glm_log_lkhd, self.syms, xn)

        return lp
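
Two things worth noting in this excerpt: set_vars overwrites one entry of the nested value dict in place before evaluation, and because seval returns an ordinary NumPy value, the per-sequence likelihood terms can simply be accumulated with +=, with set_data re-binding the shared data before each evaluation.
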
slinderman/theano_pyglm · pyglm/inference/gibbs.py (view on GitHub)
def _precompute_currents(self, x, n_post):
        """ Precompute currents for sampling A and W
        """
        nvars = self.population.extract_vars(x, n_post)

        I_bias = seval(self.glm.bias_model.I_bias,
                       self.syms,
                       nvars)

        I_stim = seval(self.glm.bkgd_model.I_stim,
                       self.syms,
                       nvars)

        I_imp = seval(self.glm.imp_model.I_imp,
                      self.syms,
                      nvars)

        return I_bias, I_stim, I_imp
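
These helpers exist because seval returns plain NumPy arrays: I_bias, I_stim, and I_imp are evaluated once per sweep and then fed back in as concrete values in later seval calls (see the values dict in _glm_logp above), so the expensive stimulus and network convolutions are not recomputed inside the inner sampling loop.
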
slinderman/theano_pyglm · pyglm/inference/gibbs.py (view on GitHub)
def _precompute_vars(self, x, n):
        """ Precompute currents for sampling A and W
        """
        nvars = self.population.extract_vars(x, n)

        I_bias = seval(self.glm.bias_model.I_bias,
                       self.syms,
                       nvars)

        I_stim_xt = seval(self.glm.bkgd_model.I_stim_xt,
                       self.syms,
                       nvars)

        I_net = seval(self.glm.I_net,
                       self.syms,
                       nvars)

        return I_bias, I_stim_xt, I_net
slinderman/theano_pyglm · pyglm/inference/gibbs.py (view on GitHub)
            s = \
            {
                'I_net' : self.glm.I_net,
                'I_bias' : self.glm.bias_model.I_bias,
                'n' : self.glm.n,
            }
            s.update(self.syms['latent'])

            xv = \
            {
                'I_net' : I_nets[n],
                'I_bias' : I_biases[n],
                'n' : n,
            }
            xv.update(xn['latent'])

            # Compute the GLM log likelihood for each data sequence
            for data in self.population.data_sequences:
                self.population.set_data(data)
                g_lp += seval(self.g_log_lkhd_wrt_wt, s, xv)

        return g_lp
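
Here the symbol dict s and the value dict xv are assembled by hand; the only apparent requirement is that the two share the same keys, including the 'latent' entries merged in with update, so that every symbolic input has a matching concrete value.
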
slinderman/theano_pyglm · pyglm/inference/kayak_gibbs.py (view on GitHub)
def _precompute_vars(self, x, n):
        """ Precompute currents for sampling A and W
        """
        nvars = self.population.extract_vars(x, n)

        I_bias = seval(self.glm.bias_model.I_bias,
                       self.syms,
                       nvars)

        I_stim_xt = seval(self.glm.bkgd_model.I_stim_xt,
                       self.syms,
                       nvars)

        I_net = seval(self.glm.I_net,
                       self.syms,
                       nvars)

        return I_bias, I_stim_xt, I_net
slinderman/theano_pyglm · pyglm/population.py (view on GitHub)
def compute_ll(self, vars):
        """ Compute the log likelihood under a given set of variables
        """
        ll = 0.0

        # Get set of symbolic variables
        syms = self.get_variables()

        # Add the likelihood from each GLM
        for n in range(self.N):
            nvars = self.extract_vars(vars, n)
            ll += seval(self.glm.ll,
                        syms,
                        nvars)

        return ll
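
The same pattern works with a single expression evaluated many times under different values. Here is a runnable toy version of this loop, again assuming seval pairs symbols and values by key; the quadratic "log likelihood" is a stand-in, not a pyglm model:

import numpy as np
import theano.tensor as T
from pyglm.utils.theano_func_wrapper import seval

w = T.dvector('w')
ll_expr = -0.5 * T.sum(w ** 2)   # stand-in log likelihood

ll = 0.0
for n in range(3):
    nvals = {'w': float(n) * np.ones(4)}   # per-"neuron" values
    ll += seval(ll_expr, {'w': w}, nvals)
print(ll)   # -0.5 * (0 + 4 + 16) = -10.0
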
slinderman/theano_pyglm · pyglm/population.py (view on GitHub)
        # Initialize the background rate
        X = np.zeros((nT,N))
        for n in np.arange(N):
            nvars = self.extract_vars(vars, n)
            X[:,n] = seval(self.glm.bias_model.I_bias,
                           syms,
                           nvars)

        # Add stimulus induced currents if given
        temp_data = {'S' : np.zeros((nT, N)),
                     'stim' : stim,
                     'dt_stim': dt_stim}
        self.add_data(temp_data)
        for n in np.arange(N):
            nvars = self.extract_vars(vars, n)
            X[:,n] += seval(self.glm.bkgd_model.I_stim,
                            syms,
                            nvars)
        print "Max background rate: %s" % str(self.glm.nlin_model.f_nlin(np.amax(X)))

        # Remove the temp data from the population data sequences
        self.data_sequences.pop()

        # Get the impulse response functions
        imps = []
        for n_post in np.arange(N):
            nvars = self.extract_vars(vars, n_post)
            imps.append(seval(self.glm.imp_model.impulse,
                                  syms,
                                  nvars))
        imps = np.transpose(np.array(imps), axes=[1,0,2])
        T_imp = imps.shape[2]
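
Each impulse array appended above appears to have shape (N, T_imp), one row per presynaptic neuron, so the stacked array is (N_post, N_pre, T_imp) and axes=[1, 0, 2] reorders it to (N_pre, N_post, T_imp). A quick NumPy shape check with toy sizes (the per-neuron shape is an assumption, not taken from pyglm):

import numpy as np

N, T_imp = 3, 5
imps = [np.zeros((N, T_imp)) for n_post in range(N)]  # one per postsynaptic neuron
imps = np.transpose(np.array(imps), axes=[1, 0, 2])
print(imps.shape)     # (3, 3, 5)
print(imps.shape[2])  # T_imp = 5, matching the line above
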
slinderman/theano_pyglm · pyglm/inference/gibbs.py (view on GitHub)
"""
        nvars = self.population.extract_vars(x, n_post)

        I_bias = seval(self.glm.bias_model.I_bias,
                       self.syms,
                       nvars)

        I_stim = seval(self.glm.bkgd_model.I_stim,
                       self.syms,
                       nvars)

        I_imp = seval(self.glm.imp_model.I_imp,
                      self.syms,
                      nvars)

        p_A = seval(self.network.graph.pA,
                    self.syms['net'],
                    x['net'])

        return I_bias, I_stim, I_imp, p_A
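
Note that seval also accepts a sub-tree of the symbol and value dicts: pA depends only on network variables, so just self.syms['net'] and x['net'] are passed, while the current computations above use the full nested dicts.
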
slinderman/theano_pyglm · pyglm/population.py (view on GitHub)

        # Debug: compute effective weights
        # tt_imp = dt*np.arange(T_imp)
        # Weff = np.trapz(imps, tt_imp, axis=2)
        # print "Effective impulse weights: "
        # print Weff

        # Iterate over each time step and generate spikes
        S = np.zeros((nT,N))
        acc = np.zeros(N)
        thr = -np.log(np.random.rand(N))
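
The excerpt is cut off just before the sampling loop, but the setup is the standard time-rescaling construction: acc accumulates integrated intensity and thr holds unit-rate exponential thresholds. Below is a self-contained sketch of a loop consistent with that setup; it is illustrative, not pyglm's code, and rate is assumed to be an (nT, N) array of nonnegative firing rates:

import numpy as np

def sample_spikes(rate, dt):
    nT, N = rate.shape
    S = np.zeros((nT, N))
    acc = np.zeros(N)
    thr = -np.log(np.random.rand(N))      # Exp(1) thresholds
    for t in range(nT):
        acc += rate[t, :] * dt            # integrate the intensity
        spiked = acc > thr
        S[t, spiked] = 1
        acc[spiked] = 0.0                 # reset accumulators that fired
        thr[spiked] = -np.log(np.random.rand(int(spiked.sum())))
    return S

S = sample_spikes(10.0 * np.ones((1000, 4)), dt=0.001)
print(S.sum(axis=0))                      # roughly 10 spikes per neuron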