# to tile the input properly (obsolete -- for high dim)
seedR4 = 4 # for the nengonetexpect layer to generate reference signal
seedRin = 2
np.random.seed([seedRin])  # this seed generates the inpfn below (and any other non-nengo randomness)
tau = 0.02 # second # as for the rate network
#tau = 0.1 # second
# original is 0.02, but 0.1 gives longer response
tau_AMPA = 1e-3 # second # fast E to I connections
spikingNeurons = False # whether to use Ensemble (LIF neurons) or just Node
# the L2 layer has to be an Ensemble (neurons) to apply the PES learning rule;
# the rest can be Ensemble or Node
if spikingNeurons:
    neuronType = nengo.neurons.LIF()
    # use LIF neurons for all ensembles
else:
    #neuronType = nengo.neurons.LIFRate()
    # use LIFRate neurons for all ensembles
    # only about 10% faster than LIF for same dt=0.001
    # perhaps the plasticity calculations overpower
    # gave overflow error in synapses.py for dt = 0.01
    neuronType = None # use a Node() instead of Ensemble()
    # OOPS! doesn't work as the PES rule only works with neurons
    # in any case, non-linear proof only works with large number of neurons
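# Minimal standalone sketch (illustration only; the helper name _pes_demo and
# its parameter values are hypothetical) of why the learned layer must be an
# Ensemble: nengo.PES updates the decoders/weights from the presynaptic neural
# activities, which a plain Node does not have. Assumes nengo is imported.
def _pes_demo():
    with nengo.Network(seed=0) as demo_net:
        pre = nengo.Ensemble(100, dimensions=1, neuron_type=nengo.LIF())
        post = nengo.Ensemble(100, dimensions=1)
        error = nengo.Node(size_in=1)
        conn = nengo.Connection(pre, post,
                                learning_rule_type=nengo.PES(learning_rate=1e-4))
        nengo.Connection(error, conn.learning_rule)  # error signal drives the decoder update
    return demo_net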
###
### choose dynamics evolution matrix ###
###
#init_vec_idx = -1
init_vec_idx = 0 # first / largest response vector
# seed for the W file is in rate_evolve.py
# output is very sensitive to this seedR
# as I possibly don't have enough neurons
# to tile the input properly (obsolete -- for high dim)
seedR4 = 5 # for the nengonetexpect layer to generate reference signal
seedRin = 2
np.random.seed([seedRin])  # this seed generates the inpfn below (and any other non-nengo randomness)
tau = 0.02 # second, synaptic tau
tau_AMPA = 1e-3 # second # fast E to I connections
spikingNeurons = False # whether to use Ensemble (LIF neurons) or just Node
# the L2 layer has to be an Ensemble (neurons) to apply the PES learning rule;
# the rest can be Ensemble or Node
if spikingNeurons:
    neuronType = nengo.neurons.LIF()
    # use LIF neurons for all ensembles
else:
    #neuronType = nengo.neurons.LIFRate()
    # use LIFRate neurons for all ensembles
    # only about 10% faster than LIF for same dt=0.001
    # perhaps the plasticity calculations overpower
    # gave overflow error in synapses.py for dt = 0.01
    neuronType = None # use a Node() instead of Ensemble()
    # OOPS! doesn't work as the PES rule only works with neurons
    # in any case, non-linear proof only works with large number of neurons
###
### choose dynamics evolution matrix ###
###
#init_vec_idx = -1
init_vec_idx = 0 # first / largest response vector
## build an ensemble exactly as in the test file simulation
## and find the biases of those neurons
## NOTE: Set the seeds and other params manually below,
## as they could be ambiguous from the file name
import nengo
import numpy as np
import matplotlib.pyplot as plt
Nexc, N, reprRadius, nrngain = 3000, 2, 5, 2
seedR0, seedR2 = 2, 4
dt = 0.001  # simulation timestep (assumed here; matches the dt=0.001 referred to in the snippets above)
gain_bias_set = True
#biaslow, biashigh = 1 - nrngain, 1 + nrngain
biaslow, biashigh = -nrngain, nrngain
print('building model')
mainModel = nengo.Network(label="Single layer network", seed=seedR0)
with mainModel:
    ratorOut = nengo.Ensemble( Nexc, dimensions=N, radius=reprRadius,
                               neuron_type=nengo.neurons.LIF(),
                               bias=nengo.dists.Uniform(biaslow,biashigh), gain=np.ones(Nexc)*nrngain,
                               #max_rates=nengo.dists.Uniform(200, 400),
                               noise=None, seed=seedR2, label='ratorOut' )
sim = nengo.Simulator(mainModel,dt)
biases = sim.data[ratorOut].bias
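# 'zeroidxs' is not defined in this excerpt; a minimal sketch (one possible
# definition, an assumption) is the set of neurons that never fire for inputs
# within the representational radius, i.e. whose maximal LIF drive
# gain + bias never exceeds the firing threshold of 1.
zeroidxs = np.nonzero(sim.data[ratorOut].gain + biases <= 1.)[0]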
zerofiringbiases = biases[zeroidxs]
gains = sim.data[ratorOut].gain
zerofiringgains = gains[zeroidxs]
if gain_bias_set: histrange, biasrange = 5, 5
else: histrange, biasrange = 500, 100
fig = plt.figure(facecolor='w')
ax1 = plt.subplot(231)
vals,_,_ = ax1.hist(gains,bins=50,range=(0,histrange),color='k',histtype='step')
ax1.set_xlabel('all gains')
ax2 = plt.subplot(232)
nengo_to_app_graph_map):
    """ This converts a nengo ensemble into a nengo operator used in the \
    nengo operator graph.

    :param nengo_ensemble: the nengo ensemble to be converted
    :param random_number_generator: the random number generator of the \
        simulator.
    :param nengo_operator_graph: the application graph holding nengo \
        operators.
    :param nengo_to_app_graph_map: map between nengo object and nengo \
        operators.
    :param utilise_extra_core_for_output_types_probe: flag that allows \
        the user to decide if probes should be on separate vertices or not.
    :rtype: None
    """
    if isinstance(nengo_ensemble.neuron_type, nengo.neurons.LIF):
        operator = LIFApplicationVertex(
            label="LIF neurons for ensemble {}".format(
                nengo_ensemble.label),
            rng=random_number_generator,
            size_in=nengo_ensemble.size_in,
            seed=helpful_functions.get_seed(nengo_ensemble),
            utilise_extra_core_for_output_types_probe=(
                utilise_extra_core_for_output_types_probe),
            **LIFApplicationVertex.generate_parameters_from_ensemble(
                nengo_ensemble, random_number_generator))
    else:
        raise NeuronTypeConstructorNotFoundException(
            "could not find a constructor for neuron type {}. I have "
            "constructors for the following neuron types LIF".format(
                nengo_ensemble.neuron_type))
# update objects
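# Sketch of the same type-dispatch idea with hypothetical helper names
# (_NEURON_VERTEX_CONSTRUCTORS and _constructor_for are illustrations, not
# part of the real module): keep a map from supported nengo neuron classes to
# vertex constructors instead of a chain of isinstance checks.
_NEURON_VERTEX_CONSTRUCTORS = {nengo.neurons.LIF: LIFApplicationVertex}

def _constructor_for(neuron_type):
    for neuron_cls, constructor in _NEURON_VERTEX_CONSTRUCTORS.items():
        if isinstance(neuron_type, neuron_cls):
            return constructor
    raise NeuronTypeConstructorNotFoundException(
        "could not find a constructor for neuron type {}".format(neuron_type))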
def execute_trial(self, p):
    if p.debug:
        logging.basicConfig(level=logging.DEBUG)
    model = self.model(p)
    import nengo
    if not isinstance(model, nengo.Network):
        raise ValueError('model() must return a nengo.Network')
    if p.neuron_type != 'default':
        if isinstance(p.neuron_type, basestring):
            neuron_type = eval(p.neuron_type)
        else:
            neuron_type = p.neuron_type
        if not isinstance(neuron_type, nengo.neurons.NeuronType):
            raise AttributeError('%s is not a NeuronType' % p.neuron_type)
        for ens in model.all_ensembles:
            ens.neuron_type = neuron_type
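        # Example of how this override is meant to be used (assumed usage):
        # p.neuron_type can be a string such as 'nengo.LIFRate()' (eval'd
        # above) or an already-instantiated nengo NeuronType; every Ensemble
        # in the returned network is switched to it before simulation.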
    if p.gui:
        locals_dict = getattr(self, 'locals', dict(model=model))
        import nengo_gui
        import webbrowser
        if hasattr(nengo_gui, 'guibackend'):
            host = 'localhost'
            port = 8080
            server_settings = nengo_gui.guibackend.GuiServerSettings((host, port))
            model_context = nengo_gui.guibackend.ModelContext(
                model=model,
else:
    processIn = None
    processOut = None
nodeIn = nengo.Node( size_in=N, output = lambda timeval,currval: inpfn(timeval) )
# with zero bias and input at reprRadius: gain=1.685 gives ~50 Hz, gain=3.033 gives ~100 Hz, gain=40.5 gives ~400 Hz
#nrngain = 1.5#2#3.033
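# Sketch: a quick check of the gain values quoted above, assuming the standard
# nengo LIF rate curve rate = 1/(tau_ref + tau_rc*ln(1 + 1/(J-1))) with the
# default tau_rc=0.02 s, tau_ref=0.002 s; with zero bias and input at
# reprRadius the drive J equals the gain. The helper name is hypothetical.
def _lif_gain_for_rate(rate, tau_rc=0.02, tau_ref=0.002):
    return 1. + 1./(np.exp((1./rate - tau_ref)/tau_rc) - 1.)
# _lif_gain_for_rate(50.) ~ 1.685, _lif_gain_for_rate(100.) ~ 3.033,
# _lif_gain_for_rate(400.) ~ 40.5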
# input layer from which feedforward weights to ratorOut are computed
ratorIn = nengo.Ensemble( Nexc, dimensions=N, radius=reprRadiusIn,
                          neuron_type=nengo.neurons.LIF(),
                          #bias=nengo.dists.Uniform(-nrngain,nrngain), gain=np.ones(Nexc)*nrngain,
                          max_rates=nengo.dists.Uniform(200, 400),
                          noise=processIn, seed=seedR1, label='ratorIn' )
nengo.Connection(nodeIn, ratorIn, synapse=None) # No filtering here as no filtering/delay in the plant/arm
# another layer with learning incorporated
ratorOut = nengo.Ensemble( Nexc, dimensions=N, radius=reprRadius,
                           neuron_type=nengo.neurons.LIF(),
                           #bias=nengo.dists.Uniform(-nrngain,nrngain), gain=np.ones(Nexc)*nrngain,
                           max_rates=nengo.dists.Uniform(200, 400),
                           noise=processOut, seed=seedR2, label='ratorOut' )
if trialClamp:
    # clamp ratorIn and ratorOut at the end of each trial (Tperiod) for 100ms.
    # Error clamped below during end of the trial for 100ms.
    clampValsZeros = np.zeros(Nexc)
    clampValsNegs = -100.*np.ones(Nexc)
    endTrialClamp = nengo.Node(lambda t: clampValsZeros if (t%Tperiod)<(Tperiod-Tclamp) else clampValsNegs)
    nengo.Connection(endTrialClamp,ratorIn.neurons,synapse=1e-3)
    nengo.Connection(endTrialClamp,ratorOut.neurons,synapse=1e-3)
    # fast synapse for fast-reacting clamp
if plastDecoders:
    # don't use the same seeds across the connections,
if trialClamp:
    # clamp ratorOut at the end of each trial (Tperiod) for 100ms.
    # Error clamped below during end of the trial for 100ms.
    clampValsZeros = np.zeros(Nexc)
    clampValsNegs = -100.*np.ones(Nexc)
    endTrialClamp = nengo.Node(lambda t: clampValsZeros if (t%Tperiod)<(Tperiod-Tclamp) else clampValsNegs)
    nengo.Connection(endTrialClamp,ratorOut.neurons,synapse=1e-3)
    # fast synapse for fast-reacting clamp
if inhibition and not plastDecoders: # excClipType='clip<0' only works with weights
    Ninh = Nexc//4  # integer division, so the Ensemble gets an integer neuron count
    IreprRadius = 1.0
    inhibrator = nengo.Ensemble( Ninh, dimensions=1,
                                 intercepts=np.random.uniform(-0.1*IreprRadius,IreprRadius,size=Ninh),
                                 encoders=np.ones(shape=(Ninh,1))*IreprRadius, radius=IreprRadius,
                                 neuron_type=nengo.neurons.LIF(), seed=seedR2 )
    # only represents the biasing function f(x), hence dimension = 1
    # some neurons have a negative intercept, i.e. baseline firing
    # encoders from f(x) to neurons are all 1 (a la Parisien et al 2008)
    excClipType = 'clip<0'
    phi_f = 1.0/(Nexc*400.0) / 1.5  # a positive constant that scales the
    # biasing function f(ExcActivityVec) to lie between 0 and 1:
    # max firing of the Nexc neurons is 400Hz, the /1.5 is ad hoc;
    # this ensures f stays between 0 and 1
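    # Quick check of the scaling above: with all Nexc neurons saturated at
    # ~400 Hz, f = phi_f * sum(rates) = (Nexc*400)/(Nexc*400*1.5) = 1/1.5 ~ 0.67,
    # so f indeed stays within [0, 1] and inside the inhibrator's radius of 1.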
    EtoI = nengo.Connection(ratorOut.neurons, inhibrator,
                            transform = phi_f*np.ones(shape=(1,Nexc)),
                            synapse=tau_AMPA)
    ItoE = nengo.Connection(inhibrator.neurons, ratorOut.neurons,
                            transform = np.zeros(shape=(Nexc,Ninh)),
                            synapse=tau)  # need neurons->neurons for InhVSG
    ItoE.learning_rule_type = nengo.InhVSG(
        learning_rate=2e-8, pre_tau=tau,