def test_combine_param_redundant_fct_class_spec(self):
    t1 = pnl.TransferMechanism(size=2)
    t2 = pnl.TransferMechanism(size=2)
    t3 = pnl.TransferMechanism(
        size=2,
        input_ports=pnl.InputPort(
            function=psyneulink.core.components.functions.combinationfunctions.LinearCombination,
            combine=pnl.PRODUCT))
    c = pnl.Composition(pathways=[[t1, t3], [t2, t3]])
    input_dict = {t1: [1, 2], t2: [3, 4]}
    val = c.run(inputs=input_dict)
    assert np.allclose(val, [[3, 8]])

def test_agent_rep_assignement_as_controller_and_replacement(self):
    mech = pnl.ProcessingMechanism()
    comp = pnl.Composition(name='comp',
                           pathways=[mech],
                           controller=pnl.OptimizationControlMechanism(agent_rep=None,
                                                                       control_signals=(pnl.SLOPE, mech)))
    assert comp.controller.composition == comp
    assert any(pnl.SLOPE in p_name for p_name in comp.projections.names)
    assert not any(pnl.INTERCEPT in p_name for p_name in comp.projections.names)

    new_ocm = pnl.OptimizationControlMechanism(agent_rep=None, control_signals=(pnl.INTERCEPT, mech))
    old_ocm = comp.controller
    comp.add_controller(new_ocm)
    assert comp.controller == new_ocm
    assert old_ocm.composition is None
    assert not any(pnl.SLOPE in p_name for p_name in comp.projections.names)
    assert any(pnl.INTERCEPT in p_name for p_name in comp.projections.names)
    function=pnl.Logistic(gain=1),
    leak=.5,
    competition=2,
    noise=0,
    time_step_size=.1,
    termination_measure=pnl.TimeScale.TRIAL,
    termination_threshold=3,
    name='Task Activations [Act 1, Act 2]'
)

csiController = pnl.ControlMechanism(
    name='Control Mechanism',
    monitor_for_control=cueInterval,
    control_signals=[(pnl.TERMINATION_THRESHOLD, activation)],
    modulation=pnl.OVERRIDE
)

comp = pnl.Composition()
comp.add_linear_processing_pathway(pathway=[taskLayer, activation])
comp.add_node(cueInterval)
comp.add_node(csiController)

expected_dependencies = {
    cueInterval: set(),
    taskLayer: set(),
    activation: set([csiController, taskLayer]),
    csiController: set([cueInterval])
}
assert comp.scheduler.dependency_dict == expected_dependencies

def test_get_output_values_prop(self):
    A = pnl.ProcessingMechanism()
    c = pnl.Composition()
    c.add_node(A)
    result = c.run(inputs={A: [1]}, num_trials=2)
    assert result == c.output_values == [np.array([1])]

def test_dot_notation():
    c = pnl.Composition()
    d = pnl.Composition()
    t = pnl.TransferMechanism()
    c.add_node(t)
    d.add_node(t)
    t.execute(1)
    assert t.value == 1
    c.run({t: 5})
    assert t.value == 5
    d.run({t: 10})
    assert t.value == 10
    c.run({t: 20}, context='custom execution id')
    assert t.value == 20
    # context None
    assert t.parameters.value.get() == 1
    assert t.parameters.value.get(c) == 5
output_comp = pnl.TransferMechanism(name='output_comp',
                                    default_variable=np.zeros(1),
                                    function=pnl.Logistic())

in_to_hidden_comp = pnl.MappingProjection(name='in_to_hidden_comp',
                                          matrix=in_to_hidden_matrix.copy(),
                                          sender=input_comp,
                                          receiver=hidden_comp)

hidden_to_out_comp = pnl.MappingProjection(name='hidden_to_out_comp',
                                           matrix=hidden_to_out_matrix.copy(),
                                           sender=hidden_comp,
                                           receiver=output_comp)

xor_comp = pnl.Composition()

backprop_pathway = xor_comp.add_backpropagation_learning_pathway([input_comp,
                                                                  in_to_hidden_comp,
                                                                  hidden_comp,
                                                                  hidden_to_out_comp,
                                                                  output_comp],
                                                                 learning_rate=10)
# Try to run without any targets (non-learning)
xor_inputs = np.array(  # the inputs we will provide to the model
    [[0, 0],
     [0, 1],
     [1, 0],
     [1, 1]])

xor_comp.run(inputs={input_comp: xor_inputs})
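# Hedged sketch (not part of the original snippet): to actually train the XOR network,
# targets are passed to learn() keyed by the learning pathway's TARGET mechanism;
# xor_targets below is a hypothetical array of desired outputs, one per input pattern.
xor_targets = np.array([[0], [1], [1], [0]])
xor_comp.learn(inputs={input_comp: xor_inputs,
                       backprop_pathway.target: xor_targets},
               num_trials=4)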

def show_target(comp):
    i = comp.external_input_values
    t = comp.pathways[0].target.input_ports[0].parameters.value.get(comp)
    print('\nOLD WEIGHTS: \n')
    print('- Input Weights: \n', Input_Weights.parameters.matrix.get(comp))
    print('- Middle Weights: \n', Middle_Weights.parameters.matrix.get(comp))
    print('- Output Weights: \n', Output_Weights.parameters.matrix.get(comp))
    print('\nSTIMULI:\n\n- Input: {}\n- Target: {}\n'.format(i, t))
    print('ACTIVITY FROM OLD WEIGHTS: \n')
    print('- Middle 1: \n', Hidden_Layer_1.parameters.value.get(comp))
    print('- Middle 2: \n', Hidden_Layer_2.parameters.value.get(comp))
    print('- Output:\n', Output_Layer.parameters.value.get(comp))

comp = pnl.Composition(name='Multilayer-Learning',
                       pathways=[(z, pnl.BackPropagation)],
                       targets=[0, 0, 1],
                       learning_rate=2.0,
                       prefs={pnl.VERBOSE_PREF: False,
                              pnl.REPORT_OUTPUT_PREF: True}
                       )
# Log Middle_Weights of MappingProjection to Hidden_Layer_2
# Hidden_Layer_2.set_log_conditions('Middle Weights')
Middle_Weights.set_log_conditions('mod_matrix')
comp.reportOutputPref = True
# Shows graph with full information:
comp.show_graph(show_dimensions=pnl.ALL)
comp.show_graph(show_learning=pnl.ALL)
# comp.show_graph(show_learning=pnl.ALL, show_processes=True)
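# Hedged sketch (not in the original script): once the composition has been run or trained,
# the 'mod_matrix' entries logged above for Middle_Weights can be inspected via the Log API, e.g.:
# Middle_Weights.log.print_entries()                      # print each logged matrix per execution
# weight_log = Middle_Weights.log.nparray_dictionary()    # or collect the entries programmatically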
# Currently necessary to manually reset the execution count of the LCA for each trial
# Call this in run using call_after_trial
def reset_lca_count():
    decisionMaker.execution_count = 0
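# Hedged usage sketch, per the comment above (the inputs dict here is hypothetical, not from
# the original script): pass reset_lca_count via call_after_trial so the LCA's execution
# count is cleared after every trial, e.g.:
# stabilityFlexibility.run(inputs=trial_inputs, call_after_trial=reset_lca_count)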
taskLayer.set_log_conditions([pnl.RESULT])
stimulusInfo.set_log_conditions([pnl.RESULT])
activation.set_log_conditions([pnl.RESULT, "mod_gain"])
nonAutomaticComponent.set_log_conditions([pnl.RESULT])
lcaCombination.set_log_conditions([pnl.RESULT])
decisionMaker.set_log_conditions([pnl.RESULT, pnl.VALUE])
# Composition Creation
stabilityFlexibility = pnl.Composition(controller_mode=pnl.BEFORE)
# Node Creation
stabilityFlexibility.add_node(taskLayer)
stabilityFlexibility.add_node(activation)
stabilityFlexibility.add_node(nonAutomaticComponent)
stabilityFlexibility.add_node(stimulusInfo)
stabilityFlexibility.add_node(lcaCombination)
#stabilityFlexibility.add_node(decisionMaker, required_roles=pnl.NodeRole.OUTPUT)
# Projection Creation
stabilityFlexibility.add_projection(sender=taskLayer, receiver=activation)
stabilityFlexibility.add_projection(activation_to_nonAutomaticComponent)
stabilityFlexibility.add_projection(sender=stimulusInfo, receiver=nonAutomaticComponent)
stabilityFlexibility.add_projection(stimulusInfo_to_lcaCombination)
stabilityFlexibility.add_projection(sender=nonAutomaticComponent, receiver=lcaCombination)
#stabilityFlexibility.add_projection(sender=lcaCombination, receiver=decisionMaker)
decisionMaker = pnl.DDM(function=pnl.DriftDiffusionAnalytical(drift_rate=DRIFT,
                                                              starting_point=STARTING_POINT,
                                                              threshold=THRESHOLD,
                                                              noise=NOISE,
                                                              t0=T0),
                        output_states=[pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME,
                                       pnl.PROBABILITY_UPPER_THRESHOLD, pnl.PROBABILITY_LOWER_THRESHOLD],
                        name='DDM')

decisionMaker.set_log_conditions([pnl.PROBABILITY_UPPER_THRESHOLD, pnl.PROBABILITY_LOWER_THRESHOLD,
                                  pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME])
########### Composition
stabilityFlexibility = pnl.Composition()
### NODE CREATION
stabilityFlexibility.add_node(inputLayer)
stabilityFlexibility.add_node(activation)
stabilityFlexibility.add_node(congruenceWeighting)
stabilityFlexibility.add_node(controlledElement)
stabilityFlexibility.add_node(stimulusInfo)
stabilityFlexibility.add_node(ddmCombination)
stabilityFlexibility.add_node(decisionMaker)
stabilityFlexibility.add_projection(sender=inputLayer, receiver=activation)
stabilityFlexibility.add_projection(sender=activation, receiver=controlledElement)
stabilityFlexibility.add_projection(sender=stimulusInfo, receiver=congruenceWeighting)
stabilityFlexibility.add_projection(sender=stimulusInfo, receiver=controlledElement)
        pnl.RESPONSE_TIME,
        pnl.PROBABILITY_UPPER_THRESHOLD,
        {
            pnl.NAME: 'OFFSET RT',
            pnl.VARIABLE: (pnl.OWNER_VALUE, 2),
            pnl.FUNCTION: pnl.Linear(0, slope=1.0, intercept=1)
        }
    ],
)  # drift_rate=(1.0), threshold=(0.2645), noise=(0.5), starting_point=(0), t0=0.15

Decision.set_log_conditions('InputPort-0')  # , log_condition=pnl.PROCESSING)
# Outcome Mechanisms:
Reward = pnl.TransferMechanism(name='Reward')
# Composition
Umemoto_comp = pnl.Composition(name="Umemoto_System")
# ADD pathways
TargetControl_pathway = [Target_Stim, Target_Rep, Decision]
Umemoto_comp.add_linear_processing_pathway(TargetControl_pathway)
FlankerControl_pathway = [Distractor_Stim, Distractor_Rep, Decision]
Umemoto_comp.add_linear_processing_pathway(FlankerControl_pathway)
TargetAutomatic_pathway = [Target_Stim, Automatic_Component, Decision]
Umemoto_comp.add_linear_processing_pathway(TargetAutomatic_pathway)
FlankerAutomatic_pathway = [Distractor_Stim, Automatic_Component, Decision]
Umemoto_comp.add_linear_processing_pathway(FlankerAutomatic_pathway)
Reward_pathway = [Reward]
Umemoto_comp.add_linear_processing_pathway(Reward_pathway)