# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_create_environment_get_observed(self, db_session):
    """An agent wired to an environment receives the environment's state."""
    network = models.Network()
    db_session.add(network)

    env = nodes.Environment(network=network)
    information.State(origin=env, contents="foo")

    observer = nodes.ReplicatorAgent(network=network)
    env.connect(direction="to", whom=observer)
    env.transmit(to_whom=observer)
    observer.receive()

    assert observer.infos()[0].contents == "foo"
def test_random_walk_from_source(self, db_session):
    """A source connects to the earliest-created agent and emits information."""
    from operator import attrgetter

    network = models.Network()
    db_session.add(network)
    db_session.commit()

    # A three-agent chain: first -> second -> third.
    first = nodes.ReplicatorAgent(network=network)
    second = nodes.ReplicatorAgent(network=network)
    third = nodes.ReplicatorAgent(network=network)
    first.connect(whom=second)
    second.connect(whom=third)

    source = nodes.RandomBinaryStringSource(network=network)
    oldest_agent = min(network.nodes(type=Agent), key=attrgetter("creation_time"))
    source.connect(whom=oldest_agent)
    source.create_information()
def test_create_environment(self, db_session):
    """Create an environment"""
    network = models.Network()
    db_session.add(network)
    env = nodes.Environment(network=network)
    db_session.commit()

    # A persisted environment gets an id, a type discriminator and a timestamp.
    assert isinstance(env.id, int)
    assert env.type == "environment"
    assert env.creation_time
    # No state has been recorded yet.
    assert env.state() is None

    information.State(origin=env, contents="foo")
    db_session.commit()
    assert env.state().contents == "foo"
def test_ingest_zip_recreates_transmissions(self, db_session, zip_path):
    """Ingesting the export zip restores all transmission rows."""
    dallinger.data.ingest_zip(zip_path)
    transmissions = dallinger.models.Transmission.query.all()
    assert len(transmissions) == 4
def test_ingest_zip_recreates_nodes(self, db_session, zip_path):
    """Ingesting the export zip restores all node rows."""
    dallinger.data.ingest_zip(zip_path)
    restored_nodes = dallinger.models.Node.query.all()
    assert len(restored_nodes) == 5
def test_ingest_zip_recreates_questions(self, db_session, zip_path):
    """Ingesting the export zip restores participant 1's question responses.

    Fix: the original loop passed vacuously when no questions (or no
    responses) were restored from the zip; require at least one answered
    question so a broken ingest cannot slip through.
    """
    dallinger.data.ingest_zip(zip_path)
    model = dallinger.models.Question
    p1_questions = model.query.filter_by(participant_id=1).all()
    answered = [q for q in p1_questions if q.response]
    # Guard against a vacuous pass: the fixture data contains responses.
    assert answered, "expected at least one answered question for participant 1"
    for q in answered:
        assert q.response == u"5"
def _worker_complete(participant_id):
    """Mark the participant identified by ``participant_id`` as complete.

    Records the participant's end time, commits it, notifies the
    participant's recruiter, and looks up the recruiter's "submitted"
    event type; returns early when the recruiter defines no such event.

    Raises:
        KeyError: if no participant with ``participant_id`` exists.
    """
    participants = models.Participant.query.filter_by(id=participant_id).all()
    if not participants:
        raise KeyError()
    # If several rows share the id (shouldn't happen for a PK), take the first.
    participant = participants[0]
    participant.end_time = datetime.now()
    session.add(participant)
    session.commit()
    # Notify recruiter for possible qualification assignment, etc.
    participant.recruiter.notify_completed(participant)
    event_type = participant.recruiter.submitted_event()
    if event_type is None:
        # Recruiter has no submitted-event to process; nothing more to do here.
        return
db.logger.debug("rq: worker_function working on job id: %s",
get_current_job().id)
db.logger.debug('rq: Received Queue Length: %d (%s)', len(q),
', '.join(q.job_ids))
except AttributeError:
db.logger.debug('Debug worker_function called synchronously')
exp = Experiment(session)
key = "-----"
exp.log("Received an {} notification for assignment {}, participant {}"
.format(event_type, assignment_id, participant_id), key)
if assignment_id is not None:
# save the notification to the notification table
notif = models.Notification(
assignment_id=assignment_id,
event_type=event_type)
session.add(notif)
session.commit()
# try to identify the participant
participants = models.Participant.query\
.filter_by(assignment_id=assignment_id)\
.all()
# if there are one or more participants select the most recent
if participants:
participant = max(participants,
key=attrgetter('creation_time'))
# if there are none print an error
def summary():
    """Summarize the participants' status codes."""
    state = {
        "status": "success",
        "summary": Experiment(session).log_summary(),
        "completed": False,
    }
    # Networks that still have room for more nodes (id and capacity only).
    unfilled_nets = models.Network.query.filter(
        models.Network.full != true()
    ).with_entities(models.Network.id, models.Network.max_size).all()
    # Number of participants currently in 'working' status.
    working = models.Participant.query.filter_by(
        status='working'
    ).with_entities(func.count(models.Participant.id)).scalar()
    state['unfilled_networks'] = len(unfilled_nets)
    nodes_remaining = 0
    required_nodes = 0
    if state['unfilled_networks'] == 0:
        # Every network is full; the run is complete once nobody is working.
        if working == 0:
            state['completed'] = True
    else:
        # Tally capacity across unfilled networks.
        # NOTE(review): this loop body appears truncated in this chunk —
        # node_count is computed but nodes_remaining is never updated here;
        # confirm against the full source.
        for net in unfilled_nets:
            node_count = models.Node.query.filter_by(
                network_id=net.id
            ).with_entities(func.count(models.Node.id)).scalar()
            net_size = net.max_size
            required_nodes += net_size
if duplicate:
msg = """
AWS has reused assignment_id while existing participant is
working. Replacing older participant {}.
"""
app.logger.warning(msg.format(duplicate.id))
q.enqueue(worker_function, "AssignmentReassigned", None, duplicate.id)
# Count working or beyond participants.
nonfailed_count = (
models.Participant.query.filter(
(models.Participant.status == "working")
| (models.Participant.status == "overrecruited")
| (models.Participant.status == "submitted")
| (models.Participant.status == "approved")
).count()
+ 1
)
recruiter_name = request.args.get("recruiter", "undefined")
if not recruiter_name or recruiter_name == "undefined":
recruiter = recruiters.from_config(_config())
if recruiter:
recruiter_name = recruiter.nickname
# Create the new participant.
participant = models.Participant(
recruiter_id=recruiter_name,
worker_id=worker_id,
assignment_id=assignment_id,
hit_id=hit_id,