def setup(self):
    self.db = db.init_db(drop_all=True)
    os.chdir(os.path.join("demos", "bartlett1932"))
def live_participants(node_id, get_all):
    try:
        exp = MafiaExperiment(db.session)
        this_node = Node.query.filter_by(id=node_id).one()
        if get_all == 1:
            nodes = Node.query.filter_by(network_id=this_node.network_id,
                                         property2='True').all()
        else:
            nodes = Node.query.filter_by(network_id=this_node.network_id,
                                         property2='True',
                                         type='mafioso').all()
        participants = []
        for node in nodes:
            if node.property1 == this_node.property1:
                participants.append(node.property1 + ' (you!)')
            else:
                participants.append(node.property1)
        random.shuffle(participants)
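
# The excerpt above ends after the shuffle. A hedged sketch of how the
# shuffled names might be returned, assuming this function runs as a Flask
# view; Response and the JSON payload shape are assumptions, not confirmed
# by the excerpt:
import json
from flask import Response

def _participants_response(participants):
    # Serialize the shuffled display names so the client can render the roster.
    return Response(json.dumps({'participants': participants}),
                    status=200, mimetype='application/json')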
import json
import os
from apscheduler.schedulers.blocking import BlockingScheduler
from boto.mturk.connection import MTurkConnection
import requests
import dallinger
from dallinger import db
from dallinger.models import Participant
from dallinger.heroku.messages import NullHITMessager
# Import the experiment.
experiment = dallinger.experiment.load()
session = db.session
scheduler = BlockingScheduler()
config = dallinger.config.get_config()
def run_check(config, mturk, participants, session, reference_time):
    # get experiment duration in seconds
    duration_seconds = config.get('duration') * 60.0 * 60.0
    # for each participant, if they've been active for longer than the
    # experiment duration + 2 minutes, we take action.
    for p in participants:
        time_active = (reference_time - p.creation_time).total_seconds()
        if time_active > (duration_seconds + 120):
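
# A minimal sketch of how run_check could be driven by the BlockingScheduler
# created above; the job interval, the 'working' status filter, and the MTurk
# credential keys are assumptions for illustration:
from datetime import datetime

@scheduler.scheduled_job('interval', minutes=0.5)
def check_for_stale_participants():
    # Hypothetical wiring: connect to MTurk, gather active participants, and
    # compare their ages against the configured duration via run_check.
    mturk = MTurkConnection(
        aws_access_key_id=config.get('aws_access_key_id'),
        aws_secret_access_key=config.get('aws_secret_access_key'))
    participants = Participant.query.filter_by(status='working').all()
    run_check(config, mturk, participants, session, datetime.now())

scheduler.start()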
def worker_function(event_type, assignment_id, participant_id):
    """Process the notification."""
    try:
        db.logger.debug("rq: worker_function working on job id: %s",
                        get_current_job().id)
        db.logger.debug('rq: Received Queue Length: %d (%s)', len(q),
                        ', '.join(q.job_ids))
    except AttributeError:
        db.logger.debug('Debug worker_function called synchronously')
    exp = Experiment(session)
    key = "-----"
    exp.log("Received an {} notification for assignment {}, participant {}"
            .format(event_type, assignment_id, participant_id), key)
    if assignment_id is not None:
        # save the notification to the notification table
        notif = models.Notification(
            assignment_id=assignment_id,
            event_type=event_type)
        session.add(notif)
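
# How a notification might reach this function through the Redis queue used
# by the experiment server (a sketch; the event type and assignment id values
# are illustrative):
from rq import Queue

q = Queue(connection=db.redis_conn)
q.enqueue(worker_function, 'AssignmentSubmitted', 'ABC123DEF456', None)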
from .replay import ReplayBackend
from .worker_events import worker_function
from .utils import (
    crossdomain,
    nocache,
    ValidatesBrowser,
    error_page,
    error_response,
    success_response,
    ExperimentError,
)
# Initialize the Dallinger database.
session = db.session
redis_conn = db.redis_conn
# Connect to the Redis queue for notifications.
q = Queue(connection=redis_conn)
WAITING_ROOM_CHANNEL = "quorum"
app = Flask("Experiment_Server")
@app.before_first_request
def _config():
    config = get_config()
    if not config.ready:
        config.load()
    return config
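
# Once loaded, the same configuration can be read anywhere in the server; a
# small illustration using the 'duration' key that run_check consumes above:
config = _config()
duration_hours = config.get('duration')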
def phase(node_id, switches, was_daytime):
    try:
        exp = MafiaExperiment(db.session)
        this_node = Node.query.filter_by(id=node_id).one()
        net = Network.query.filter_by(id=this_node.network_id).one()
        nodes = Node.query.filter_by(network_id=net.id).order_by(
            'creation_time').all()
        node = nodes[-1]
        elapsed_time = timenow() - node.creation_time
        daytime = (net.daytime == 'True')
        day_round_duration = 150
        night_round_duration = 30
        break_duration = 3
        daybreak_duration = day_round_duration + break_duration
        nightbreak_duration = night_round_duration + break_duration
        time = elapsed_time.total_seconds()
        if switches % 2 == 0:
            time = night_round_duration - (
                elapsed_time.total_seconds() -
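
# The excerpt cuts off mid-expression. The visible structure suggests the
# remaining time is the round length minus the time elapsed beyond the rounds
# already completed, with even values of `switches` meaning a night round.
# A hedged, self-contained restatement (the completed-rounds bookkeeping is
# an assumption):
def seconds_remaining(switches, elapsed_seconds, completed_rounds_seconds,
                      day_round_duration=150, night_round_duration=30):
    # Even switch counts -> night round; odd -> day round (assumed convention).
    round_duration = (night_round_duration if switches % 2 == 0
                      else day_round_duration)
    return round_duration - (elapsed_seconds - completed_rounds_seconds)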
def ingest_to_model(file, model, engine=None):
    """Load data from a CSV file handle into storage for a
    SQLAlchemy model class.
    """
    if engine is None:
        engine = db.engine
    reader = csv.reader(file)
    columns = tuple('"{}"'.format(n) for n in next(reader))
    postgres_copy.copy_from(
        file, model, engine, columns=columns, format="csv", HEADER=False
    )
    fix_autoincrement(model.__table__.name)
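
# Hypothetical usage: re-import a CSV produced by a previous export into the
# Info table (the path and table choice are illustrative):
from dallinger.models import Info

with open("data/my-experiment/data/info.csv") as f:
    ingest_to_model(f, Info)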
@db.scoped_session_decorator
def worker_complete():
    """Complete worker."""
    participant_id = request.args.get("participant_id")
    if not participant_id:
        return error_response(
            error_type="bad request", error_text="participantId parameter is required"
        )
    try:
        _worker_complete(participant_id)
    except KeyError:
        return error_response(
            error_type="ParticipantId not found: {}".format(participant_id)
        )
    return success_response(status="success")
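
# A hypothetical client-side call against this endpoint, assuming it is
# mounted at /worker_complete on a locally running server:
import requests

resp = requests.get("http://localhost:5000/worker_complete",
                    params={"participant_id": "1"})
assert resp.json()["status"] == "success"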
def _get_queue(name="default"):
    # Connect to Redis Queue
    return Queue(name, connection=db.redis_conn)
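
# rq queues are addressed by name, so slow jobs can be kept off the default
# queue; a small illustration (the 'low' queue name is an assumption):
default_queue = _get_queue()
low_priority_queue = _get_queue("low")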
def export(id, local=False, scrub_pii=False):
    """Export data from an experiment."""
    print("Preparing to export the data...")
    if local:
        db_uri = db.db_url
    else:
        db_uri = HerokuApp(id).db_uri
    # Create the data package if it doesn't already exist.
    subdata_path = os.path.join("data", id, "data")
    try:
        os.makedirs(subdata_path)
    except OSError as e:
        if e.errno != errno.EEXIST or not os.path.isdir(subdata_path):
            raise
    # Copy in the data.
    copy_db_to_csv(db_uri, subdata_path, scrub_pii=scrub_pii)
    # Copy the experiment code into a code/ subdirectory.
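
# Hypothetical invocation: export a locally running experiment and scrub
# personally identifying information (the app id is illustrative):
export("my-experiment-id", local=True, scrub_pii=True)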