# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# Fragment (cut from its enclosing loop): when the file being processed is
# header.py, append the build timestamp and release string to it.
# NOTE(review): `pattern_header`, `filename`, `whandler`, `buildtime` and
# `project_release` are defined elsewhere (see the MAIN section below) -- confirm.
if(pattern_header.search(filename)):
whandler.write("\nbuildtime='{}'\nproject_release='{}'\n".format(buildtime,project_release))
#--------------------------
#--------- MAIN -----------
#--------------------------
# Script entry: expects the project release/version number as argv[1],
# pre-compiles the regexes used to patch generated sources, and regenerates
# the Qt resource module with pyrcc5.
print(len(sys.argv))
if len(sys.argv) < 2:
    print('Error: please specify version number !!!')
    # BUGFIX: exit with a non-zero status so callers/CI can detect the error
    # (the original used exit(0), which signals success on failure).
    sys.exit(1)
project_release = str(sys.argv[1])
print("project_release=" + project_release)
# Build timestamp embedded into the generated header (second precision).
buildtime = datetime.now().isoformat(timespec='seconds')
pattern = re.compile(r'import\sproject_ressource')
pattern_import_01 = re.compile(r'from\sPyQt5\simport')
pattern_import_02 = re.compile(r'from\smyclickablelabel')
pattern_header = re.compile(r'header\.py')
print("generating ressource file: tmp/project_ressource.py")
subprocess.check_output(['pyrcc5', 'project_ressource.qrc', '-o', 'tmp/project_ressource.py'])
# Truncated fragment: iterate over the .ui dialog basenames to be compiled
# (the tuple and the loop body continue beyond this chunk).
for uifile in (
'about',
'dockerif_dialog',
'dockernet_dialog',
'if_dialog',
'iso_dialog',
'iterate_dialog',
'kvm_virt_disk_dialog',
# Django view (truncated in this chunk): builds the reviewer dashboard for the
# logged-in user -- assigns new review tasks for open assignments, then
# collects the user's active/completed tasks and own submissions.
def dashboard(request):
user = request.user
new_task_count = 0
# Only assignments whose code-review window is still open get new tasks.
for assignment in Assignment.objects.filter(code_review_end_date__gt=datetime.datetime.now()):
active_sub = Submission.objects.filter(name=user.username).filter(assignment=assignment)
#do not give tasks to students who got extensions
if len(active_sub) == 0 or active_sub[0].duedate < datetime.datetime.now():
new_task_count += assign_tasks(assignment, user)
# Open (not completed) tasks, annotated with comment/reviewer counts.
# NOTE(review): 'chunk__file__submission_assignment' looks like a typo for
# 'chunk__file__submission__assignment' (cf. completed_tasks below) -- confirm.
active_tasks = user.get_profile().tasks \
.select_related('chunk__file__submission_assignment') \
.exclude(status='C') \
.annotate(comment_count=Count('chunk__comments', distinct=True),
reviewer_count=Count('chunk__tasks', distinct=True))
# Completed tasks, with the same annotations.
completed_tasks = user.get_profile().tasks \
.select_related('chunk__file__submission__assignment') \
.filter(status='C') \
.annotate(comment_count=Count('chunk__comments', distinct=True),
reviewer_count=Count('chunk__tasks', distinct=True))
#get all the submissions that the user submitted
submissions = Submission.objects.filter(name=user.username) \
# Training-script entry point (truncated in this chunk): parses CLI flags,
# snapshots model.py source, picks a run name, and builds data loaders.
def main():
parser = argparse.ArgumentParser()
# Positional words are joined to form the run name; optional.
parser.add_argument('name', nargs='*')
parser.add_argument('--eval', dest='eval_only', action='store_true')
parser.add_argument('--test', action='store_true')
parser.add_argument('--resume', nargs='*')
args = parser.parse_args()
if args.test:
# Test mode implies evaluation only.
args.eval_only = True
# NOTE(review): file handle is never closed -- consider `with open(...)`.
src = open('model.py').read()
if args.name:
name = ' '.join(args.name)
else:
# Default run name: the current timestamp.
from datetime import datetime
name = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
target_name = os.path.join('logs', '{}.pth'.format(name))
if not args.test:
# target_name won't be used in test mode
print('will save to {}'.format(target_name))
if args.resume:
# Resume from a checkpoint; joined in case the path contains spaces.
logs = torch.load(' '.join(args.resume))
# hacky way to tell the VQA classes that they should use the vocab without passing more params around
data.preloaded_vocab = logs['vocab']
cudnn.benchmark = True
if not args.eval_only:
train_loader = data.get_loader(train=True)
if not args.test:
val_loader = data.get_loader(val=True)
else:
# Truncated fragment: pumps data from `sock` to stdout while tracking the time
# of the last activity (the timeout comparison is cut off at the chunk edge).
def stdout_thread(self, sock):
last = datetime.datetime.now()
try:
# Raw file descriptor for unbuffered writes to stdout.
fd = sys.stdout.fileno()
while True:
if (datetime.datetime.now()-last
def save_profile(profile, form):
    """Persist the edited profile fields and, if the user subscribes,
    queue a mailing-list subscription task."""
    cleaned = form.cleaned_data
    display_name = striptags(cleaned['nickname'].strip())
    profile.nickname = display_name
    profile.email = cleaned['email'].strip()
    profile.slug = unicode(slugify(display_name))
    profile.confirmed_at = datetime.now()
    # An empty link is normalised to None.
    profile.link = cleaned['link'] or None
    profile.put()
    if profile.subscribes:
        # Hand the MailChimp subscription off to a task queue.
        chimp = get_site().chimp
        taskqueue.add(
            url='/subscriptions/subscribe_email/',
            params={
                'apikey': chimp.apikey,
                'list_id': chimp.listid,
                'email': cleaned['email'],
            },
        )
def __init__(self, **kwargs):
    """Create an Incident, defaulting `date` to the configured local time.

    The stored date is made naive after localisation; newline characters in
    the description are flattened to single spaces.
    """
    super(Incident, self).__init__(**kwargs)
    if self.date is None:
        local_tz = pytz.timezone(current_app.config['TIMEZONE'])
        self.date = datetime.now(local_tz)
    # Drop tzinfo so the value is stored naive.
    self.date = self.date.replace(tzinfo=None)
    # Collapse both newline flavours into spaces and trim the ends.
    cleaned = self.description.replace('\n', ' ').strip()
    self.description = cleaned.replace('\r', ' ').strip()
def fetch_job_result(name):
    """Poll the long-running speech operation `name` until it completes,
    printing progress once a minute, then dump the final response as JSON."""
    service = get_speech_service()
    operation = service.operations().get(name=name)
    response = operation.execute()
    while not response.get('done'):
        # Give the server a few seconds to process.
        stamp = datetime.now().replace(second=0, microsecond=0)
        print('%s, waiting for results from job, %s' % (stamp, name))
        time.sleep(60)
        response = operation.execute()
    print(json.dumps(response))
def _save_to_file(self, data: Sequence[Dict[str, Union[int, str]]]):
    """Save results to a file with name {command}_date_time.csv

    Each mapping in `data` becomes one CSV row; field names are taken from
    the first row. Embedded newlines in the values (needed only for console
    printing) are replaced with ','.
    """
    date = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
    filename = f"{self.cmd}_{date}.csv"
    with open(filename, "w", newline="") as csvfile:
        # Replace "\n" which is required when printing, with ','
        rows = [
            {key: str(value).replace("\n", ",") for key, value in item.items()}
            for item in data
        ]
        # BUGFIX: the original indexed data[0] unconditionally and raised
        # IndexError for empty input; now an empty file is written instead.
        if not rows:
            return
        writer = csv.DictWriter(csvfile, fieldnames=rows[0].keys())
        writer.writeheader()
        writer.writerows(rows)
# Nagios-style service check (truncated in this chunk): times the check()
# call and maps the elapsed seconds onto OK (<5s) / WARNING (<10s) / worse,
# emitting a status line with perfdata and returning the plugin exit code.
def main():
"""Go Main"""
t0 = datetime.datetime.now()
station, count = check()
t1 = datetime.datetime.now()
# Elapsed wall-clock seconds; equivalent to (t1 - t0).total_seconds().
delta = (t1 - t0).seconds + float((t1 - t0).microseconds) / 1000000.0
if delta < 5:
print(
("OK - %.3f %s %s |qtime=%.3f;5;10;15")
% (delta, station, count, delta)
)
return 0
elif delta < 10:
print(
("WARNING - %.3f %s %s |qtime=%.3f;5;10;15")
% (delta, station, count, delta)
)
return 1
print(
# Truncated fragment of a TensorFlow validation loop: accumulates the
# per-frame loss over mini-batches, logs per-epoch results to a CSV file,
# and flips args.train off once max_epochs is reached.
val_error_mbatch = sess.run(net.loss, feed_dict={net.input_ph: mbatch[0],
net.target_ph: mbatch[1], net.nframes_ph: mbatch[2], net.training_ph: False}) # validation error for each frame in mini-batch.
val_error += np.sum(val_error_mbatch)
frames += mbatch[1].shape[0] # total number of frames.
# Progress line is rewritten in place via the carriage return.
print("Validation error for Epoch %d: %3.2f%% complete. " %
(epoch_comp + 1, 100*(end_idx/args.val_s_len.shape[0])), end="\r")
start_idx += args.mbatch_size; end_idx += args.mbatch_size
if end_idx > args.val_s_len.shape[0]: end_idx = args.val_s_len.shape[0]
val_error /= frames # validation error.
epoch_comp += 1 # an epoch has been completed.
net.saver.save(sess, args.model_path + '/epoch', global_step=epoch_comp) # save model.
print("E%d: train err=%3.2f, val err=%3.2f. " %
(epoch_comp, train_err/mbatch_count, val_error))
# Append this epoch's metrics to the per-version CSV log.
with open("log/" + args.ver + ".csv", "a") as results:
results.write("%g, %g, %d, %s\n" % (val_error, train_err/mbatch_count,
epoch_comp, datetime.now().strftime('%Y-%m-%d/%H:%M:%S')))
# Reset accumulators for the next training epoch.
train_err = 0; mbatch_count = 0; start_idx = 0; end_idx = args.mbatch_size
if epoch_comp >= args.max_epochs:
args.train = False
print('\nTraining complete. Validation error for epoch %d: %g. ' %
(epoch_comp, val_error))