logger.info("###################################RUNNING EXPERIMENT NUM %s#########################", str(experiment_number))
logger.info("Program Arguments:")
args_dict = vars(args)
for key, value in args_dict.items():
    logger.info("%s=%s" % (str(key), str(value)))
test_suite = Tests(logger, args)
target_test, Y_pred, cost_list, cost_test_list, learning_rates, rmse = test_suite.run_tests()
Y_pred_copy = np.copy(Y_pred)
accuracy_score_Y_pred = np.rint(Y_pred_copy).astype(int)
if args.test_type != 'f':
    logger.info('###################################Accuracy Results###############################')
    logger.info('Accuracy: ' + str(accuracy_score(target_test, accuracy_score_Y_pred)))
    logger.info('\n' + str(classification_report(target_test, accuracy_score_Y_pred)))
else:
    logger.info('###################################Accuracy Results###############################')
    target_test_1d = target_test.ravel()
    Y_pred_1d = Y_pred.ravel()
    distance = 0
    for i in range(len(target_test_1d)):
        distance += abs(Y_pred_1d[i] - target_test_1d[i])
    avg_distance = distance / len(target_test_1d)
    logger.info("Accuracy Score: %s" % (str(avg_distance)))
    logger.info("NOTE: Accuracy Score is avg. distance between expected and predicted y-values")
    logger.info("NOTE: Computed using the following code:")
    logger.info("for i in range(len(target_test_1d)):")
    logger.info("\tdistance += abs(Y_pred_1d[i] - target_test_1d[i])")
def _report_classifier(clf, expected: np.ndarray, predicted: np.ndarray):
    print("Detailed classification report:")
    print("Classification report for classifier %s:\n%s\n"
          % (clf, metrics.classification_report(expected, predicted)))
    cm = metrics.confusion_matrix(expected, predicted)
    cm = cm / cm.sum(axis=1)[:, None] * 100  # row-normalized confusion matrix, in percent
    # np.set_printoptions(formatter={'float': '{: 2.2f}'.format})
    print(f"Confusion matrix:\n {cm}")
    f1_score = metrics.f1_score(expected, predicted, average='weighted')
    precision = metrics.precision_score(expected, predicted, average='weighted')
    recall = metrics.recall_score(expected, predicted, average='weighted')
    accuracy = metrics.accuracy_score(expected, predicted)
    print(f"f1_score: {f1_score:.4f}")
    print(f"precision: {precision:.4f}")
    print(f"recall: {recall:.4f}")
    print(f"accuracy: {accuracy:.4f}")
    _clf = SKSVM()
else:
    _clf = SKLinearSVM()
rs = main(_clf)
acc_records.append(rs[0])
y_records += rs[1]
bar.update()
acc_records = np.array(acc_records) * 100
plt.figure()
plt.boxplot(acc_records, vert=False, showmeans=True)
plt.show()
from Util.DataToolkit import DataToolkit
idx = np.argmax(acc_records) # type: int
print(metrics.classification_report(y_records[idx][0], y_records[idx][1],
                                     target_names=np.load(os.path.join("_Data", "LABEL_DIC.npy"))))
toolkit = DataToolkit(acc_records[np.argmax(np.average(acc_records, axis=1))])
print("Acc Mean : {:8.6}".format(toolkit.mean))
print("Acc Variance : {:8.6}".format(toolkit.variance))
print("Done")
def SVM(data, target):
    svc = svm.SVC(kernel='linear')
    svc.fit(data, target)
    expected = target
    predicted = svc.predict(data)
    # summarize the fit of the model
    print(metrics.classification_report(expected, predicted))
    print(metrics.confusion_matrix(expected, predicted))
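# SVM() above fits and scores on the same data, so the report only measures training
# fit. A minimal held-out evaluation sketch (scikit-learn only; data and target stand
# for the same arrays passed to SVM):
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=0)
held_out_svc = svm.SVC(kernel='linear').fit(X_train, y_train)
print(metrics.classification_report(y_test, held_out_svc.predict(X_test)))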
model_CNN.summary()
model_CNN.fit(X_train_Glove, y_train,
              validation_data=(X_test_Glove, y_test),
              epochs=1000,
              batch_size=128,
              verbose=2)
predicted = model_CNN.predict(X_test_Glove)
predicted = np.argmax(predicted, axis=1)
print(metrics.classification_report(y_test, predicted))
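# classification_report expects class indices on both sides; if y_test is one-hot
# encoded (common with a softmax CNN such as model_CNN), it needs the same argmax
# treatment as the predictions. A small sketch, assuming y_test is a NumPy array:
y_test_labels = np.argmax(y_test, axis=1) if y_test.ndim > 1 else y_test
print(metrics.classification_report(y_test_labels, predicted))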
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.xlim(0, epochs)
plt.xticks(np.arange(0, epochs+1, 5))
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
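# A matching accuracy panel for the same history, mirroring the loss plot above
# (the history key is 'accuracy'/'val_accuracy' on recent Keras releases and
# 'acc'/'val_acc' on older ones):
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.xlim(0, epochs)
plt.xticks(np.arange(0, epochs + 1, 5))
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()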
report = classification_report(y_true, y_pred)
print(report)
meanIoU = jaccard_similarity_score(y_true, y_pred)
# Compute mean IoU
#meanAcc, labelIou, meanIoUfromcf = computeMeanIou(cfMatrix)
print('----- Mean IoU ----- ')
print('------ %s -----------'%(str(meanIoU)))
#print('---- Manual mean Iou from CF ------')
#print('------ %s -----------'%(str(meanIoUfromcf)))
#print('------ Pixel Accuracy ----')
#print('---------- {} -------------'.format(meanAcc))
# Remove the last label
if cfMatrix:
    cfMatrix = confusion_matrix(y_true, y_pred)
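# jaccard_similarity_score was deprecated in scikit-learn 0.21 and removed in 0.23;
# on current releases a per-class mean IoU can be sketched with jaccard_score
# instead (macro average over labels):
from sklearn.metrics import jaccard_score
meanIoU = jaccard_score(y_true, y_pred, average='macro')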
y_pred = []
y_test_for_loss = Variable(torch.LongTensor(y_test))
avg_loss = 0
for ind, dialog in tqdm(enumerate(X_test)):
    out = forward_pass(model, dialog)
    top_n, top_i = out.data.topk(1)
    y_pred.append(top_i[0][0])
    loss = loss_function(out, y_test_for_loss[ind])
    avg_loss += loss.data[0]
avg_loss = avg_loss / len(X_test)
print("Test loss: {}".format(avg_loss))
f1 = f1_score(y_test, y_pred, average=None)[1]
print("Test F1 label X: {}".format(f1))
print(classification_report(y_test, y_pred))
if f1 >= prev_best_f1 and with_save:
    print('SAVED')
    prev_best_f1 = f1
    model.save()
return prev_best_f1
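# Variable and loss.data[0] are pre-0.4 PyTorch idioms; on current PyTorch the same
# evaluation loop can be sketched roughly as follows (forward_pass, loss_function,
# model, X_test and y_test as above):
y_pred = []
avg_loss = 0.0
targets = torch.as_tensor(y_test, dtype=torch.long)
with torch.no_grad():
    for ind, dialog in enumerate(tqdm(X_test)):
        out = forward_pass(model, dialog)
        y_pred.append(out.argmax(dim=-1).item())
        avg_loss += loss_function(out, targets[ind]).item()
avg_loss /= len(X_test)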
if kcore_par in ("A1", "A2", "B1", "B2", "A0"):
    text_file = open(path+"output_tw_"+idf_par+"_centr_"+centrality_par+"_centrcol_"+centrality_col_par+"_sliding_"+str(sliding_window)+"_kcore_"+kcore_par+"_"+str(kcore_par_int)+"_UPDATED.txt", "w")
else:
    text_file = open(path+"output_tw_"+idf_par+"_centr_"+centrality_par+"_centrcol_"+centrality_col_par+"_sliding_"+str(sliding_window)+"_"+kcore_par+"_UPDATED.txt", "w")
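# Assuming Python 3.6+, the same filename can be assembled more readably with an
# f-string (shown for the k-core branch; out_name is just an illustrative name and
# produces the same string as the concatenation above):
out_name = (f"{path}output_tw_{idf_par}_centr_{centrality_par}_centrcol_{centrality_col_par}"
            f"_sliding_{sliding_window}_kcore_{kcore_par}_{kcore_par_int}_UPDATED.txt")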
# training score
score = metrics.accuracy_score(classes_in_integers, pred_train)
#score = metrics.f1_score(y_test, pred_test, pos_label=list(set(y_test)))
acc = "Accuracy in training set:"+str(score)
print acc
mac = "Macro:"+str(metrics.precision_recall_fscore_support(classes_in_integers, pred_train, average='macro'))
print mac
mic = "Micro:"+str(metrics.precision_recall_fscore_support(classes_in_integers, pred_train, average='micro'))
print mic
met = metrics.classification_report(classes_in_integers, pred_train, target_names=classLabels, digits=4)
print met
text_file.write("LinearSVC_tw_"+idf_par+"_"+centrality_par+"_sliding_"+str(sliding_window)+"\n\n")
text_file.write(acc+"\n"+mac+"\n"+mic+"\n"+"\n"+met)
end = time.time()
elapsed = end - start
print "Total time:"+str(elapsed)
## Testing set
test = pd.read_csv("data/r8-test-stemmed.txt", sep="\t", header=None, names=cols)
print(test.shape)
# Get the number of documents from the dataframe row count
num_test_documents = test.shape[0]
def task_metric(eval_dict, label_dict):
    label_id = eval_dict["label_ids"]
    pred_label = eval_dict["pred_label"]
    label_dict_id = sorted(list(label_dict["id2label"].keys()))
    print(len(label_id), len(pred_label), len(set(label_id)))
    accuracy = accuracy_score(label_id, pred_label)
    print("==accuracy==", accuracy)
    if len(label_dict["id2label"]) < 10:
        result = classification_report(label_id, pred_label,
                                       target_names=[label_dict["id2label"][key] for key in label_dict_id],
                                       digits=4)
        print(result, task_index)
        eval_total_dict["classification_report"] = result
        print("==classification report==")
              precision    recall  f1-score   support

           1       1.00      0.67      0.80         3
           2       0.00      0.00      0.00         0
           3       0.00      0.00      0.00         0

   micro avg       1.00      0.67      0.80         3
   macro avg       0.33      0.22      0.27         3
weighted avg       1.00      0.67      0.80         3

Examples
--------
>>> m = model.LogisticRegression()
>>> m.classification_report()
"""
classification_report = sklearn.metrics.classification_report(
    self.y_test, self.y_pred, target_names=self.classes, digits=2
)
if self.report:
    self.report.report_classification_report(classification_report)
print(classification_report)
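# When the report is needed programmatically rather than as printed text,
# scikit-learn's classification_report also accepts output_dict=True (available
# since scikit-learn 0.20), e.g. continuing with the same y_test/y_pred:
report_dict = sklearn.metrics.classification_report(
    self.y_test, self.y_pred, target_names=self.classes, output_dict=True
)
per_class_f1 = {name: vals["f1-score"]
                for name, vals in report_dict.items() if isinstance(vals, dict)}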