# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# --- Evaluation WITHOUT filtering: separate vs. joint corruption ---
# NOTE(review): `ranks_sep` is consumed here but is only populated later in
# this file (see the separate-corruption runs below) — this snippet assumes
# it already exists; confirm the intended execution order.
print('----------EVAL WITHOUT FILTER-----------------')
print('----------Subj and obj corrupted separately-----------------')
mr_sep = mr_score(ranks_sep)
print('MAR:', mr_sep)
print('Mrr:', mrr_score(ranks_sep))
print('hits10:', hits_at_n_score(ranks_sep, 10))
print('hits3:', hits_at_n_score(ranks_sep, 3))
print('hits1:', hits_at_n_score(ranks_sep, 1))

from ampligraph.evaluation import evaluate_performance
from ampligraph.evaluation import hits_at_n_score, mrr_score, mr_score

# Joint corruption ('s+o') with the library's default protocol; only every
# 100th test triple is evaluated to keep runtime down.
ranks = evaluate_performance(wn18['test'][::100], model, verbose=True,
                             corrupt_side='s+o', use_default_protocol=True)
print('----------corrupted with default protocol-----------------')
mr_joint = mr_score(ranks)
mrr_joint = mrr_score(ranks)
print('MAR:', mr_joint)
print('Mrr:', mrr_joint)
print('hits10:', hits_at_n_score(ranks, 10))
print('hits3:', hits_at_n_score(ranks, 3))
print('hits1:', hits_at_n_score(ranks, 1))

# Separate-corruption and default-protocol runs must agree on mean rank.
np.testing.assert_equal(mr_sep, mr_joint)
# BUG FIX: the original `assert(mrr_joint is not np.Inf)` was an identity
# comparison on a float, which is effectively always True and never catches
# an infinite MRR; test the numeric value instead.
assert not np.isinf(mrr_joint)
from ampligraph.evaluation import evaluate_performance
from ampligraph.evaluation import hits_at_n_score, mrr_score, mr_score

# FIX: the original duplicated the imports and copy-pasted the evaluation
# call once per corruption side; a single loop over the sides removes the
# duplication while producing the same concatenated rank list.
# Rank every 100th test triple, corrupting object then subject separately
# (no filtering), and collect both runs into one list.
ranks_sep = []
for side in ('o', 's'):
    ranks = evaluate_performance(wn18['test'][::100], model, verbose=True,
                                 corrupt_side=side, use_default_protocol=False)
    ranks_sep.extend(ranks)
# Report the separate-corruption metrics, then rerun the evaluation with the
# library's default joint ('s+o') protocol on the same test slice.
print('----------EVAL WITHOUT FILTER-----------------')
print('----------Subj and obj corrupted separately-----------------')
mr_sep = mr_score(ranks_sep)
print('MAR:', mr_sep)
print('Mrr:', mrr_score(ranks_sep))
for top_n in (10, 3, 1):
    print('hits%d:' % top_n, hits_at_n_score(ranks_sep, top_n))

from ampligraph.evaluation import evaluate_performance
from ampligraph.evaluation import hits_at_n_score, mrr_score, mr_score

ranks = evaluate_performance(
    wn18['test'][::100], model,
    verbose=True, corrupt_side='s+o', use_default_protocol=True)
print('----------corrupted with default protocol-----------------')
mr_joint = mr_score(ranks)
mrr_joint = mrr_score(ranks)
print('MAR:', mr_joint)
print('Mrr:', mrr_score(ranks))
# --- Evaluation WITH filtering: separate vs. joint corruption ---
# NOTE(review): at this point `ranks_sep` still holds UNFILTERED
# separate-corruption ranks from the earlier runs, yet the header below
# claims filtered results and the assert compares them against a filtered
# joint run — confirm the intended snippet ordering.
print('----------EVAL WITH FILTER-----------------')
print('----------Subj and obj corrupted separately-----------------')
mr_sep = mr_score(ranks_sep)
print('MAR:', mr_sep)
print('Mrr:', mrr_score(ranks_sep))
print('hits10:', hits_at_n_score(ranks_sep, 10))
print('hits3:', hits_at_n_score(ranks_sep, 3))
print('hits1:', hits_at_n_score(ranks_sep, 1))

from ampligraph.evaluation import evaluate_performance
from ampligraph.evaluation import hits_at_n_score, mrr_score, mr_score

# Joint ('s+o') corruption with known true triples filtered out via X_filter.
ranks = evaluate_performance(wn18['test'][::100], model, X_filter, verbose=True,
                             corrupt_side='s+o', use_default_protocol=True)
print('----------corrupted with default protocol-----------------')
mr_joint = mr_score(ranks)
mrr_joint = mrr_score(ranks)
print('MAR:', mr_joint)
print('Mrr:', mrr_joint)
print('hits10:', hits_at_n_score(ranks, 10))
print('hits3:', hits_at_n_score(ranks, 3))
print('hits1:', hits_at_n_score(ranks, 1))

# Separate-corruption and default-protocol runs must agree on mean rank.
np.testing.assert_equal(mr_sep, mr_joint)
# BUG FIX: `mrr_joint is not np.Inf` was an identity check that is
# effectively always True for a freshly computed float; check the value.
assert not np.isinf(mrr_joint)
from ampligraph.evaluation import evaluate_performance
from ampligraph.evaluation import hits_at_n_score, mrr_score, mr_score

# FIX: the original re-imported the same names between the two runs and
# duplicated the evaluation call; loop over the corruption sides instead.
# Rank every 100th test triple with filtering (X_filter), corrupting object
# then subject separately, and collect both runs into one list.
ranks_sep = []
for side in ('o', 's'):
    ranks = evaluate_performance(wn18['test'][::100], model, X_filter, verbose=True,
                                 corrupt_side=side, use_default_protocol=False)
    ranks_sep.extend(ranks)
# Filtered-evaluation report: separate-corruption metrics first, then the
# default joint ('s+o') protocol on the same test slice.
print('----------EVAL WITH FILTER-----------------')
print('----------Subj and obj corrupted separately-----------------')
mr_sep = mr_score(ranks_sep)
print('MAR:', mr_sep)
print('Mrr:', mrr_score(ranks_sep))
for top_n in (10, 3, 1):
    print('hits%d:' % top_n, hits_at_n_score(ranks_sep, top_n))

from ampligraph.evaluation import evaluate_performance
from ampligraph.evaluation import hits_at_n_score, mrr_score, mr_score

ranks = evaluate_performance(
    wn18['test'][::100], model, X_filter,
    verbose=True, corrupt_side='s+o', use_default_protocol=True)
print('----------corrupted with default protocol-----------------')
mr_joint = mr_score(ranks)
mrr_joint = mrr_score(ranks)
print('MAR:', mr_joint)
print('Mrr:', mrr_joint)
})
# NOTE(review): this is the tail of a hyperparameter-evaluation function whose
# `def` line is outside this view — it references `model`, `X`, `filter`,
# `hyperparams`, and `start_time` from the enclosing scope, and ends in a
# bare `return`. Confirm against the full file before moving it.
# Record at which epoch early stopping fired; NaN when the model has no such
# attribute or it was never set.
if not hasattr(model, 'early_stopping_epoch') or model.early_stopping_epoch is None:
    early_stopping_epoch = np.nan
else:
    early_stopping_epoch = model.early_stopping_epoch
# Run the evaluation procedure on the test set. Will create filtered rankings.
# To disable filtering: filter_triples=None
# NOTE(review): `filter` shadows the Python builtin — presumably a triples
# array for filtered ranking; verify against the enclosing signature.
ranks = evaluate_performance(X['test'],
                             model,
                             filter,
                             verbose=False)
# compute and print metrics:
mr = mr_score(ranks)
mrr = mrr_score(ranks)
hits_1 = hits_at_n_score(ranks, n=1)
hits_3 = hits_at_n_score(ranks, n=3)
hits_10 = hits_at_n_score(ranks, n=10)
# Bundle metrics plus bookkeeping (hyperparams, wall-clock duration, and the
# early-stopping epoch) into one result record.
return {
    "mr": mr,
    "mrr": mrr,
    "H@1": hits_1,
    "H@3": hits_3,
    "H@10": hits_10,
    "hyperparams": hyperparams,
    "time": time.time() - start_time,
    "early_stopping_epoch": early_stopping_epoch
}
def evaluation(ranks):
    """Compute standard ranking metrics for a sequence of ranks.

    Returns the tuple ``(mrr, mr, hits_1, hits_3, hits_10)``.
    """
    hits_1, hits_3, hits_10 = (hits_at_n_score(ranks, n=k) for k in (1, 3, 10))
    return mrr_score(ranks), mr_score(ranks), hits_1, hits_3, hits_10