    'criteria': 'mrr', 'x_filter': filter,
    'stop_interval': 2,
    'burn_in': 0,
    'check_interval': 100
})
# model.fit(np.concatenate((X['train'], X['valid'])))
# Run the evaluation procedure on the test set. Will create filtered rankings.
# To disable filtering: filter_triples=None
ranks = evaluate_performance(X['test'], model=model, filter_triples=filter,
                             verbose=True)
# compute and print metrics:
mr = mar_score(ranks)
mrr = mrr_score(ranks)
hits_1 = hits_at_n_score(ranks, n=1)
hits_3 = hits_at_n_score(ranks, n=3)
hits_10 = hits_at_n_score(ranks, n=10)
with open("result_{0}_{1}.txt".format(args.dataset, args.model), "w") as fo:
fo.write("mr(test): {0} mrr(test): {1} hits 1: {2} hits 3: {3} hits 10: {4}".format(mr, mrr, hits_1, hits_3, hits_10))
# Build the filter from all known true triples, so that positives generated
# by the corruption procedure are filtered out of the rankings:
filter = np.concatenate((X['train'], X['valid'], X['test']))
print("Start fitting...no early stopping")
model.fit(np.concatenate((X['train'], X['valid'])))
# Run the evaluation procedure on the test set. Will create filtered rankings.
# To disable filtering: filter_triples=None
ranks = evaluate_performance(X['test'], model=model, filter_triples=filter,
                             verbose=True)
# compute and print metrics:
mr = mar_score(ranks)
mrr = mrr_score(ranks)
hits_1 = hits_at_n_score(ranks, n=1)
hits_3 = hits_at_n_score(ranks, n=3)
hits_10 = hits_at_n_score(ranks, n=10)
with open("result_{0}_{1}.txt".format(args.dataset, args.model), "w") as fo:
fo.write("mr(test): {0} mrr(test): {1} hits 1: {2} hits 3: {3} hits 10: {4}".format(mr, mrr, hits_1, hits_3, hits_10))
if not hasattr(model, 'early_stopping_epoch') or model.early_stopping_epoch is None:
    early_stopping_epoch = np.nan
else:
    early_stopping_epoch = model.early_stopping_epoch
# Run the evaluation procedure on the test set. Will create filtered rankings.
# To disable filtering: filter_triples=None
ranks = evaluate_performance(X['test'],
                             model,
                             filter,
                             verbose=False)
# compute and print metrics:
mr = mr_score(ranks)
mrr = mrr_score(ranks)
hits_1 = hits_at_n_score(ranks, n=1)
hits_3 = hits_at_n_score(ranks, n=3)
hits_10 = hits_at_n_score(ranks, n=10)
return {
    "mr": mr,
    "mrr": mrr,
    "H@1": hits_1,
    "H@3": hits_3,
    "H@10": hits_10,
    "hyperparams": hyperparams,
    "time": time.time() - start_time,
    "early_stopping_epoch": early_stopping_epoch
}
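# Hypothetical driver that collects per-run result dicts like the one returned
# above. `run_experiment` stands in for the enclosing (unshown) function; its
# name and the grid values are assumptions made purely for illustration.
import itertools

grid = {'k': [100, 200], 'eta': [5, 10]}
results = [run_experiment({'k': k, 'eta': eta})
           for k, eta in itertools.product(grid['k'], grid['eta'])]
best = max(results, key=lambda r: r['mrr'])
print('Best hyperparams:', best['hyperparams'], 'test MRR:', best['mrr'])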
if epoch >= self.early_stopping_params.get('burn_in',
                                           DEFAULT_BURN_IN_EARLY_STOPPING) \
        and epoch % self.early_stopping_params.get('check_interval',
                                                   DEFAULT_CHECK_INTERVAL_EARLY_STOPPING) == 0:
    # Compute the validation ranks used for the early-stopping check
    ranks = []
    # Get each triple and compute the rank for that triple
    for x_test_triple in range(self.eval_dataset_handle.get_size("valid")):
        rank_triple = self.sess_train.run(self.rank)
        ranks.append(rank_triple)
    if self.early_stopping_criteria == 'hits10':
        current_test_value = hits_at_n_score(ranks, 10)
    elif self.early_stopping_criteria == 'hits3':
        current_test_value = hits_at_n_score(ranks, 3)
    elif self.early_stopping_criteria == 'hits1':
        current_test_value = hits_at_n_score(ranks, 1)
    elif self.early_stopping_criteria == 'mrr':
        current_test_value = mrr_score(ranks)
    if self.early_stopping_best_value is None:  # First validation iteration
        self.early_stopping_best_value = current_test_value
        self.early_stopping_first_value = current_test_value
    elif self.early_stopping_best_value >= current_test_value:
        self.early_stopping_stop_counter += 1
        if self.early_stopping_stop_counter == self.early_stopping_params.get(
                'stop_interval', DEFAULT_STOP_INTERVAL_EARLY_STOPPING):
            # If the best value for the criteria has not changed from the
            # initial value, save the model before early stopping
            if self.early_stopping_best_value == self.early_stopping_first_value:
                self._save_trained_params()
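# Summary of the knobs read by the check above (all come from the user-facing
# early_stopping_params dict): no checks run during the first 'burn_in' epochs,
# a validation pass runs every 'check_interval' epochs, and the stop counter
# increments on each check without improvement of the chosen 'criteria'
# (mrr, hits1, hits3 or hits10); once it reaches 'stop_interval' the model is
# saved and training is halted (the actual stop handling falls outside this
# excerpt).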
# Train the model on all possible combinations of hyperparameters.
# Models are validated on the validation set.
# It returns a model re-trained on the training and validation sets.
print("Start model selection to find the best hyperparameters...")
best_model, best_params, best_mrr_train, \
    ranks_test, mrr_test = select_best_model_ranking(model_class, X_dict,
                                                     param_grid,
                                                     filter_retrain=True,
                                                     eval_splits=100,
                                                     verbose=True)
mr_test = mar_score(ranks_test)
hits_1 = hits_at_n_score(ranks_test, n=1)
hits_3 = hits_at_n_score(ranks_test, n=3)
hits_10 = hits_at_n_score(ranks_test, n=10)
with open("result_{0}_{1}.txt".format(args.dataset, args.model), "w") as fo:
fo.write("type(best_model).__name__: {0}\n".format(type(best_model).__name__))
fo.write("best_params: {0}\n".format(best_params))
fo.write("mr(test): {0} mrr(test): {1} hits 1: {2} hits 3: {3} hits 10: {4}".format(mr_test, mrr_test, hits_1, hits_3, hits_10))
def evaluation(ranks):
    mrr = mrr_score(ranks)
    mr = mr_score(ranks)
    hits_1 = hits_at_n_score(ranks, n=1)
    hits_3 = hits_at_n_score(ranks, n=3)
    hits_10 = hits_at_n_score(ranks, n=10)
    return mrr, mr, hits_1, hits_3, hits_10
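# Usage sketch for the helper above (assumes a trained model and the filter
# built from all splits, as in the earlier snippets):
ranks = evaluate_performance(X['test'], model=model, filter_triples=filter, verbose=False)
mrr, mr, hits_1, hits_3, hits_10 = evaluation(ranks)
print("MRR: {0:.4f} MR: {1:.1f} H@1: {2:.3f} H@3: {3:.3f} H@10: {4:.3f}".format(
    mrr, mr, hits_1, hits_3, hits_10))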