How to use the ampligraph.evaluation.hits_at_n_score function in ampligraph

To help you get started, we've selected a few ampligraph examples that show popular ways hits_at_n_score is used in public projects.
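
Before working through the project snippets, here is a minimal, self-contained sketch of what hits_at_n_score computes: given the array of ranks produced by an evaluation run, it returns the fraction of ranks less than or equal to n. The ranks values below are toy numbers chosen for illustration.

import numpy as np
from ampligraph.evaluation import hits_at_n_score

# Hypothetical ranks, e.g. as returned by evaluate_performance on a test set.
ranks = np.array([1, 3, 5, 12, 100])

# Fraction of test triples ranked in the top n.
print(hits_at_n_score(ranks, n=1))   # 0.2 -> only the rank-1 triple counts
print(hits_at_n_score(ranks, n=10))  # 0.6 -> ranks 1, 3 and 5 are <= 10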

github: Accenture/AmpliGraph/experiments/single_exp_append_unseen.py
        'criteria': 'mrr', 'x_filter': filter,
        'stop_interval': 2, 
        'burn_in':0, 
        'check_interval':100
    })
    # model.fit(np.concatenate((X['train'], X['valid'])))

    # Run the evaluation procedure on the test set. Will create filtered rankings.
    # To disable filtering: filter_triples=None
    ranks = evaluate_performance(X['test'], model=model, filter_triples=filter,
                                verbose=True)

    # compute and print metrics:
    mr = mr_score(ranks)  # mar_score() in the original, pre-1.0 script
    mrr = mrr_score(ranks)
    hits_1 = hits_at_n_score(ranks, n=1)
    hits_3 = hits_at_n_score(ranks, n=3)
    hits_10 = hits_at_n_score(ranks, n=10)

    with open("result_{0}_{1}.txt".format(args.dataset, args.model), "w") as fo:
        fo.write("mr(test): {0} mrr(test): {1} hits 1: {2} hits 3: {3} hits 10: {4}".format(mr, mrr, hits_1, hits_3, hits_10))
github: Accenture/AmpliGraph/experiments/single_exp_0_es.py
    # The entire dataset will be used to filter out false positive statements
    # created by the corruption procedure:
    filter = np.concatenate((X['train'], X['valid'], X['test']))
    
    print("Start fitting...no early stopping")

    model.fit(np.concatenate((X['train'], X['valid'])))

    # Run the evaluation procedure on the test set. Will create filtered rankings.
    # To disable filtering: filter_triples=None
    ranks = evaluate_performance(X['test'], model=model, filter_triples=filter,
                                verbose=True)

    # compute and print metrics:
    mr = mr_score(ranks)  # mar_score() in the original, pre-1.0 script
    mrr = mrr_score(ranks)
    hits_1 = hits_at_n_score(ranks, n=1)
    hits_3 = hits_at_n_score(ranks, n=3)
    hits_10 = hits_at_n_score(ranks, n=10)

    with open("result_{0}_{1}.txt".format(args.dataset, args.model), "w") as fo:
        fo.write("mr(test): {0} mrr(test): {1} hits 1: {2} hits 3: {3} hits 10: {4}".format(mr, mrr, hits_1, hits_3, hits_10))
github: Accenture/AmpliGraph/experiments/predictive_performance.py
    if not hasattr(model, 'early_stopping_epoch') or model.early_stopping_epoch is None:
        early_stopping_epoch = np.nan
    else:
        early_stopping_epoch = model.early_stopping_epoch

    # Run the evaluation procedure on the test set. Will create filtered rankings.
    # To disable filtering: filter_triples=None
    ranks = evaluate_performance(X['test'],
                                 model,
                                 filter,
                                 verbose=False)

    # compute and print metrics:
    mr = mr_score(ranks)
    mrr = mrr_score(ranks)
    hits_1 = hits_at_n_score(ranks, n=1)
    hits_3 = hits_at_n_score(ranks, n=3)
    hits_10 = hits_at_n_score(ranks, n=10)

    return {
        "mr": mr,
        "mrr": mrr,
        "H@1": hits_1,
        "H@3": hits_3,
        "H@10": hits_10,
        "hyperparams": hyperparams,
        "time": time.time() - start_time,
        "early_stopping_epoch": early_stopping_epoch
    }
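
Because the metrics are returned as a plain dict, aggregating results across models is straightforward. A hypothetical consumer is sketched below; the complex_report and transe_report variables are assumed to be dicts returned by calls like the one above, not names from the actual script.

# complex_report and transe_report are hypothetical results dicts.
reports = {"ComplEx": complex_report, "TransE": transe_report}

# Rank models by MRR, highest first.
for name, report in sorted(reports.items(), key=lambda kv: -kv[1]["mrr"]):
    print("{0}: MR={1:.1f} MRR={2:.4f} Hits@10={3:.4f} (stopped at epoch {4})".format(
        name, report["mr"], report["mrr"], report["H@10"], report["early_stopping_epoch"]))
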
github: Accenture/AmpliGraph/ampligraph/latent_features/models.py
        if epoch >= self.early_stopping_params.get('burn_in',
                                                   DEFAULT_BURN_IN_EARLY_STOPPING) \
                and epoch % self.early_stopping_params.get('check_interval',
                                                           DEFAULT_CHECK_INTERVAL_EARLY_STOPPING) == 0:
            # compute validation ranks and the early-stopping metric
            ranks = []
            
            # Get each triple and compute the rank for that triple
            for x_test_triple in range(self.eval_dataset_handle.get_size("valid")):
                rank_triple = self.sess_train.run(self.rank)
                ranks.append(rank_triple)
                
            if self.early_stopping_criteria == 'hits10':
                current_test_value = hits_at_n_score(ranks, 10)
            elif self.early_stopping_criteria == 'hits3':
                current_test_value = hits_at_n_score(ranks, 3)
            elif self.early_stopping_criteria == 'hits1':
                current_test_value = hits_at_n_score(ranks, 1)
            elif self.early_stopping_criteria == 'mrr':
                current_test_value = mrr_score(ranks)

            if self.early_stopping_best_value is None:  # First validation iteration
                self.early_stopping_best_value = current_test_value
                self.early_stopping_first_value = current_test_value
            elif self.early_stopping_best_value >= current_test_value:
                self.early_stopping_stop_counter += 1
                if self.early_stopping_stop_counter == self.early_stopping_params.get(
                        'stop_interval', DEFAULT_STOP_INTERVAL_EARLY_STOPPING):

                    # If the best value for the criteria has not changed from
                    #  initial value then
                    # save the model before early stopping
                    if self.early_stopping_best_value == self.early_stopping_first_value:
                        self._save_trained_params()
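
Here hits_at_n_score doubles as an early-stopping criterion computed over validation ranks. The if/elif chain above amounts to a small dispatch table; a minimal sketch of the same idea, using the criterion names from the snippet:

from ampligraph.evaluation import hits_at_n_score, mrr_score

# Map early-stopping criterion names to metric functions.
CRITERIA = {
    'hits10': lambda ranks: hits_at_n_score(ranks, n=10),
    'hits3': lambda ranks: hits_at_n_score(ranks, n=3),
    'hits1': lambda ranks: hits_at_n_score(ranks, n=1),
    'mrr': mrr_score,
}

def current_test_value(criterion, ranks):
    # Return the early-stopping metric for the given criterion name.
    return CRITERIA[criterion](ranks)
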
github: Accenture/AmpliGraph/experiments/grid_search_exp.py
    # Train the model on all possible combinations of hyperparameters.
    # Models are validated on the validation set.
    # It returns a model re-trained on training and validation sets.
    print("start executing to find the best...")
    best_model, best_params, best_mrr_train, \
    ranks_test, mrr_test = select_best_model_ranking(model_class, X_dict,
                                                      param_grid,
                                                      filter_retrain=True,
                                                      eval_splits=100,
                                                      verbose=True)

    mr_test = mr_score(ranks_test)  # mar_score() in the original, pre-1.0 script
    hits_1 = hits_at_n_score(ranks_test, n=1)
    hits_3 = hits_at_n_score(ranks_test, n=3)
    hits_10 = hits_at_n_score(ranks_test, n=10)

    with open("result_{0}_{1}.txt".format(args.dataset, args.model), "w") as fo:
        fo.write("type(best_model).__name__: {0}\n".format(type(best_model).__name__))
        fo.write("best_params: {0}\n".format(best_params))
        fo.write("mr(test): {0} mrr(test): {1} hits 1: {2} hits 3: {3} hits 10: {4}".format(mr_test, mrr_test, hits_1, hits_3, hits_10))
github: Accenture/AmpliGraph/ampligraph/evaluation/protocol.py
    def evaluation(ranks):
        mrr = mrr_score(ranks)
        mr = mr_score(ranks)
        hits_1 = hits_at_n_score(ranks, n=1)
        hits_3 = hits_at_n_score(ranks, n=3)
        hits_10 = hits_at_n_score(ranks, n=10)
        return mrr, mr, hits_1, hits_3, hits_10
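
The helper returns the five standard metrics as a tuple, so callers can unpack them in one line:

# Unpack the metric tuple returned by the helper above.
mrr, mr, hits_1, hits_3, hits_10 = evaluation(ranks)
print("MRR: {0:.4f} MR: {1:.1f} Hits@1/3/10: {2:.4f}/{3:.4f}/{4:.4f}".format(
    mrr, mr, hits_1, hits_3, hits_10))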