How to use the ampligraph.evaluation.evaluate_performance function in ampligraph

To help you get started, we’ve selected a few ampligraph examples, drawn from public projects, that show popular ways evaluate_performance is used.

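Before diving into the project snippets, here is a minimal, self-contained sketch of the typical call pattern. The hyperparameters are illustrative, not tuned, and the API shown is AmpliGraph 1.x, as used in the snippets below:

import numpy as np
from ampligraph.datasets import load_wn18
from ampligraph.latent_features import TransE
from ampligraph.evaluation import evaluate_performance, mrr_score, hits_at_n_score

X = load_wn18()
model = TransE(batches_count=10, seed=0, epochs=1, k=50, eta=10)
model.fit(X['train'])

# Filter out known true triples from the generated corruptions
# (the "filtered" evaluation protocol).
filter_triples = np.concatenate((X['train'], X['valid'], X['test']))
ranks = evaluate_performance(X['test'][::100], model, filter_triples, verbose=True)

print('MRR:', mrr_score(ranks))
print('Hits@10:', hits_at_n_score(ranks, n=10))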

From Accenture/AmpliGraph: tests/ampligraph/evaluation/test_protocol.py (view on GitHub)
from ampligraph.datasets import load_wn18
from ampligraph.latent_features import TransE
from ampligraph.evaluation import (evaluate_performance, hits_at_n_score,
                                   mrr_score, mr_score)


def test_evaluate_performance_default_protocol_without_filter():
    wn18 = load_wn18()

    model = TransE(batches_count=10, seed=0, epochs=1,
                   k=50, eta=10, verbose=True,
                   embedding_model_params={'normalize_ent_emb': False, 'norm': 1},
                   loss='self_adversarial', loss_params={'margin': 1, 'alpha': 0.5},
                   optimizer='adam',
                   optimizer_params={'lr': 0.0005})

    model.fit(wn18['train'])

    # Rank every 100th test triple against object-side corruptions, then
    # against subject-side corruptions, and pool the resulting ranks.
    ranks_sep = []
    ranks = evaluate_performance(wn18['test'][::100], model, verbose=True,
                                 corrupt_side='o', use_default_protocol=False)
    ranks_sep.extend(ranks)

    ranks = evaluate_performance(wn18['test'][::100], model, verbose=True,
                                 corrupt_side='s', use_default_protocol=False)
    ranks_sep.extend(ranks)

    print('----------EVAL WITHOUT FILTER-----------------')
    print('----------Subj and obj corrupted separately-----------------')
    print('MR:', mr_score(ranks_sep))
    print('MRR:', mrr_score(ranks_sep))
    print('Hits@10:', hits_at_n_score(ranks_sep, 10))
    print('Hits@3:', hits_at_n_score(ranks_sep, 3))
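For comparison, the pooled two-sided evaluation above can be requested in a single call. A minimal sketch reusing wn18 and model from the test above, assuming the AmpliGraph 1.x behavior where use_default_protocol=True corrupts subjects and objects separately and returns a rank for each side:

import numpy as np

ranks_both = evaluate_performance(wn18['test'][::100], model, verbose=True,
                                  use_default_protocol=True)
# Depending on the library version, the ranks may come back as a flat array
# of 2*n values or as an [n, 2] array; flatten before scoring to be safe.
ranks_both = np.asarray(ranks_both).ravel()
print('MRR:', mrr_score(ranks_both))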
From Accenture/AmpliGraph: tests/ampligraph/evaluation/test_protocol.py (view on GitHub)
import numpy as np
from ampligraph.datasets import load_wn18
from ampligraph.latent_features import ComplEx
from ampligraph.evaluation import evaluate_performance, mrr_score, hits_at_n_score


def test_evaluate_performance_so_side_corruptions_without_filter():
    X = load_wn18()
    model = ComplEx(batches_count=10, seed=0, epochs=5, k=200, eta=10, loss='nll',
                    regularizer=None, optimizer='adam',
                    optimizer_params={'lr': 0.01}, verbose=True)
    model.fit(X['train'])

    # Note: despite the test name, this run passes X_filter, i.e. it uses the
    # filtered protocol (known true triples are excluded from corruptions).
    X_filter = np.concatenate((X['train'], X['valid'], X['test']))
    ranks = evaluate_performance(X['test'][::20], model, X_filter, verbose=True,
                                 use_default_protocol=False, corrupt_side='s+o')
    mrr = mrr_score(ranks)
    hits_10 = hits_at_n_score(ranks, n=10)
    print("ranks: %s" % ranks)
    print("MRR: %f" % mrr)
    print("Hits@10: %f" % hits_10)
    # The original `mrr is not np.Inf` compares object identity and always
    # passes; an infinity check is what was intended.
    assert not np.isinf(mrr)
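A note on the flags used above, based on my reading of the AmpliGraph 1.x docs (worth verifying against your installed version): with use_default_protocol=False and corrupt_side='s+o', subject- and object-side corruptions are pooled into one candidate set and a single rank is returned per test triple, whereas the default protocol ranks the two sides separately and returns a rank for each.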
From Accenture/AmpliGraph: tests/ampligraph/latent_features/test_models.py (view on GitHub)
from ampligraph.datasets import load_wn18
from ampligraph.latent_features import RandomBaseline
from ampligraph.evaluation import evaluate_performance, hits_at_n_score


def test_evaluate_RandomBaseline():
    # RandomBaseline scores triples pseudo-randomly, so the true triple
    # should rank no better than chance.
    model = RandomBaseline(seed=0)
    X = load_wn18()
    model.fit(X["train"])
    ranks = evaluate_performance(X["test"],
                                 model=model,
                                 use_default_protocol=False,
                                 corrupt_side='s+o',
                                 verbose=False)
    hits10 = hits_at_n_score(ranks, n=10)
    hits1 = hits_at_n_score(ranks, n=1)
    assert hits10 < 0.01 and hits1 == 0.0
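Why those assertion thresholds are safe: a quick back-of-the-envelope check, assuming uniform random ranks over the WN18 entity set (40,943 entities):

# Expected metrics for a uniform random ranker over WN18's 40,943 entities.
num_entities = 40943
print('expected Hits@10:', 10 / num_entities)  # ~0.00024, well below the 0.01 bound
print('expected Hits@1:', 1 / num_entities)    # ~2.4e-05, so hits1 == 0.0 is likely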
From Accenture/AmpliGraph: experiments/single_exp_0_es.py (view on GitHub)
# Excerpt from inside an experiment script: model_class, hyperparams, X, np,
# and args are defined earlier in the file.
model = model_class(**hyperparams)

# The entire dataset will be used to filter out false-positive statements
# created by the corruption procedure:
filter = np.concatenate((X['train'], X['valid'], X['test']))

print("Start fitting... no early stopping")

# Fit the model on the training and validation sets
model.fit(np.concatenate((X['train'], X['valid'])))

# Run the evaluation procedure on the test set. This creates filtered rankings.
# To disable filtering: filter_triples=None
ranks = evaluate_performance(X['test'], model=model, filter_triples=filter,
                             verbose=True)

# Compute and print metrics. (The original script calls mar_score, an older
# name for mr_score.)
mr = mr_score(ranks)
mrr = mrr_score(ranks)
hits_1 = hits_at_n_score(ranks, n=1)
hits_3 = hits_at_n_score(ranks, n=3)
hits_10 = hits_at_n_score(ranks, n=10)

with open("result_{0}_{1}.txt".format(args.dataset, args.model), "w") as fo:
    fo.write("mr(test): {0} mrr(test): {1} hits 1: {2} hits 3: {3} "
             "hits 10: {4}".format(mr, mrr, hits_1, hits_3, hits_10))
From Accenture/AmpliGraph: experiments/single_exp_append_unseen.py (view on GitHub)
print("Start fitting...with early stopping")
    
    model.fit(np.concatenate((X['train'], X['valid'])), True, 
    {
        'x_valid':X['test'][:1000], 
        'criteria':'mrr', 'x_filter':filter,
        'stop_interval': 2, 
        'burn_in':0, 
        'check_interval':100
    })
    # model.fit(np.concatenate((X['train'], X['valid'])))

    # Run the evaluation procedure on the test set. Will create filtered rankings.
    # To disable filtering: filter_triples=None
    ranks = evaluate_performance(X['test'], model=model, filter_triples=filter,
                                verbose=True)

    # compute and print metrics:
    mr = mar_score(ranks)
    mrr = mrr_score(ranks)
    hits_1 = hits_at_n_score(ranks, n=1)
    hits_3 = hits_at_n_score(ranks, n=3)
    hits_10 = hits_at_n_score(ranks, n=10)

    with open("result_{0}_{1}.txt".format(args.dataset, args.model), "w") as fo:
        fo.write("mr(test): {0} mrr(test): {1} hits 1: {2} hits 3: {3} hits 10: {4}".format(mr, mrr, hits_1, hits_3, hits_10))
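One caveat worth noting in the excerpt above: early stopping monitors x_valid drawn from the test set (X['test'][:1000]), which leaks test information into model selection. The predictive_performance.py excerpt below monitors a slice of the validation set (X['valid'][::2]) instead, which is the safer convention.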
From Accenture/AmpliGraph: experiments/single_exp.py (view on GitHub)

This excerpt is identical to the single_exp_append_unseen.py excerpt above, so it is not repeated here.
From Accenture/AmpliGraph: experiments/predictive_performance.py (view on GitHub)
# Excerpt from inside an experiment function: model, X, filter, and np are
# defined earlier in the file. The snippet is truncated at the source and
# opens mid-way through a model.fit(...) call, so the first two arguments
# below are reconstructed and should be treated as assumptions.
model.fit(X['train'],                  # assumed: not shown in the excerpt
          early_stopping=True,         # implied by the parameters that follow
          early_stopping_params={
              'x_valid': X['valid'][::2],
              'criteria': 'mrr',
              'x_filter': filter,
              'stop_interval': 4,
              'burn_in': 0,
              'check_interval': 50
          })

if not hasattr(model, 'early_stopping_epoch') or model.early_stopping_epoch is None:
    early_stopping_epoch = np.nan
else:
    early_stopping_epoch = model.early_stopping_epoch

# Run the evaluation procedure on the test set. This creates filtered rankings.
# To disable filtering: filter_triples=None
ranks = evaluate_performance(X['test'],
                             model,
                             filter,
                             verbose=False)

# Compute and print metrics:
mr = mr_score(ranks)
mrr = mrr_score(ranks)
hits_1 = hits_at_n_score(ranks, n=1)
hits_3 = hits_at_n_score(ranks, n=3)
hits_10 = hits_at_n_score(ranks, n=10)

return {
    "mr": mr,
    "mrr": mrr,
    "H@1": hits_1,
    "H@3": hits_3,
    # The excerpt is cut off here at the source; the closing entry below
    # follows from the hits_10 computed above.
    "H@10": hits_10,
}