How to use the ampligraph.latent_features.ComplEx class in AmpliGraph

To help you get started, we've selected a few AmpliGraph examples based on popular ways it is used in public projects.

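The snippets below are all excerpted from the AmpliGraph test suite and omit their imports: each assumes import numpy as np, import pytest, and the relevant names from ampligraph.latent_features, ampligraph.evaluation, ampligraph.discovery, and ampligraph.datasets. As a starting point, here is a minimal, self-contained sketch of the basic fit/predict/evaluate workflow, assuming the AmpliGraph 1.x API that these tests target:

import numpy as np

from ampligraph.latent_features import ComplEx
from ampligraph.evaluation import evaluate_performance, mrr_score, hits_at_n_score

# Toy knowledge graph: each row is a (subject, predicate, object) triple.
X = np.array([['a', 'y', 'b'],
              ['b', 'y', 'a'],
              ['a', 'y', 'c'],
              ['c', 'y', 'a'],
              ['a', 'y', 'd'],
              ['c', 'y', 'd'],
              ['b', 'y', 'c'],
              ['f', 'y', 'e']])

# Train a ComplEx embedding model (hyperparameters mirror the tests below).
model = ComplEx(batches_count=1, seed=555, epochs=20, k=10,
                loss='pairwise', loss_params={'margin': 1},
                optimizer='adagrad', optimizer_params={'lr': 0.1})
model.fit(X)

# Score unseen triples: higher scores mean the model considers them more plausible.
scores = model.predict(np.array([['f', 'y', 'e'], ['b', 'y', 'd']]))
print(scores)

# Rank triples against corruptions and compute standard link-prediction metrics.
ranks = evaluate_performance(X, model=model, filter_triples=X, verbose=True)
print("MRR: %f" % mrr_score(ranks))
print("Hits@10: %f" % hits_at_n_score(ranks, n=10))

The same pattern scales to the WN18 examples below by swapping the toy array for load_wn18() from ampligraph.datasets.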

github Accenture / AmpliGraph / tests / ampligraph / discovery / test_discovery.py (view on GitHub)
def test_discover_facts():

    X = np.array([['a', 'y', 'b'],
                  ['b', 'y', 'a'],
                  ['a', 'y', 'c'],
                  ['c', 'y', 'a'],
                  ['a', 'y', 'd'],
                  ['c', 'y', 'd'],
                  ['b', 'y', 'c'],
                  ['f', 'y', 'e']])
    model = ComplEx(batches_count=1, seed=555, epochs=2, k=5)

    with pytest.raises(ValueError):
        discover_facts(X, model)

    model.fit(X)

    with pytest.raises(ValueError):
        discover_facts(X, model, strategy='error')

    with pytest.raises(ValueError):
        discover_facts(X, model, strategy='random_uniform', target_rel='error')

github Accenture / AmpliGraph / tests / ampligraph / evaluation / test_protocol.py (view on GitHub)
def test_evaluate_performance_so_side_corruptions_with_filter():
    X = load_wn18()
    model = ComplEx(batches_count=10, seed=0, epochs=5, k=200, eta=10, loss='nll',
                    regularizer=None, optimizer='adam', optimizer_params={'lr': 0.01}, verbose=True)
    model.fit(X['train'])

    ranks = evaluate_performance(X['test'][::20], model=model, verbose=True,
                                 use_default_protocol=False, corrupt_side='s+o')
    mrr = mrr_score(ranks)
    hits_10 = hits_at_n_score(ranks, n=10)
    print("ranks: %s" % ranks)
    print("MRR: %f" % mrr)
    print("Hits@10: %f" % hits_10)
    assert not np.isinf(mrr)

github Accenture / AmpliGraph / tests / ampligraph / latent_features / test_models.py (view on GitHub)
def test_large_graph_mode_adam():
    set_entity_threshold(10)
    X = load_wn18()
    model = ComplEx(batches_count=100, seed=555, epochs=1, k=50, loss='multiclass_nll', loss_params={'margin': 5},
                    verbose=True, optimizer='adam', optimizer_params={'lr': 0.001})
    try:
        model.fit(X['train'])
    except Exception as e:
        print(str(e))
        
    reset_entity_threshold()

github Accenture / AmpliGraph / tests / ampligraph / latent_features / test_models.py (view on GitHub)
def test_fit_predict_ComplEx():
    model = ComplEx(batches_count=1, seed=555, epochs=20, k=10,
                    loss='pairwise', loss_params={'margin': 1}, regularizer='LP',
                    regularizer_params={'lambda': 0.1, 'p': 2}, 
                    optimizer='adagrad', optimizer_params={'lr': 0.1})
    X = np.array([['a', 'y', 'b'],
                  ['b', 'y', 'a'],
                  ['a', 'y', 'c'],
                  ['c', 'y', 'a'],
                  ['a', 'y', 'd'],
                  ['c', 'y', 'd'],
                  ['b', 'y', 'c'],
                  ['f', 'y', 'e']])
    model.fit(X)
    y_pred = model.predict(np.array([['f', 'y', 'e'], ['b', 'y', 'd']]))
    print(y_pred)
    assert y_pred[0] > y_pred[1]

github Accenture / AmpliGraph / tests / ampligraph / latent_features / test_models.py (view on GitHub)
def test_retrain():
    model = ComplEx(batches_count=1, seed=555, epochs=20, k=10,
                    loss='pairwise', loss_params={'margin': 1}, regularizer='LP',
                    regularizer_params={'lambda': 0.1, 'p': 2}, 
                    optimizer='adagrad', optimizer_params={'lr': 0.1})
    X = np.array([['a', 'y', 'b'],
                  ['b', 'y', 'a'],
                  ['a', 'y', 'c'],
                  ['c', 'y', 'a'],
                  ['a', 'y', 'd'],
                  ['c', 'y', 'd'],
                  ['b', 'y', 'c'],
                  ['f', 'y', 'e']])
    model.fit(X)
    y_pred_1st = model.predict(np.array([['f', 'y', 'e'], ['b', 'y', 'd']]))
    model.fit(X)
    y_pred_2nd = model.predict(np.array([['f', 'y', 'e'], ['b', 'y', 'd']]))
    np.testing.assert_array_equal(y_pred_1st, y_pred_2nd)

github Accenture / AmpliGraph / tests / ampligraph / discovery / test_discovery.py (view on GitHub)
def test_find_duplicates():
    X = np.array([['a', 'y', 'b'],
                  ['b', 'y', 'a'],
                  ['a', 'y', 'c'],
                  ['c', 'y', 'a'],
                  ['a', 'y', 'd'],
                  ['c', 'x', 'd'],
                  ['b', 'y', 'c'],
                  ['f', 'y', 'e']])
    model = ComplEx(k=2, batches_count=2)
    model.fit(X)

    entities = set('a b c d e f'.split())
    relations = set('x y'.split())

    def asserts(tol, dups, ent_rel, subspace):
        assert tol > 0.0
        assert len(dups) <= len(ent_rel)
        assert all(len(d) <= len(ent_rel) for d in dups)
        assert all(d.issubset(subspace) for d in dups)

    dups, tol = find_duplicates(X, model, mode='triple', tolerance='auto', expected_fraction_duplicates=0.5)
    asserts(tol, dups, X, {tuple(x) for x in X})

    dups, tol = find_duplicates(X, model, mode='triple', tolerance=1.0)
    assert tol == 1.0

github Accenture / AmpliGraph / tests / ampligraph / discovery / test_discovery.py (view on GitHub)
def test_query_topn():

    X = np.array([['a', 'y', 'b'],
                  ['b', 'y', 'a'],
                  ['a', 'y', 'c'],
                  ['c', 'y', 'a'],
                  ['a', 'y', 'd'],
                  ['c', 'x', 'd'],
                  ['b', 'y', 'c'],
                  ['f', 'y', 'e'],
                  ['a', 'z', 'f'],
                  ['c', 'z', 'f'],
                  ['b', 'z', 'f'],
                  ])

    model = ComplEx(k=2, batches_count=2)

    with pytest.raises(ValueError): # Model not fitted
        query_topn(model, top_n=2)

    model.fit(X)

    with pytest.raises(ValueError):
        query_topn(model, top_n=2)
    with pytest.raises(ValueError):
        query_topn(model, top_n=2, head='a')
    with pytest.raises(ValueError):
        query_topn(model, top_n=2, relation='y')
    with pytest.raises(ValueError):
        query_topn(model, top_n=2, tail='e')
    with pytest.raises(ValueError):
        query_topn(model, top_n=2, head='a', relation='y', tail='e')

github Accenture / AmpliGraph / tests / ampligraph / evaluation / test_protocol.py (view on GitHub)
def test_evaluate_performance_nll_complex():
    X = load_wn18()
    model = ComplEx(batches_count=10, seed=0, epochs=10, k=150, optimizer_params={'lr': 0.1}, eta=10, loss='nll',
                    optimizer='adagrad', verbose=True)
    model.fit(np.concatenate((X['train'], X['valid'])))

    filter_triples = np.concatenate((X['train'], X['valid'], X['test']))
    ranks = evaluate_performance(X['test'][:200], model=model, filter_triples=filter_triples, verbose=True)

    mrr = mrr_score(ranks)
    hits_10 = hits_at_n_score(ranks, n=10)
    print("ranks: %s" % ranks)
    print("MRR: %f" % mrr)
    print("Hits@10: %f" % hits_10)

github Accenture / AmpliGraph / tests / ampligraph / latent_features / test_models.py (view on GitHub)
def test_fit_predict_wn18_ComplEx():
    X = load_wn18()
    model = ComplEx(batches_count=1, seed=555, epochs=5, k=100,
                    loss='pairwise', loss_params={'margin': 1}, regularizer='LP',
                    regularizer_params={'lambda': 0.1, 'p': 2}, 
                    optimizer='adagrad', optimizer_params={'lr': 0.1})
    model.fit(X['train'])
    y = model.predict(X['test'][:1])
    print(y)

github Accenture / AmpliGraph / tests / ampligraph / latent_features / test_models.py (view on GitHub)
def test_missing_entity_ComplEx():

    X = np.array([['a', 'y', 'b'],
                  ['b', 'y', 'a'],
                  ['a', 'y', 'c'],
                  ['c', 'y', 'a'],
                  ['a', 'y', 'd'],
                  ['c', 'y', 'd'],
                  ['b', 'y', 'c'],
                  ['f', 'y', 'e']])
    model = ComplEx(batches_count=1, seed=555, epochs=2, k=5)
    model.fit(X)
    with pytest.raises(ValueError):
        model.predict(['a', 'y', 'zzzzzzzzzzz'])
    with pytest.raises(ValueError):
        model.predict(['a', 'xxxxxxxxxx', 'e'])
    with pytest.raises(ValueError):
        model.predict(['zzzzzzzz', 'y', 'e'])