How to use the lightfm.evaluation module in lightfm

To help you get started, we’ve selected a few lightfm.evaluation examples based on popular ways it is used in public projects.

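The snippets below come from lightfm’s own test suite, so they assume roughly the following imports. Underscore-prefixed helpers such as _generate_data, _precision_at_k, and _auc are private utilities defined elsewhere in that test module and are not part of the public API.

import numpy as np
import scipy.sparse as sp
import pytest

from lightfm import LightFM
from lightfm import evaluation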

From lyst/lightfm, tests/test_evaluation.py: testing precision_at_k against a reference implementation, with and without excluding train interactions.
def test_precision_at_k():

    no_users, no_items = (10, 100)

    train, test = _generate_data(no_users, no_items)

    model = LightFM(loss="bpr")

    # We want a high precision to catch the k=1 case: fitting on the
    # test set deliberately overfits, so top-ranked items come from test.
    model.fit_partial(test)

    for k in (10, 5, 1):

        # Without omitting train interactions
        precision = evaluation.precision_at_k(model, test, k=k)
        expected_mean_precision = _precision_at_k(model, test, k)

        assert np.allclose(precision.mean(), expected_mean_precision)
        assert len(precision) == (test.getnnz(axis=1) > 0).sum()
        assert (
            len(evaluation.precision_at_k(model, train, preserve_rows=True))
            == test.shape[0]
        )

        # With omitting train interactions
        precision = evaluation.precision_at_k(
            model, test, k=k, train_interactions=train
        )
        expected_mean_precision = _precision_at_k(model, test, k, train=train)

        assert np.allclose(precision.mean(), expected_mean_precision)
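
As the assertions above suggest, by default precision_at_k returns one score per user who has at least one test interaction; preserve_rows=True instead returns one row per user in the test matrix. A minimal sketch, reusing the model and test variables from the snippet above:

# One entry per row of test, including users with no test interactions.
per_user = evaluation.precision_at_k(model, test, k=5, preserve_rows=True)
assert len(per_user) == test.shape[0]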
From lyst/lightfm, tests/test_evaluation.py: an earlier version of the same test, evaluating precision_at_k on the training set itself.
def test_precision_at_k():

    no_users, no_items = (10, 100)

    train = sp.rand(no_users, no_items, format='coo')
    train.data = np.ones_like(train.data)

    model = LightFM(loss='bpr')
    model.fit_partial(train)

    k = 10

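    # Average only over users with at least one training interaction.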
    mean_precision = evaluation.precision_at_k(model,
                                               train,
                                               k=k)[train.getnnz(axis=1) > 0].mean()
    expected_mean_precision = _precision_at_k(model,
                                              train,
                                              k)

    assert np.allclose(mean_precision, expected_mean_precision)
From lyst/lightfm, tests/test_evaluation.py: testing auc_score against a reference implementation.
def test_auc_score():

    no_users, no_items = (10, 100)

    train = sp.rand(no_users, no_items, format='coo')
    train.data = np.ones_like(train.data)

    model = LightFM(loss='bpr')
    model.fit_partial(train)

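    # Keep per-user AUC scores only for users with at least one interaction.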
    auc = evaluation.auc_score(model,
                               train,
                               num_threads=2)[train.getnnz(axis=1) > 0]
    expected_auc = np.array(_auc(model,
                                 train))

    assert auc.shape == expected_auc.shape
    assert np.abs(auc.mean() - expected_auc.mean()) < 0.01
From lyst/lightfm, tests/test_evaluation.py: testing that check_intersections=True raises ValueError when train and test interactions overlap, and passes when they are disjoint.
    # check error is raised when train and test have interactions in common
    with pytest.raises(ValueError):
        evaluation.auc_score(
            model, train, train_interactions=train, check_intersections=True
        )

    with pytest.raises(ValueError):
        evaluation.recall_at_k(
            model, train, train_interactions=train, check_intersections=True
        )

    with pytest.raises(ValueError):
        evaluation.precision_at_k(
            model, train, train_interactions=train, check_intersections=True
        )

    with pytest.raises(ValueError):
        evaluation.reciprocal_rank(
            model, train, train_interactions=train, check_intersections=True
        )

    # check no errors raised when train and test have no interactions in common
    evaluation.auc_score(
        model, test, train_interactions=train, check_intersections=True
    )
    evaluation.recall_at_k(
        model, test, train_interactions=train, check_intersections=True
    )
    evaluation.precision_at_k(
        model, test, train_interactions=train, check_intersections=True
    )
    evaluation.reciprocal_rank(
        model, test, train_interactions=train, check_intersections=True
    )