How to use the lightfm.evaluation.precision_at_k function in lightfm

To help you get started, we've selected a few lightfm examples based on popular ways it is used in public projects.

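At its simplest, precision_at_k takes a fitted model and a sparse interaction matrix and returns a NumPy array with one precision score per user who has at least one interaction; k defaults to 10. A minimal sketch on synthetic data (shapes and hyperparameters are illustrative):

import numpy as np
import scipy.sparse as sp

from lightfm import LightFM
from lightfm.evaluation import precision_at_k

# Synthetic implicit-feedback matrix: 10 users x 100 items.
interactions = sp.random(10, 100, density=0.1, random_state=42,
                         data_rvs=np.ones).tocsr()

model = LightFM(loss="bpr")
model.fit(interactions, epochs=10)

# One score per user; averaging gives the usual single-number summary.
# (Evaluating on the training matrix here only keeps the sketch short.)
precision = precision_at_k(model, interactions, k=10)
print(precision.mean())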

From lyst/lightfm, tests/test_evaluation.py. This test zeroes out every embedding and bias so that all predictions tie, and checks that precision collapses to zero:
import numpy as np
from lightfm import LightFM
from lightfm import evaluation

no_users, no_items = (10, 100)

# _generate_data is a helper in the test module that builds random
# train/test interaction matrices of the requested shape.
train, test = _generate_data(no_users, no_items)

model = LightFM(loss="bpr")
model.fit_partial(train)

# Make all predictions zero
model.user_embeddings = np.zeros_like(model.user_embeddings)
model.item_embeddings = np.zeros_like(model.item_embeddings)
model.user_biases = np.zeros_like(model.user_biases)
model.item_biases = np.zeros_like(model.item_biases)

k = 10

precision = evaluation.precision_at_k(model, test, k=k)

# Pessimistic precision with all ties
assert precision.mean() == 0.0
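lightfm breaks ties pessimistically when ranking, so a model that gives every item the same score, like the zeroed-out one above, scores exactly 0.0 rather than the small nonzero precision that random tie-breaking would produce. That makes degenerate models easy to catch in tests.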
From lyst/lightfm, tests/test_movielens.py. The evaluation routines treat every stored entry in a test matrix as a positive interaction, so this helper first drops negative ratings, then reports precision (k defaults to 10) and AUC on both splits:
from lightfm.evaluation import precision_at_k, auc_score

def _get_metrics(model, train_set, test_set):

    train_set = train_set.tocsr()
    test_set = test_set.tocsr()

    train_set.data[train_set.data < 0] = 0.0
    test_set.data[test_set.data < 0] = 0.0

    train_set.eliminate_zeros()
    test_set.eliminate_zeros()

    return (
        precision_at_k(model, train_set).mean(),
        precision_at_k(model, test_set).mean(),
        auc_score(model, train_set).mean(),
        auc_score(model, test_set).mean(),
    )
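In the surrounding test this helper is applied to the MovieLens 100k split that ships with lightfm. A sketch of that pattern; the loss and epoch count are illustrative:

from lightfm import LightFM
from lightfm.datasets import fetch_movielens

# Downloads and caches the MovieLens 100k dataset on first use.
data = fetch_movielens()
train, test = data["train"], data["test"]

model = LightFM(loss="warp")
model.fit(train, epochs=10)

train_prec, test_prec, train_auc, test_auc = _get_metrics(model, train, test)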
From lyst/lightfm, tests/test_evaluation.py. By default the evaluation functions check whether the test set shares interactions with train_interactions and raise a ValueError if it does, since leaked training interactions would silently inflate the scores; passing check_intersections=False disables the guard:
import pytest

# train, test, LightFM and evaluation as in the first excerpt.
model = LightFM(loss="bpr")
model.fit_partial(train)

# check error is raised when train and test have interactions in common
with pytest.raises(ValueError):
    evaluation.auc_score(
        model, train, train_interactions=train, check_intersections=True
    )

with pytest.raises(ValueError):
    evaluation.recall_at_k(
        model, train, train_interactions=train, check_intersections=True
    )

with pytest.raises(ValueError):
    evaluation.precision_at_k(
        model, train, train_interactions=train, check_intersections=True
    )

with pytest.raises(ValueError):
    evaluation.reciprocal_rank(
        model, train, train_interactions=train, check_intersections=True
    )

# check no errors raised when train and test have no interactions in common
evaluation.auc_score(
    model, test, train_interactions=train, check_intersections=True
)
evaluation.recall_at_k(
    model, test, train_interactions=train, check_intersections=True
)
evaluation.precision_at_k(
    model, test, train_interactions=train, check_intersections=True
)
evaluation.reciprocal_rank(
    model, test, train_interactions=train, check_intersections=True
)

# check no error is raised when there are intersections but flag is False
evaluation.auc_score(
    model, train, train_interactions=train, check_intersections=False
)
evaluation.recall_at_k(
    model, train, train_interactions=train, check_intersections=False
)
evaluation.precision_at_k(
    model, train, train_interactions=train, check_intersections=False
)
evaluation.reciprocal_rank(
    model, train, train_interactions=train, check_intersections=False
)
From lyst/lightfm, tests/test_evaluation.py. This test cross-checks the library against _precision_at_k, a pure-Python reference implementation defined in the same test module, both with and without excluding training interactions from the ranking:
model = LightFM(loss="bpr")

# We want a high precision to catch the k=1 case
model.fit_partial(test)

for k in (10, 5, 1):

    # Without omitting train interactions
    precision = evaluation.precision_at_k(model, test, k=k)
    expected_mean_precision = _precision_at_k(model, test, k)

    assert np.allclose(precision.mean(), expected_mean_precision)
    # One score per user with at least one test interaction, unless
    # preserve_rows=True keeps a row for every user.
    assert len(precision) == (test.getnnz(axis=1) > 0).sum()
    assert (
        len(evaluation.precision_at_k(model, train, preserve_rows=True))
        == test.shape[0]
    )

    # With omitting train interactions
    precision = evaluation.precision_at_k(
        model, test, k=k, train_interactions=train
    )
    expected_mean_precision = _precision_at_k(model, test, k, train=train)

    assert np.allclose(precision.mean(), expected_mean_precision)
From lyst/lightfm, tests/test_movielens.py. The same function doubles as a scikit-learn-style scorer for hyperparameter search:
def scorer(est, x, y=None):
    return precision_at_k(est, x).mean()
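Because the scorer has the (estimator, X, y) shape scikit-learn expects, it drops straight into model-selection tooling. A sketch of a small random search built on it; the parameter grid and the train/test matrices are illustrative, not taken from the test:

from sklearn.model_selection import ParameterSampler

param_grid = {"no_components": [16, 32, 64], "learning_rate": [0.01, 0.05]}

best_score, best_params = -1.0, None
for params in ParameterSampler(param_grid, n_iter=4, random_state=0):
    est = LightFM(loss="warp", **params)
    est.fit(train, epochs=5)   # train, test: sparse interaction matrices
    score = scorer(est, test)
    if score > best_score:
        best_score, best_params = score, params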
From DomainGroupOSS/ml-recsys-tools, ml_recsys_tools/evaluation/ranks_scoring.py. Precomputed rank matrices can be replayed through precision_at_k by wrapping them in a mock model:
def precision_at_k_on_ranks(
        ranks, test_interactions, train_interactions=None, k=10, preserve_rows=False):
    # ModelMockRanksCacher, defined in the same module, makes a precomputed
    # rank matrix look like a fitted model to the evaluation code.
    return precision_at_k(
        model=ModelMockRanksCacher(ranks.copy()),
        test_interactions=test_interactions,
        train_interactions=train_interactions,
        k=k,
        preserve_rows=preserve_rows)
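Internally, precision_at_k only needs one method from the model it is given, predict_rank. Here is a reconstruction of what ModelMockRanksCacher plausibly looks like, inferred from the call above rather than copied from ml-recsys-tools:

class ModelMockRanksCacher:
    """Stands in for a fitted model by replaying precomputed ranks."""

    def __init__(self, cached_ranks):
        # Sparse matrix of per-user item ranks, the same shape as the
        # interaction matrix being evaluated.
        self.cached_ranks = cached_ranks

    def predict_rank(self, *args, **kwargs):
        # Ignore the usual arguments (train_interactions, num_threads, ...)
        # and hand back the cached ranks unchanged.
        return self.cached_ranks

The ranks.copy() in the excerpt matters: the evaluation code overwrites the rank matrix's data buffer in place while counting top-k hits, so replaying the same ranks twice without copying would give wrong answers the second time.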