How to use the cornac.metrics.NDCG metric in cornac

To help you get started, we've selected a few cornac examples based on popular ways NDCG is used in public projects.

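cornac.metrics.NDCG implements Normalized Discounted Cumulative Gain, a ranking metric: you construct it with an optional cutoff k (the default k=-1 applies no cutoff, which is why the metric names itself 'NDCG@-1'), and compute() takes an array of per-item relevance scores plus a ranked list of item indices. A minimal sketch of direct use, with toy arrays borrowed from the unit test below:

import numpy as np

from cornac.metrics import NDCG

ground_truth = np.asarray([1, 0, 1])  # items 0 and 2 are relevant
rec_list = np.asarray([0, 2, 1])      # ranked item indices from a recommender

ndcg = NDCG(k=3)  # score only the top-3 positions
print(ndcg.compute(ground_truth, rec_list))  # 1.0: both relevant items come first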

Example from PreferredAI/cornac: tests/cornac/metrics/test_ranking.py (view on GitHub)

import unittest

import numpy as np

from cornac.metrics import NDCG


class TestRanking(unittest.TestCase):
    def test_ndcg(self):
        ndcg = NDCG()

        self.assertEqual(ndcg.type, 'ranking')
        self.assertEqual(ndcg.name, 'NDCG@-1')

        self.assertEqual(1, ndcg.compute(np.asarray([1]), np.asarray([0])))

        ground_truth = np.asarray([1, 0, 1])  # items 0 and 2 are relevant
        rec_list = np.asarray([0, 2, 1])  # both relevant items ranked before item 1
        self.assertEqual(1, ndcg.compute(ground_truth, rec_list))

        ndcg_2 = NDCG(k=2)
        self.assertEqual(ndcg_2.k, 2)

        ground_truth = np.asarray([0, 0, 1])  # only item 2 is relevant
        rec_list = np.asarray([1, 2, 0])  # the relevant item sits at rank 2
        self.assertEqual(0.63, float('{:.2f}'.format(ndcg_2.compute(ground_truth, rec_list))))
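
The 0.63 in the last assertion follows straight from the definition: the single relevant item sits at rank 2, so DCG@2 = 1/log2(1 + 2) ≈ 0.6309, while the ideal ranking would place it at rank 1, giving IDCG@2 = 1/log2(1 + 1) = 1. A quick check of that arithmetic:

import numpy as np

dcg = 1.0 / np.log2(1 + 2)   # relevant item at rank 2
idcg = 1.0 / np.log2(1 + 1)  # ideal ranking: relevant item at rank 1
print(round(dcg / idcg, 2))  # 0.63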

Example from PreferredAI/cornac: examples/mcf_office.py (view on GitHub)

from cornac import Experiment, metrics
from cornac.data import GraphModality
from cornac.datasets import amazon_office as office
from cornac.eval_methods import RatioSplit
from cornac.models import MCF

# Load office ratings and item contexts, see C2PF paper for details
ratings = office.load_feedback()
contexts = office.load_graph()

item_graph_modality = GraphModality(data=contexts)

ratio_split = RatioSplit(data=ratings,
                         test_size=0.2, rating_threshold=3.5,
                         exclude_unknowns=True, verbose=True,
                         item_graph=item_graph_modality)

mcf = MCF(k=10, max_iter=40, learning_rate=0.001, verbose=True)

# Evaluation metrics
ndcg = metrics.NDCG(k=-1)
rmse = metrics.RMSE()
rec = metrics.Recall(k=20)
pre = metrics.Precision(k=20)

# Instantiate and run your experiment
exp = Experiment(eval_method=ratio_split,
                 models=[mcf],
                 metrics=[rmse, ndcg, rec, pre])
exp.run()

"""
Output:
Example from PreferredAI/cornac: examples/vaecf_citeulike.py (view on GitHub)

"""Example for Variational Autoencoder for Collaborative Filtering for Implicit Feedback Datasets (Citeulike)"""

import cornac
from cornac.datasets import citeulike
from cornac.eval_methods import RatioSplit

# Load user-item feedback from the CiteULike dataset
data = citeulike.load_feedback()

ratio_split = RatioSplit(data=data, test_size=0.2, exclude_unknowns=True,
                         verbose=True, seed=123, rating_threshold=0.5)

vaecf = cornac.models.VAECF(k=10, h=20, n_epochs=100, batch_size=100, learning_rate=0.001, beta=1.0, seed=123)

# Evaluation metrics
rec_20 = cornac.metrics.Recall(k=20)
ndcg_20 = cornac.metrics.NDCG(k=20)
auc = cornac.metrics.AUC()

# Put everything together into an experiment and run it
cornac.Experiment(eval_method=ratio_split,
                  models=[vaecf],
                  metrics=[rec_20, ndcg_20, auc],
                  user_based=True).run()

Example from PreferredAI/cornac: examples/pcrl_example.py (view on GitHub)

from cornac import Experiment, metrics
from cornac.data import GraphModality
from cornac.datasets import amazon_office as office
from cornac.eval_methods import RatioSplit
from cornac.models import PCRL

# Load office ratings and item contexts
ratings = office.load_feedback()
contexts = office.load_graph()

item_graph_modality = GraphModality(data=contexts)

ratio_split = RatioSplit(data=ratings,
                         test_size=0.2, rating_threshold=3.5,
                         exclude_unknowns=True, verbose=True,
                         item_graph=item_graph_modality)

pcrl = PCRL(k=100, z_dims=[300],
            max_iter=300, 
            learning_rate=0.001)


# Evaluation metrics
ndcg = metrics.NDCG(k=-1)
rec = metrics.Recall(k=20)
pre = metrics.Precision(k=20)

# Instantiate and run your experiment
exp = Experiment(eval_method=ratio_split,
                 models=[pcrl],
                 metrics=[ndcg, rec, pre])
exp.run()


"""
Output:
Example from PreferredAI/cornac: examples/efm_example.py (view on GitHub)

import cornac
from cornac.data import SentimentModality
from cornac.datasets import amazon_toy
from cornac.eval_methods import RatioSplit

rating = amazon_toy.load_feedback()
sentiment = amazon_toy.load_sentiment()
md = SentimentModality(data=sentiment)

split_data = RatioSplit(data=rating,
                        test_size=0.15,
                        exclude_unknowns=True, verbose=True,
                        sentiment=md, seed=123)

efm = cornac.models.EFM(num_explicit_factors=40, num_latent_factors=60, num_most_cared_aspects=15,
                        rating_scale=5.0, alpha=0.85,
                        lambda_x=1, lambda_y=1, lambda_u=0.01, lambda_h=0.01, lambda_v=0.01,
                        max_iter=100, num_threads=1,
                        trainable=True, verbose=True, seed=123)

rmse = cornac.metrics.RMSE()
ndcg_50 = cornac.metrics.NDCG(k=50)
auc = cornac.metrics.AUC()

exp = cornac.Experiment(eval_method=split_data,
                        models=[efm],
                        metrics=[rmse, ndcg_50, auc])
exp.run()

Example from PreferredAI/cornac: examples/mter_example.py (view on GitHub)

from cornac import Experiment
from cornac.data import SentimentModality
from cornac.datasets import amazon_toy
from cornac.eval_methods import RatioSplit
from cornac.metrics import NDCG, RMSE
from cornac.models import MTER

data = amazon_toy.load_feedback()
sentiment = amazon_toy.load_sentiment()
md = SentimentModality(data=sentiment)
eval_method = RatioSplit(data, test_size=0.2, rating_threshold=1.0,
                         sentiment=md, exclude_unknowns=True, verbose=True, seed=123)

mter = MTER(n_user_factors=15, n_item_factors=15, n_aspect_factors=12, n_opinion_factors=12,
            n_bpr_samples=1000, n_element_samples=50, lambda_reg=0.1, lambda_bpr=10,
            n_epochs=2000, lr=0.1, verbose=True, seed=123)

exp = Experiment(eval_method=eval_method,
                 models=[mter],
                 metrics=[RMSE(), NDCG(k=10), NDCG(k=20), NDCG(k=50), NDCG(k=100)])

exp.run()

Example from PreferredAI/cornac: examples/first_example.py (view on GitHub)

import cornac as cn

# Load MovieLens 100K dataset
ml_100k = cn.datasets.movielens.load_feedback()

# Split data based on ratio
ratio_split = cn.eval_methods.RatioSplit(data=ml_100k, test_size=0.2, rating_threshold=4.0, seed=123)

# Here we are comparing biased MF, PMF, and BPR
mf = cn.models.MF(k=10, max_iter=25, learning_rate=0.01, lambda_reg=0.02, use_bias=True, seed=123)
pmf = cn.models.PMF(k=10, max_iter=100, learning_rate=0.001, lamda=0.001, seed=123)  # note: PMF spells its regularization parameter 'lamda'
bpr = cn.models.BPR(k=10, max_iter=200, learning_rate=0.001, lambda_reg=0.01, seed=123)

# Define metrics used to evaluate the models
mae = cn.metrics.MAE()
rmse = cn.metrics.RMSE()
rec_20 = cn.metrics.Recall(k=20)
ndcg_20 = cn.metrics.NDCG(k=20)
auc = cn.metrics.AUC()

# Put it together into an experiment and run
exp = cn.Experiment(eval_method=ratio_split,
                    models=[mf, pmf, bpr],
                    metrics=[mae, rmse, rec_20, ndcg_20, auc],
                    user_based=True)
exp.run()

Example from PreferredAI/cornac: cornac/models/ncf/ops.py (view on GitHub)

from cornac.eval_methods import ranking_eval
from cornac.metrics import NDCG

def ndcg(model, train_set, val_set, k=100):
    return ranking_eval(model=model, metrics=[NDCG(k=k)],
                        train_set=train_set, test_set=val_set)[0][0]