How to use the cornac.metrics.Recall function in cornac

To help you get started, we've selected a few cornac examples that show popular ways cornac.metrics.Recall is used in public projects.
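Before the project snippets, here is a minimal sketch of the typical pattern: instantiate Recall with a cutoff k, then pass it to an Experiment together with an evaluation method and one or more models. The MovieLens dataset and BPR model below are illustrative choices, not requirements.

# Minimal usage sketch (dataset and model are illustrative choices)
import cornac
from cornac.datasets import movielens
from cornac.eval_methods import RatioSplit

# Split MovieLens 100K feedback into train/test sets
ratio_split = RatioSplit(data=movielens.load_feedback(),
                         test_size=0.2, rating_threshold=4.0, seed=123)

# Recall@20: the fraction of each user's relevant test items that
# appear in the top-20 recommendations, averaged over users
rec_20 = cornac.metrics.Recall(k=20)

cornac.Experiment(eval_method=ratio_split,
                  models=[cornac.models.BPR(k=10, seed=123)],
                  metrics=[rec_20]).run()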


Example from PreferredAI/cornac: tests/cornac/experiment/test_experiment.py
# From a unit test: evaluate PMF under cross-validation with Recall@1 and F1@1
def test_with_cross_validation(self):
    Experiment(eval_method=CrossValidation(self.data + [(self.data[0][0], self.data[1][1], 5.0)],
                                           exclude_unknowns=False, verbose=True),
               models=[PMF(1, 0)],
               metrics=[Recall(1), FMeasure(1)],
               verbose=True).run()

Example from PreferredAI/cornac: examples/pcrl_example.py
import cornac
from cornac import metrics
from cornac.data import GraphModality
from cornac.datasets import amazon_office as office  # assumed: data loading mirrors the MCF Office example below
from cornac.eval_methods import RatioSplit
from cornac.experiment import Experiment
from cornac.models import PCRL

# Load user-item feedback and the item context graph
ratings = office.load_feedback()
contexts = office.load_graph()

item_graph_modality = GraphModality(data=contexts)

ratio_split = RatioSplit(data=ratings,
                         test_size=0.2, rating_threshold=3.5,
                         exclude_unknowns=True, verbose=True,
                         item_graph=item_graph_modality)

pcrl = PCRL(k=100, z_dims=[300],
            max_iter=300,
            learning_rate=0.001)

# Evaluation metrics
ndcg = metrics.NDCG(k=-1)
rec = metrics.Recall(k=20)
pre = metrics.Precision(k=20)

# Instantiate and run your experiment
exp = Experiment(eval_method=ratio_split,
                 models=[pcrl],
                 metrics=[ndcg, rec, pre])
exp.run()


"""
Output:

Example from PreferredAI/cornac: examples/nmf_example.py
import cornac
from cornac.datasets import movielens
from cornac.eval_methods import RatioSplit

# Load the MovieLens 100K dataset
ml_100k = movielens.load_feedback()

# Instantiate an evaluation method.
eval_method = RatioSplit(data=ml_100k, test_size=0.2, rating_threshold=4.0,
                         exclude_unknowns=True, verbose=True, seed=123)

# Instantiate an NMF recommender model.
nmf = cornac.models.NMF(k=15, max_iter=50, learning_rate=.005,
                        lambda_u=.06, lambda_v=.06, lambda_bu=.02, lambda_bi=.02,
                        use_bias=False, verbose=True, seed=123)

# Instantiate evaluation metrics.
mae = cornac.metrics.MAE()
rmse = cornac.metrics.RMSE()
rec_20 = cornac.metrics.Recall(k=20)
pre_20 = cornac.metrics.Precision(k=20)

# Instantiate and then run an experiment.
exp = cornac.Experiment(eval_method=eval_method,
                        models=[nmf],
                        metrics=[mae, rmse, rec_20, pre_20],
                        user_based=True)
exp.run()

Example from PreferredAI/cornac: examples/bpr_netflix.py
"""Example for Bayesian Personalized Ranking with Netflix dataset"""

import cornac
from cornac.data import Reader
from cornac.datasets import netflix
from cornac.eval_methods import RatioSplit

ratio_split = RatioSplit(data=netflix.load_feedback(variant='small', reader=Reader(bin_threshold=1.0)),
                         test_size=0.1, rating_threshold=1.0,
                         exclude_unknowns=True, verbose=True)

most_pop = cornac.models.MostPop()
bpr = cornac.models.BPR(k=10, max_iter=100, learning_rate=0.001, lambda_reg=0.01, verbose=True)

auc = cornac.metrics.AUC()
rec_20 = cornac.metrics.Recall(k=20)

cornac.Experiment(eval_method=ratio_split,
                  models=[most_pop, bpr],
                  metrics=[auc, rec_20],
                  user_based=True).run()

Example from PreferredAI/cornac: examples/mcf_office.py
import cornac
from cornac import metrics
from cornac.data import GraphModality
from cornac.datasets import amazon_office as office  # assumed: 'office' refers to the Amazon Office dataset
from cornac.eval_methods import RatioSplit
from cornac.experiment import Experiment
from cornac.models import MCF

ratings = office.load_feedback()
contexts = office.load_graph()

item_graph_modality = GraphModality(data=contexts)

ratio_split = RatioSplit(data=ratings,
                         test_size=0.2, rating_threshold=3.5,
                         exclude_unknowns=True, verbose=True,
                         item_graph=item_graph_modality)

mcf = MCF(k=10, max_iter=40, learning_rate=0.001, verbose=True)

# Evaluation metrics
ndcg = metrics.NDCG(k=-1)
rmse = metrics.RMSE()
rec = metrics.Recall(k=20)
pre = metrics.Precision(k=20)

# Instantiate and run your experiment
exp = Experiment(eval_method=ratio_split,
                 models=[mcf],
                 metrics=[rmse, ndcg, rec, pre])
exp.run()

"""
Output:

Example from PreferredAI/cornac: examples/first_example.py
import cornac as cn

# Load MovieLens 100K dataset
ml_100k = cn.datasets.movielens.load_feedback()

# Split data based on ratio
ratio_split = cn.eval_methods.RatioSplit(data=ml_100k, test_size=0.2, rating_threshold=4.0, seed=123)

# Here we are comparing biased MF, PMF, and BPR
mf = cn.models.MF(k=10, max_iter=25, learning_rate=0.01, lambda_reg=0.02, use_bias=True, seed=123)
pmf = cn.models.PMF(k=10, max_iter=100, learning_rate=0.001, lamda=0.001, seed=123)  # 'lamda' is cornac's spelling (avoids the Python keyword)
bpr = cn.models.BPR(k=10, max_iter=200, learning_rate=0.001, lambda_reg=0.01, seed=123)

# Define metrics used to evaluate the models
mae = cn.metrics.MAE()
rmse = cn.metrics.RMSE()
rec_20 = cn.metrics.Recall(k=20)
ndcg_20 = cn.metrics.NDCG(k=20)
auc = cn.metrics.AUC()

# Put it together into an experiment and run
exp = cn.Experiment(eval_method=ratio_split,
                    models=[mf, pmf, bpr],
                    metrics=[mae, rmse, rec_20, ndcg_20, auc],
                    user_based=True)
exp.run()

Example from PreferredAI/cornac: examples/cvae_example.py
import cornac
from cornac.data import TextModality, Reader
from cornac.data.text import BaseTokenizer
from cornac.datasets import citeulike
from cornac.eval_methods import RatioSplit

# Load the CiteULike data (loading assumed; mirrors the CTR example below)
docs, item_ids = citeulike.load_text()
data = citeulike.load_feedback(reader=Reader(item_set=item_ids))

# Build the item text modality
item_text_modality = TextModality(corpus=docs, ids=item_ids,
                                  tokenizer=BaseTokenizer(stop_words='english'),
                                  max_vocab=8000, max_doc_freq=0.5)

ratio_split = RatioSplit(data=data, test_size=0.2, exclude_unknowns=True,
                         rating_threshold=0.5, verbose=True, seed=123,
                         item_text=item_text_modality)

cvae = cornac.models.CVAE(z_dim=50, vae_layers=[200, 100], act_fn='sigmoid',
                          input_dim=8000, lr=0.001, batch_size=128, n_epochs=100,
                          lambda_u=1e-4, lambda_v=0.001, lambda_r=10, lambda_w=1e-4,
                          seed=123, verbose=True)

rec_300 = cornac.metrics.Recall(k=300)

exp = cornac.Experiment(eval_method=ratio_split,
                        models=[cvae],
                        metrics=[rec_300])
exp.run()

Example from PreferredAI/cornac: examples/sbpr_epinions.py
"""Example for Social Bayesian Personalized Ranking with Epinions dataset"""

import cornac
from cornac.data import Reader, GraphModality
from cornac.datasets import epinions
from cornac.eval_methods import RatioSplit

ratio_split = RatioSplit(data=epinions.load_feedback(Reader(bin_threshold=4.0)),
                         test_size=0.1, rating_threshold=0.5,
                         exclude_unknowns=True, verbose=True,
                         user_graph=GraphModality(data=epinions.load_trust()))

sbpr = cornac.models.SBPR(k=10, max_iter=50, learning_rate=0.001,
                          lambda_u=0.015, lambda_v=0.025, lambda_b=0.01,
                          verbose=True)
rec_10 = cornac.metrics.Recall(k=10)

cornac.Experiment(eval_method=ratio_split,
                  models=[sbpr],
                  metrics=[rec_10]).run()

Example from PreferredAI/cornac: examples/ctr_example_citeulike.py
import cornac
from cornac.data import TextModality, Reader
from cornac.data.text import BaseTokenizer
from cornac.datasets import citeulike
from cornac.eval_methods import RatioSplit

docs, item_ids = citeulike.load_text()
data = citeulike.load_feedback(reader=Reader(item_set=item_ids))

# Build the text modality
item_text_modality = TextModality(corpus=docs, ids=item_ids,
                                  tokenizer=BaseTokenizer(sep=' ', stop_words='english'),
                                  max_vocab=8000, max_doc_freq=0.5)

ratio_split = RatioSplit(data=data, test_size=0.2, exclude_unknowns=True,
                         item_text=item_text_modality, verbose=True, seed=123, rating_threshold=0.5)

ctr = cornac.models.CTR(k=50, max_iter=50, lambda_v=1)

rec_300 = cornac.metrics.Recall(k=300)

exp = cornac.Experiment(eval_method=ratio_split,
                        models=[ctr],
                        metrics=[rec_300])
exp.run()