How to use the cornac.metrics.RMSE function in cornac

To help you get started, we've selected a few cornac examples based on popular ways the library is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github PreferredAI / cornac / tests / cornac / metrics / test_rating.py View on Github external
def test_rmse(self):
        """Check RMSE metadata and compute() on small hand-worked arrays."""
        metric = RMSE()

        # The metric advertises itself as a rating metric named 'RMSE'.
        self.assertEqual('rating', metric.type)
        self.assertEqual('RMSE', metric.name)

        # Perfect prediction -> zero error.
        self.assertEqual(0, metric.compute(np.asarray([0]), np.asarray([0])))
        # Both predictions off by exactly 1 -> RMSE of 1.
        self.assertEqual(1, metric.compute(np.asarray([0, 1]), np.asarray([1, 0])))
        # Weighted variant: every error is 2, so the weighted RMSE is still 2.
        self.assertEqual(2, metric.compute(np.asarray([0, 1]), np.asarray([2, 3]), np.asarray([1, 3])))
github PreferredAI / cornac / examples / svd_example.py View on Github external
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================


import cornac as cn

# MovieLens 100K feedback, split 80/20 into train and test.
feedback = cn.datasets.movielens.load_feedback()
split = cn.eval_methods.RatioSplit(data=feedback, test_size=0.2,
                                   rating_threshold=4.0, verbose=True)

# Compare a bias-only baseline against SVD using identical hyper-parameters.
baseline = cn.models.BaselineOnly(max_iter=30, learning_rate=0.01,
                                  lambda_reg=0.02, verbose=True)
svd_model = cn.models.SVD(k=10, max_iter=30, learning_rate=0.01,
                          lambda_reg=0.02, verbose=True)

# Evaluate rating prediction with MAE and RMSE.
cn.Experiment(eval_method=split,
              models=[baseline, svd_model],
              metrics=[cn.metrics.MAE(), cn.metrics.RMSE()]).run()
github PreferredAI / cornac / examples / first_example.py View on Github external
import cornac as cn

# MovieLens 100K user-item feedback.
feedback = cn.datasets.movielens.load_feedback()

# 80/20 split; ratings >= 4.0 count as positive; fixed seed for repeatability.
eval_method = cn.eval_methods.RatioSplit(data=feedback, test_size=0.2,
                                         rating_threshold=4.0, seed=123)

# Three models under comparison: biased MF, PMF, and BPR.
mf_model = cn.models.MF(k=10, max_iter=25, learning_rate=0.01,
                        lambda_reg=0.02, use_bias=True, seed=123)
pmf_model = cn.models.PMF(k=10, max_iter=100, learning_rate=0.001,
                          lamda=0.001, seed=123)
bpr_model = cn.models.BPR(k=10, max_iter=200, learning_rate=0.001,
                          lambda_reg=0.01, seed=123)

# Rating metrics (MAE, RMSE) plus ranking metrics (Recall@20, NDCG@20, AUC).
metric_list = [
    cn.metrics.MAE(),
    cn.metrics.RMSE(),
    cn.metrics.Recall(k=20),
    cn.metrics.NDCG(k=20),
    cn.metrics.AUC(),
]

# Assemble the experiment and run it with per-user metric averaging.
cn.Experiment(eval_method=eval_method,
              models=[mf_model, pmf_model, bpr_model],
              metrics=metric_list,
              user_based=True).run()
github PreferredAI / cornac / examples / nmf_example.py View on Github external
# MovieLens 100K feedback.
data = movielens.load_feedback()

# Evaluation protocol: 80/20 ratio split, unknown users/items excluded.
ratio_split = RatioSplit(data=data, test_size=0.2, rating_threshold=4.0,
                         exclude_unknowns=True, verbose=True, seed=123)

# An NMF recommender without bias terms; per-factor regularization weights.
nmf_model = cornac.models.NMF(k=15, max_iter=50, learning_rate=.005,
                              lambda_u=.06, lambda_v=.06,
                              lambda_bu=.02, lambda_bi=.02,
                              use_bias=False, verbose=True, seed=123)

# Rating metrics plus top-20 ranking metrics.
metric_list = [cornac.metrics.MAE(),
               cornac.metrics.RMSE(),
               cornac.metrics.Recall(k=20),
               cornac.metrics.Precision(k=20)]

# Run the experiment with per-user metric averaging.
cornac.Experiment(eval_method=ratio_split,
                  models=[nmf_model],
                  metrics=metric_list,
                  user_based=True).run()
github PreferredAI / cornac / examples / biased_mf.py View on Github external
import cornac
from cornac.datasets import movielens
from cornac.eval_methods import RatioSplit

# MovieLens 1M, 80/20 split; unknown users/items are kept in the test set.
eval_method = RatioSplit(data=movielens.load_feedback(variant='1M'),
                         test_size=0.2,
                         exclude_unknowns=False,
                         verbose=True)

# Compare a global-average baseline against biased MF with early stopping.
model_list = [
    cornac.models.GlobalAvg(),
    cornac.models.MF(k=10, max_iter=25, learning_rate=0.01, lambda_reg=0.02,
                     use_bias=True, early_stop=True, verbose=True),
]

# Evaluate rating prediction (MAE, RMSE) averaged per user.
cornac.Experiment(eval_method=eval_method,
                  models=model_list,
                  metrics=[cornac.metrics.MAE(), cornac.metrics.RMSE()],
                  user_based=True).run()
github PreferredAI / cornac / examples / mcf_office.py View on Github external
# Office-product ratings plus an item-item context graph (see the C2PF paper).
feedback = office.load_feedback()
graph_data = office.load_graph()

# Attach the item graph to the evaluation protocol as an auxiliary modality.
eval_method = RatioSplit(data=feedback,
                         test_size=0.2, rating_threshold=3.5,
                         exclude_unknowns=True, verbose=True,
                         item_graph=GraphModality(data=graph_data))

# MCF model that leverages the item graph.
mcf_model = MCF(k=10, max_iter=40, learning_rate=0.001, verbose=True)

# Rating metric (RMSE) and ranking metrics (full-list NDCG, Recall@20,
# Precision@20), in the same display order as the metrics list below.
Experiment(eval_method=eval_method,
           models=[mcf_model],
           metrics=[metrics.RMSE(),
                    metrics.NDCG(k=-1),
                    metrics.Recall(k=20),
                    metrics.Precision(k=20)]).run()

"""
Output:
github PreferredAI / cornac / examples / conv_mf_example.py View on Github external
from cornac.data.text import BaseTokenizer

# Movie plot texts keyed by movie id; restrict feedback to movies with plots.
plots, movie_ids = movielens.load_plot()
ml_1m = movielens.load_feedback(variant='1M', reader=Reader(item_set=movie_ids))

# Text modality over the plots: tab-separated tokens, English stop words,
# vocabulary capped at 8000 terms, terms in more than half the docs dropped.
item_text_modality = TextModality(corpus=plots, ids=movie_ids,
                                  tokenizer=BaseTokenizer(sep='\t', stop_words='english'),
                                  max_vocab=8000, max_doc_freq=0.5)

# 80/20 split with the text modality attached; unknowns excluded.
eval_method = RatioSplit(data=ml_1m, test_size=0.2, exclude_unknowns=True,
                         item_text=item_text_modality, verbose=True, seed=123)

# ConvMF consumes the plot texts via its CNN component.
convmf_model = cornac.models.ConvMF(n_epochs=5, verbose=True, seed=123)

# Evaluate rating prediction with RMSE, averaged per user.
cornac.Experiment(eval_method=eval_method,
                  models=[convmf_model],
                  metrics=[cornac.metrics.RMSE()],
                  user_based=True).run()
github PreferredAI / cornac / examples / pmf_ratio.py View on Github external
from cornac.datasets import movielens
from cornac.eval_methods import RatioSplit
from cornac.models import PMF

# MovieLens 100K feedback.
data = movielens.load_feedback()

# 80/20 split; unknown users/items are kept in the test set.
split = RatioSplit(data=data, test_size=0.2, rating_threshold=4.0,
                   exclude_unknowns=False)

# PMF recommender.
pmf_model = PMF(k=10, max_iter=100, learning_rate=0.001, lamda=0.001)

# Rating metrics plus top-20 ranking metrics.
# NOTE(review): `cornac` itself is referenced below but not imported in this
# excerpt — presumably `import cornac` appears earlier in the file; verify.
metric_list = [cornac.metrics.MAE(),
               cornac.metrics.RMSE(),
               cornac.metrics.Recall(k=20),
               cornac.metrics.Precision(k=20)]

# Run the experiment with per-user metric averaging.
cornac.Experiment(eval_method=split,
                  models=[pmf_model],
                  metrics=metric_list,
                  user_based=True).run()