How to use the cornac.metrics.MAE class in cornac

To help you get started, we’ve selected a few cornac examples based on popular ways the library is used in public projects.
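If you just want to see the metric in isolation first, the short sketch below is not taken from the cornac repository; it is an illustration of the API based on the test snippet further down. It creates an MAE object and calls its compute method on ground-truth and predicted rating arrays.

import numpy as np
from cornac.metrics import MAE

mae = MAE()  # a rating metric; mae.type == 'rating', mae.name == 'MAE'

gt_ratings = np.asarray([3.0, 4.0, 5.0])  # ground-truth ratings
pd_ratings = np.asarray([3.5, 3.0, 4.5])  # predicted ratings

# mean absolute error = (0.5 + 1.0 + 0.5) / 3 ≈ 0.667
print(mae.compute(gt_ratings, pd_ratings))

In a typical workflow you rarely call compute yourself; instead you pass the MAE object to an evaluation method or to cornac.Experiment, as the examples below show.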


Example from PreferredAI/cornac: tests/cornac/eval_methods/test_base_method.py
from cornac.data import Reader
from cornac.eval_methods import BaseMethod
from cornac.metrics import MAE
from cornac.models import MF

def test_evaluate(self):
        data = Reader().read('./tests/data.txt')
        bm = BaseMethod.from_splits(train_data=data[:-1], test_data=data[-1:])
        model = MF(k=1, max_iter=0)
        result = bm.evaluate(model, metrics=[MAE()], user_based=False)
        result.__str__()  # render the evaluation result as a string
Example from PreferredAI/cornac: tests/cornac/metrics/test_rating.py
import numpy as np
from cornac.metrics import MAE

def test_mae(self):
        mae = MAE()

        self.assertEqual(mae.type, 'rating')
        self.assertEqual(mae.name, 'MAE')

        self.assertEqual(0, mae.compute(np.asarray([0]), np.asarray([0])))
        self.assertEqual(1, mae.compute(np.asarray([0, 1]), np.asarray([1, 0])))
        self.assertEqual(2, mae.compute(np.asarray([0, 1]), np.asarray([2, 3]), np.asarray([1, 3])))
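The last assertion above passes a third positional argument, which the test treats as per-instance weights. Assuming a weighted mean absolute error of the form sum(w * |gt - pd|) / sum(w), the expected value of 2 works out as in this hand-check (an illustration, not code from the repository):

import numpy as np

gt = np.asarray([0, 1])  # ground-truth ratings
pd = np.asarray([2, 3])  # predicted ratings
w = np.asarray([1, 3])   # per-instance weights (assumed interpretation)

# (1 * |0 - 2| + 3 * |1 - 3|) / (1 + 3) = (2 + 6) / 4 = 2
print(np.sum(w * np.abs(gt - pd)) / np.sum(w))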
Example from PreferredAI/cornac: examples/nmf_example.py
import cornac
from cornac.datasets import movielens
from cornac.eval_methods import RatioSplit

# Load the MovieLens 100K dataset
ml_100k = movielens.load_feedback()

# Instantiate an evaluation method.
eval_method = RatioSplit(data=ml_100k, test_size=0.2, rating_threshold=4.0,
                         exclude_unknowns=True, verbose=True, seed=123)

# Instantiate an NMF recommender model.
nmf = cornac.models.NMF(k=15, max_iter=50, learning_rate=.005,
                        lambda_u=.06, lambda_v=.06, lambda_bu=.02, lambda_bi=.02,
                        use_bias=False, verbose=True, seed=123)

# Instantiate evaluation metrics.
mae = cornac.metrics.MAE()
rmse = cornac.metrics.RMSE()
rec_20 = cornac.metrics.Recall(k=20)
pre_20 = cornac.metrics.Precision(k=20)

# Instantiate and then run an experiment.
exp = cornac.Experiment(eval_method=eval_method,
                        models=[nmf],
                        metrics=[mae, rmse, rec_20, pre_20],
                        user_based=True)
exp.run()
Example from PreferredAI/cornac: examples/biased_mf.py
"""Example for Matrix Factorization with biases"""

import cornac
from cornac.datasets import movielens
from cornac.eval_methods import RatioSplit

ratio_split = RatioSplit(data=movielens.load_feedback(variant='1M'),
                         test_size=0.2,
                         exclude_unknowns=False,
                         verbose=True)

global_avg = cornac.models.GlobalAvg()
mf = cornac.models.MF(k=10, max_iter=25, learning_rate=0.01, lambda_reg=0.02,
                      use_bias=True, early_stop=True, verbose=True)

mae = cornac.metrics.MAE()
rmse = cornac.metrics.RMSE()

exp = cornac.Experiment(eval_method=ratio_split,
                        models=[global_avg, mf],
                        metrics=[mae, rmse],
                        user_based=True)
exp.run()
Example from PreferredAI/cornac: examples/first_example.py
import cornac as cn

# Load MovieLens 100K dataset
ml_100k = cn.datasets.movielens.load_feedback()

# Split data based on ratio
ratio_split = cn.eval_methods.RatioSplit(data=ml_100k, test_size=0.2, rating_threshold=4.0, seed=123)

# Here we are comparing biased MF, PMF, and BPR
mf = cn.models.MF(k=10, max_iter=25, learning_rate=0.01, lambda_reg=0.02, use_bias=True, seed=123)
pmf = cn.models.PMF(k=10, max_iter=100, learning_rate=0.001, lamda=0.001, seed=123)
bpr = cn.models.BPR(k=10, max_iter=200, learning_rate=0.001, lambda_reg=0.01, seed=123)

# Define metrics used to evaluate the models
mae = cn.metrics.MAE()
rmse = cn.metrics.RMSE()
rec_20 = cn.metrics.Recall(k=20)
ndcg_20 = cn.metrics.NDCG(k=20)
auc = cn.metrics.AUC()

# Put it together into an experiment and run
exp = cn.Experiment(eval_method=ratio_split,
                    models=[mf, pmf, bpr],
                    metrics=[mae, rmse, rec_20, ndcg_20, auc],
                    user_based=True)
exp.run()
Example from PreferredAI/cornac: examples/pmf_ratio.py
import cornac
from cornac.datasets import movielens
from cornac.eval_methods import RatioSplit
from cornac.models import PMF

# Load the MovieLens 100K dataset
ml_100k = movielens.load_feedback()

# Instantiate an evaluation method.
ratio_split = RatioSplit(data=ml_100k, test_size=0.2, rating_threshold=4.0, exclude_unknowns=False)

# Instantiate a PMF recommender model.
pmf = PMF(k=10, max_iter=100, learning_rate=0.001, lamda=0.001)

# Instantiate evaluation metrics.
mae = cornac.metrics.MAE()
rmse = cornac.metrics.RMSE()
rec_20 = cornac.metrics.Recall(k=20)
pre_20 = cornac.metrics.Precision(k=20)

# Instantiate and then run an experiment.
exp = cornac.Experiment(eval_method=ratio_split,
                        models=[pmf],
                        metrics=[mae, rmse, rec_20, pre_20],
                        user_based=True)
exp.run()
Example from PreferredAI/cornac: examples/svd_example.py
import cornac as cn

ml_100k = cn.datasets.movielens.load_feedback()
ratio_split = cn.eval_methods.RatioSplit(data=ml_100k, test_size=0.2,
                                         rating_threshold=4.0, verbose=True)

bo = cn.models.BaselineOnly(max_iter=30, learning_rate=0.01, lambda_reg=0.02, verbose=True)
svd = cn.models.SVD(k=10, max_iter=30, learning_rate=0.01, lambda_reg=0.02, verbose=True)

mae = cn.metrics.MAE()
rmse = cn.metrics.RMSE()

cn.Experiment(eval_method=ratio_split,
              models=[bo, svd],
              metrics=[mae, rmse]).run()