def fit(self, train_set, val_set=None):
    """Fit the model to observations.

    Parameters
    ----------
    train_set: :obj:`cornac.data.Dataset`, required
        User-Item preference data as well as additional modalities.

    val_set: :obj:`cornac.data.Dataset`, optional, default: None
        User-Item preference data for model selection purposes (e.g., early stopping).

    Returns
    -------
    self : object
    """
    Recommender.fit(self, train_set, val_set)

    from ...utils import get_rng
    from ...utils.init_utils import xavier_uniform

    # Use a local RNG instead of overwriting `self.seed` with a RandomState,
    # so the stored seed stays reusable across refits.
    rng = get_rng(self.seed)
    self.U = self.init_params.get('U', xavier_uniform((self.train_set.num_users, self.k), rng))
    self.V = self.init_params.get('V', xavier_uniform((self.train_set.num_items, self.k), rng))

    if self.trainable:
        self._fit_cdr()

    return self
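
# Usage sketch (illustrative; `train_set` assumed built elsewhere): any factor
# supplied via `init_params` bypasses the Xavier init above, so pretrained
# factors can warm-start the model while missing keys fall back to Xavier.
import numpy as np
from cornac.models import CDR

U0 = np.random.RandomState(123).randn(train_set.num_users, 50)  # stand-in for pretrained factors
model = CDR(k=50, init_params={"U": U0}, seed=123)
model.fit(train_set)  # V is Xavier-initialized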
def fit(self, train_set, val_set=None):
    """Fit the model to observations.

    Parameters
    ----------
    train_set: :obj:`cornac.data.Dataset`, required
        User-Item preference data as well as additional modalities.

    val_set: :obj:`cornac.data.Dataset`, optional, default: None
        User-Item preference data for model selection purposes (e.g., early stopping).

    Returns
    -------
    self : object
    """
    Recommender.fit(self, train_set, val_set)

    from ...utils import get_rng
    from ...utils.init_utils import xavier_uniform

    rng = get_rng(self.seed)
    self.U = self.init_params.get('U', xavier_uniform((self.train_set.num_users, self.dimension), rng))
    self.V = self.init_params.get('V', xavier_uniform((self.train_set.num_items, self.dimension), rng))
    # Word embeddings for the CNN text module, one row per vocabulary token.
    self.W = self.init_params.get('W', xavier_uniform((self.train_set.item_text.vocab.size, self.emb_dim), rng))

    if self.trainable:
        self._fit_convmf()

    return self
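
# Setup sketch (hypothetical corpus): ConvMF reads `item_text.vocab.size`
# above, so the dataset must carry an item TextModality, e.g. attached through
# cornac's RatioSplit. `docs`, `item_ids`, and `ratings` are assumed to exist;
# parameter values are illustrative.
from cornac.data import TextModality
from cornac.eval_methods import RatioSplit

item_text = TextModality(corpus=docs, ids=item_ids, max_vocab=8000)
ratio_split = RatioSplit(data=ratings, test_size=0.2, item_text=item_text, seed=123)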
def fit(self, train_set, val_set=None):
    """Fit the model to observations.

    Parameters
    ----------
    train_set: :obj:`cornac.data.Dataset`, required
        User-Item preference data as well as additional modalities.

    val_set: :obj:`cornac.data.Dataset`, optional, default: None
        User-Item preference data for model selection purposes (e.g., early stopping).

    Returns
    -------
    self : object
    """
    Recommender.fit(self, train_set, val_set)

    from ...utils import get_rng
    from ...utils.init_utils import uniform

    rng = get_rng(self.seed)

    (rating_matrix, user_item_aspect, user_aspect_opinion,
     item_aspect_opinion, user_item_pairs) = self._build_data(self.train_set)

    # Factor-matrix and core-tensor shapes; the extra aspect row accounts for
    # the overall rating treated as an additional "aspect".
    U_shape = (self.train_set.num_users, self.n_user_factors)
    I_shape = (self.train_set.num_items, self.n_item_factors)
    A_shape = (self.train_set.sentiment.num_aspects + 1, self.n_aspect_factors)
    O_shape = (self.train_set.sentiment.num_opinions, self.n_opinion_factors)
    G1_shape = (self.n_user_factors, self.n_item_factors, self.n_aspect_factors)
    G2_shape = (self.n_user_factors, self.n_aspect_factors, self.n_opinion_factors)
    G3_shape = (self.n_item_factors, self.n_aspect_factors, self.n_opinion_factors)
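
    # Plausible continuation (hedged reconstruction): the shapes above feed a
    # uniform init for the factor matrices and core tensors, with the same
    # `init_params` override pattern as the other models.
    self.G1 = self.init_params.get('G1', uniform(G1_shape, random_state=rng))
    self.G2 = self.init_params.get('G2', uniform(G2_shape, random_state=rng))
    self.G3 = self.init_params.get('G3', uniform(G3_shape, random_state=rng))
    self.U = self.init_params.get('U', uniform(U_shape, random_state=rng))
    self.I = self.init_params.get('I', uniform(I_shape, random_state=rng))
    self.A = self.init_params.get('A', uniform(A_shape, random_state=rng))
    self.O = self.init_params.get('O', uniform(O_shape, random_state=rng))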
# TF1-style graph code; under TF2 these calls live in `tf.compat.v1` with
# eager execution disabled. `act_functions` maps names to TF activations,
# e.g. {'relu': tf.nn.relu, ...}.
import tensorflow as tf

from ...utils import get_rng
from ...utils.init_utils import xavier_uniform


def sdae(X_corrupted, layers, dropout_rate=0.0, act_fn='relu', seed=None, name="SDAE"):
    fn = act_functions.get(act_fn, None)
    if fn is None:
        raise ValueError('Invalid type of activation function {}\n'
                         'Supported functions: {}'.format(act_fn, act_functions.keys()))

    # Weight initialization
    L = len(layers)
    rng = get_rng(seed)
    weights, biases = [], []
    with tf.variable_scope(name):
        for i in range(L - 1):
            w = xavier_uniform((layers[i], layers[i + 1]), random_state=rng)
            weights.append(tf.get_variable(name='W_{}'.format(i), dtype=tf.float32,
                                           initializer=tf.constant(w)))
            # Note: shape must be a 1-tuple, not a bare int in parentheses.
            biases.append(tf.get_variable(name='b_{}'.format(i), dtype=tf.float32,
                                          shape=(layers[i + 1],),
                                          initializer=tf.zeros_initializer()))

    # Construct the auto-encoder
    h_value = X_corrupted
    for i in range(L - 1):
        # The encoder
        if i <= int(L / 2) - 1:
            h_value = fn(tf.add(tf.matmul(h_value, weights[i]), biases[i]))
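        # Hedged completion of the truncated loop: decoder layers mirror the
        # encoder, and the final reconstruction layer is left linear.
        else:
            h_value = tf.add(tf.matmul(h_value, weights[i]), biases[i])
            if i < L - 2:
                h_value = fn(h_value)
    return h_value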
import numpy as np


def _init_factors(self, n_users, n_items, features):
    from ...utils import get_rng
    from ...utils.init_utils import zeros, xavier_uniform

    rng = get_rng(self.seed)
    self.beta_item = self.init_params.get('Bi', zeros(n_items))
    self.gamma_user = self.init_params.get('Gu', xavier_uniform((n_users, self.k), rng))
    self.gamma_item = self.init_params.get('Gi', xavier_uniform((n_items, self.k), rng))
    self.theta_user = self.init_params.get('Tu', xavier_uniform((n_users, self.k2), rng))
    self.emb_matrix = self.init_params.get('E', xavier_uniform((features.shape[1], self.k2), rng))
    self.beta_prime = self.init_params.get('Bp', xavier_uniform((features.shape[1], 1), rng))

    # Pre-computed for faster evaluation: project the features once into the
    # k2-dimensional space and fold the feature bias into a per-item vector.
    self.theta_item = np.matmul(features, self.emb_matrix)
    self.visual_bias = np.matmul(features, self.beta_prime).ravel()
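
# Illustrative sketch (hypothetical method name; assumes a VBPR-style model,
# which these factor names suggest): the precomputed `theta_item` and
# `visual_bias` let all item scores be assembled without touching raw features.
def _score_sketch(self, user_idx):
    # score(u, .) = item bias + CF term + visual term + visual bias
    return (self.beta_item
            + self.gamma_item.dot(self.gamma_user[user_idx])
            + self.theta_item.dot(self.theta_user[user_idx])
            + self.visual_bias)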
import numpy as np


def paraserver(user_item_pairs, user_item_aspect, user_aspect_opinion, item_aspect_opinion,
               n_element_samples, n_bpr_samples, lambda_reg, n_epochs, lr,
               G1, G2, G3, U, I, A, O,
               error_square, error_bpr, q_samples_mse, q_samples_bpr,
               del_g1, del_g2, del_g3, del_u, del_i, del_a, del_o,
               num_grad, n_threads, seed=None, verbose=False):
    from ...utils import get_rng

    rng = get_rng(seed)

    # Per-parameter accumulators of squared gradients (Adagrad-style).
    sum_square_gradients_G1 = np.zeros_like(G1)
    sum_square_gradients_G2 = np.zeros_like(G2)
    sum_square_gradients_G3 = np.zeros_like(G3)
    sum_square_gradients_U = np.zeros_like(U)
    sum_square_gradients_I = np.zeros_like(I)
    sum_square_gradients_A = np.zeros_like(A)
    sum_square_gradients_O = np.zeros_like(O)

    # Split the MSE and BPR sample budgets evenly across worker threads.
    mse_per_proc = int(n_element_samples / n_threads)
    bpr_per_proc = int(n_bpr_samples / n_threads)

    user_item_aspect_keys = np.array(list(user_item_aspect.keys()))
    user_aspect_opinion_keys = np.array(list(user_aspect_opinion.keys()))
    item_aspect_opinion_keys = np.array(list(item_aspect_opinion.keys()))
    user_item_pairs_keys = np.array(user_item_pairs)
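
# Hedged sketch: the squared-gradient accumulators above are the Adagrad
# pattern, so each worker update plausibly looks like this (the epsilon and
# in-place convention are illustrative, not taken from the source).
import numpy as np

def adagrad_step(param, grad, accum, lr, eps=1e-8):
    # Accumulate squared gradients, then scale the step per coordinate.
    accum += grad ** 2
    param -= lr * grad / (np.sqrt(accum) + eps)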
import numpy as np

from ...utils import get_rng


def _partition_data(self):
    """Partition ratings into n_folds."""
    rng = get_rng(self.seed)
    fold_size = int(self.n_ratings / self.n_folds)
    remain_size = self.n_ratings - fold_size * self.n_folds

    # Assign each rating a fold id, then shuffle for randomness.
    partition = np.repeat(np.arange(self.n_folds), fold_size)
    rng.shuffle(partition)

    # Spread any leftover ratings across randomly chosen folds.
    if remain_size > 0:
        remain_partition = rng.choice(self.n_folds, size=remain_size, replace=True, p=None)
        partition = np.concatenate((partition, remain_partition))

    return partition
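
# Quick check (illustrative; `cv_method` is a hypothetical instance of the
# class above): every rating gets exactly one fold id, and the folds are
# balanced up to the randomly assigned remainder.
import numpy as np

partition = cv_method._partition_data()
assert partition.shape == (cv_method.n_ratings,)
print(np.bincount(partition, minlength=cv_method.n_folds))  # per-fold counts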
import numpy as np

from ...utils import get_rng


def __init__(self, U, V, n_user, n_item, n_vocab, k=200, lambda_u=0.01, lambda_v=0.01,
             eta=0.01, a=1, b=0.01, max_iter=100, seed=None):
    self.k = k
    self.lambda_u = lambda_u
    self.lambda_v = lambda_v
    self.eta = eta
    self.a = a
    self.b = b
    self.max_iter = max_iter
    self.U = U
    self.V = V
    self.n_item = n_item
    self.n_user = n_user
    self.n_vocab = n_vocab
    self.seed = seed
    rng = get_rng(self.seed)

    # LDA variables
    self.theta = rng.random_sample([self.n_item, self.k])
    self.theta = self.theta / self.theta.sum(1)[:, np.newaxis]  # normalize rows: topic distribution per item
    self.beta = rng.random_sample([self.n_vocab, self.k])
    self.beta = self.beta / self.beta.sum(0)  # normalize columns: word distribution per topic
    self.phi_sum = np.zeros([self.n_vocab, self.k]) + self.eta
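
# Sanity check (illustrative; `model` is a hypothetical instance of the class
# above): after the normalizations, theta rows and beta columns are proper
# probability distributions.
import numpy as np

assert np.allclose(model.theta.sum(axis=1), 1.0)  # item-topic distributions
assert np.allclose(model.beta.sum(axis=0), 1.0)   # topic-word distributions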