How to use the implicit.cuda module in implicit

To help you get started, we've selected a few implicit.cuda examples, based on popular ways it is used in public projects.

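In implicit itself, the GPU code path is normally reached through the public model classes rather than by calling implicit.cuda directly. Here is a minimal usage sketch, assuming a pre-1.0 release of implicit built with the CUDA extension; the toy matrix is illustrative, not taken from the projects below:

import numpy as np
import scipy.sparse as sp

import implicit.cuda
from implicit.als import AlternatingLeastSquares

# toy item-user confidence matrix; real data would come from interaction logs
item_users = sp.random(50, 100, density=0.05, format='csr', dtype=np.float32)

model = AlternatingLeastSquares(factors=64, use_gpu=implicit.cuda.HAS_CUDA)
model.fit(item_users)  # dispatches to the GPU path when use_gpu is True
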
From benfred/implicit, in implicit/als.py (view on GitHub):
def _fit_gpu(self, Ciu_host, Cui_host, show_progress=True):
        """ specialized training on the gpu. copies inputs to/from cuda device """
        if not implicit.cuda.HAS_CUDA:
            raise ValueError("No CUDA extension has been built, can't train on GPU.")

        if self.dtype == np.float64:
            log.warning("Factors of dtype float64 aren't supported with gpu fitting. "
                        "Converting factors to float32")
            self.item_factors = self.item_factors.astype(np.float32)
            self.user_factors = self.user_factors.astype(np.float32)

        # copy the confidence matrices and factor matrices over to the GPU
        Ciu = implicit.cuda.CuCSRMatrix(Ciu_host)
        Cui = implicit.cuda.CuCSRMatrix(Cui_host)
        X = implicit.cuda.CuDenseMatrix(self.user_factors.astype(np.float32))
        Y = implicit.cuda.CuDenseMatrix(self.item_factors.astype(np.float32))

        solver = implicit.cuda.CuLeastSquaresSolver(self.factors)
        log.debug("Running %i ALS iterations", self.iterations)
        with tqdm(total=self.iterations, disable=not show_progress) as progress:
            for iteration in range(self.iterations):
                s = time.time()
                # alternate between solving for the user and the item factors
                solver.least_squares(Cui, X, Y, self.regularization, self.cg_steps)
                solver.least_squares(Ciu, Y, X, self.regularization, self.cg_steps)
                progress.update(1)

                if self.fit_callback:
                    self.fit_callback(iteration, time.time() - s)

                if self.calculate_training_loss:
                    loss = solver.calculate_loss(Cui, X, Y, self.regularization)
                    progress.set_postfix({"loss": loss})

        if self.calculate_training_loss:
            log.info("Final training loss %.4f", loss)

        # copy the trained factors back from the GPU into the host arrays
        X.to_host(self.user_factors)
        Y.to_host(self.item_factors)
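
The same primitives can also be driven by hand. A sketch under stated assumptions: the pre-1.0 implicit.cuda extension is built, the random matrices stand in for real confidence data, and factors is kept at a multiple of 32 to satisfy the warp-alignment requirement noted in the last snippet below:

import numpy as np
import scipy.sparse as sp
import implicit.cuda

factors, users, items = 32, 100, 50
Cui_host = sp.random(users, items, density=0.05, format='csr', dtype=np.float32)
Ciu_host = Cui_host.T.tocsr()
X_host = np.random.rand(users, factors).astype(np.float32)
Y_host = np.random.rand(items, factors).astype(np.float32)

# copy the inputs onto the CUDA device
Cui = implicit.cuda.CuCSRMatrix(Cui_host)
Ciu = implicit.cuda.CuCSRMatrix(Ciu_host)
X = implicit.cuda.CuDenseMatrix(X_host)
Y = implicit.cuda.CuDenseMatrix(Y_host)

solver = implicit.cuda.CuLeastSquaresSolver(factors)
for _ in range(15):
    solver.least_squares(Cui, X, Y, 0.01, 3)  # update user factors
    solver.least_squares(Ciu, Y, X, 0.01, 3)  # update item factors

X.to_host(X_host)  # copy the trained user factors back to the host array
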
From benfred/implicit, in implicit/approximate_als.py (view on GitHub):
def __init__(self, approximate_similar_items=True, approximate_recommend=True,
                 nlist=400, nprobe=20, use_gpu=implicit.cuda.HAS_CUDA, *args, **kwargs):
        self.similar_items_index = None
        self.recommend_index = None

        self.approximate_similar_items = approximate_similar_items
        self.approximate_recommend = approximate_recommend

        # hyper-parameters for FAISS
        self.nlist = nlist
        self.nprobe = nprobe
        super(FaissAlternatingLeastSquares, self).__init__(*args, use_gpu=use_gpu, **kwargs)
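
A sketch of how this constructor might be called, assuming faiss is installed alongside a pre-1.0 implicit; the parameter values here are illustrative:

import implicit.cuda
from implicit.approximate_als import FaissAlternatingLeastSquares

model = FaissAlternatingLeastSquares(
    factors=64,
    nlist=400,                       # number of IVF cells in the FAISS index
    nprobe=20,                       # cells probed per query; more is slower but more accurate
    use_gpu=implicit.cuda.HAS_CUDA,  # fall back to CPU when no CUDA extension is built
)

Defaulting use_gpu to implicit.cuda.HAS_CUDA means the approximate model opts into GPU training whenever the extension is available, without the caller having to check.
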
From benfred/implicit, in implicit/als.py (view on GitHub):
def __init__(self, factors=100, regularization=0.01, dtype=np.float32,
                 use_native=True, use_cg=True, use_gpu=implicit.cuda.HAS_CUDA,
                 iterations=15, calculate_training_loss=False, num_threads=0):
        super(AlternatingLeastSquares, self).__init__()

        # currently there are some issues when training on the GPU when some of the warps
        # don't have full factors. Round up to be warp aligned.
        # TODO: figure out where the issue is (best guess is in the
        # 'dot' function in 'implicit/cuda/utils.cuh')
        if use_gpu and factors % 32:
            padding = 32 - factors % 32
            log.warning("GPU training requires factor size to be a multiple of 32."
                        " Increasing factors from %i to %i.", factors, factors + padding)
            factors += padding

        # parameters on how to factorize
        self.factors = factors
        self.regularization = regularization

        # options on how to fit the model
        self.dtype = dtype
        self.use_native = use_native
        self.use_cg = use_cg
        self.use_gpu = use_gpu
        self.iterations = iterations
        self.calculate_training_loss = calculate_training_loss
        self.num_threads = num_threads
        self.fit_callback = None
        self.cg_steps = 3
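
The warp-alignment padding above is visible from the outside. A quick sketch, assuming a CUDA-enabled build:

from implicit.als import AlternatingLeastSquares

# 100 is not a multiple of 32, so GPU training rounds it up to the next warp boundary
model = AlternatingLeastSquares(factors=100, use_gpu=True)
print(model.factors)  # 128, after a warning that factors was increased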