How to use the mindsdb.libs.ml_models.pytorch.libs.base_model.BaseModel class in MindsDB

To help you get started, we’ve selected a few MindsDB examples based on popular ways BaseModel is used in public projects.
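Based on the snippets below, the typical pattern is to subclass BaseModel, set the flat-input/flat-target flags, and use a sample batch to infer layer sizes inside setup(). Here is a minimal sketch of that pattern; the hidden width, activation, and forward logic are illustrative assumptions, not one of MindsDB's actual models:

import torch.nn as nn

from mindsdb.libs.ml_models.pytorch.libs.base_model import BaseModel


class MinimalNet(BaseModel):

    def setup(self, sample_batch):
        # Expect flat vectors per row for both input and target.
        self.flatTarget = True
        self.flatInput = True

        # Use the sample batch to infer the layer dimensions.
        sample_input = sample_batch.getInput(flatten=self.flatInput)
        sample_target = sample_batch.getTarget(flatten=self.flatTarget)
        input_size = sample_input.size()[1]
        output_size = sample_target.size()[1]

        # Hypothetical architecture: one hidden layer, chosen for illustration.
        self.net = nn.Sequential(
            nn.Linear(input_size, 64),
            nn.ReLU(),
            nn.Linear(64, output_size),
        )

    def forward(self, x):
        return self.net(x)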


github mindsdb / mindsdb / mindsdb / libs / ml_models / pytorch / libs / base_model.py View on Github external
def __init__(self, sample_batch, **kwargs):
        """

        :param sample_batch: a sample batch, used to understand the characteristics of the input and target
        :type sample_batch: utils.libs.data_types.batch.Batch
        :param use_cuda:
        :param kwargs:
        """
        super(BaseModel, self).__init__()

        self.lossFunction = LogLoss()
        self.errorFunction = LogLoss()
        self.sample_batch = sample_batch

        # Learning-rate schedule based on:
        # https://towardsdatascience.com/understanding-learning-rates-and-how-it-improves-performance-in-deep-learning-d0d4059c1c10
        self.learning_rates = [(1, 200), (0.8, 20), (0.6, 20), (0.4, 20), (0.2, 40), (0.1, 60), (0.01, 80), (0.001, 100)]
        for _ in range(5):
            self.learning_rates += [(1, 40), (0.8, 20), (0.6, 20), (0.4, 20), (0.2, 20), (0.1, 20), (0.01, 30), (0.001, 40)]
        for _ in range(5):
            self.learning_rates += [(0.1, 60), (0.08, 30), (0.06, 30), (0.04, 20), (0.02, 30), (0.01, 30), (0.005, 40), (0.001, 50)]


        self.setLearningRateIndex(0)
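The learning_rates list appears to encode (learning_rate, epoch_count) pairs, and setLearningRateIndex(0) starts the schedule at its first entry. A quick, self-contained sketch (plain Python, under that assumption) to inspect how many steps and epochs such a schedule covers:

schedule = [(1, 200), (0.8, 20), (0.6, 20), (0.4, 20), (0.2, 40), (0.1, 60), (0.01, 80), (0.001, 100)]
for _ in range(5):
    schedule += [(1, 40), (0.8, 20), (0.6, 20), (0.4, 20), (0.2, 20), (0.1, 20), (0.01, 30), (0.001, 40)]
for _ in range(5):
    schedule += [(0.1, 60), (0.08, 30), (0.06, 30), (0.04, 20), (0.02, 30), (0.01, 30), (0.005, 40), (0.001, 50)]

# Assuming each pair is (rate, epochs): total epochs across the whole schedule.
total_epochs = sum(epochs for _, epochs in schedule)
print(len(schedule), 'steps,', total_epochs, 'epochs')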
github mindsdb / mindsdb / mindsdb / libs / ml_models / pytorch / models / ensemble_conv_net / ensemble_conv_net.py View on Github external
from mindsdb.config import *
from mindsdb.libs.constants.mindsdb import *

import torch
import math
import torch.nn as nn


from mindsdb.libs.ml_models.pytorch.libs.base_model import BaseModel



class EnsembleConvNet(BaseModel):

    #ignore_types = [] # NONE

    def setup(self, sample_batch):
        """
        Here we define the basic building blocks of our model; in forward we define how we put it all together with an input.

        :param sample_batch: this is used to understand the characteristics of the input and target; it is an object of type utils.libs.data_types.batch.Batch
        """

        self.flatTarget = True # True means that we will expect the target to be a flat vector per row, even if it is made of multiple variables
        self.flatInput = False # True means that we will expect the input to be a dictionary of flat vectors per column

        sample_input = sample_batch.getInput(flatten=self.flatInput)
        sample_target = sample_batch.getTarget(flatten=self.flatTarget)
        output_size = sample_target.size()[1]
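The size()[1] idiom above reads the per-row width of a 2-D tensor; it is how every model in this listing derives its output layer size. A tiny standalone illustration with made-up dimensions:

import torch

sample_target = torch.zeros(32, 7)  # hypothetical batch: 32 rows, 7 target columns
output_size = sample_target.size()[1]
print(output_size)  # 7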
github mindsdb / mindsdb / mindsdb / libs / ml_models / pytorch / models / fully_connected_net / fully_connected_net.py View on Github external
from mindsdb.config import *
from mindsdb.libs.constants.mindsdb import *

import math
import torch
import torch.nn as nn
from mindsdb.libs.ml_models.pytorch.libs.base_model import BaseModel



class FullyConnectedNet(BaseModel):


    def setup(self, sample_batch):
        """
        Here we define the basic building blocks of our model; in forward we define how we put it all together with an input.

        :param sample_batch: this is used to understand the characteristics of the input and target; it is an object of type utils.libs.data_types.batch.Batch
        """

        self.flatTarget = True # True means that we will expect the target to be a flat vector per row, even if it is made of multiple variables
        self.flatInput = True # True means that we will expect the input to be a flat vector per row, even if it is made of multiple variables

        sample_input = sample_batch.getInput(flatten=self.flatInput)
        sample_target = sample_batch.getTarget(flatten=self.flatTarget)
        input_size = sample_input.size()[1]
        output_size = sample_target.size()[1]
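The snippet ends where the derived sizes would feed layer construction. A hedged sketch of how setup might continue; the hidden width and activation below are assumptions, not FullyConnectedNet's actual values:

        # Hypothetical continuation of setup(): build a fully connected stack
        # from the sizes derived above (hidden width chosen for illustration).
        self.net = nn.Sequential(
            nn.Linear(input_size, input_size * 2),
            nn.ReLU(),
            nn.Linear(input_size * 2, output_size),
        )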
github mindsdb / mindsdb / mindsdb / libs / ml_models / pytorch / models / ensemble_fully_connected_net / ensemble_fully_connected_net.py View on Github external
from mindsdb.config import *
from mindsdb.libs.constants.mindsdb import *

import torch
import math
import torch.nn as nn
import numpy as np

from mindsdb.libs.ml_models.pytorch.libs.base_model import BaseModel
from mindsdb.libs.ml_models.pytorch.encoders.rnn.encoder_rnn import EncoderRNN


class EnsembleFullyConnectedNet(BaseModel):

    ignore_types = [] # NONE
    use_full_text_input = True

    def setup(self, sample_batch):
        """
        Here we define the basic building blocks of our model; in forward we define how we put it all together with an input.

        :param sample_batch: this is used to understand the characteristics of the input and target; it is an object of type utils.libs.data_types.batch.Batch
        """

        self.flatTarget = True # True means that we will expect the target to be a flat vector per row, even if it is made of multiple variables
        self.flatInput = False # True means that we will expect the input to be a dictionary of flat vectors per column
        self.fulltext_encoder_hidden_size = 128 # the size of the vector to which we encode text. TODO: This can be a config value
        self.input_encoder_size = 3 # the size to which we encode each input column. TODO: This can be a config value
        self.dropout_pct = 0.2 # the fraction of neurons we drop out, to help avoid saddle points. TODO: This can be a config value
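The three config-style attributes above would typically map directly onto module construction. A hedged sketch of that mapping; nn.GRU stands in for the project's EncoderRNN, whose constructor isn't shown in this snippet, and the embedding size is invented:

import torch.nn as nn

# Hypothetical mapping of the attributes above to PyTorch modules.
fulltext_encoder = nn.GRU(input_size=300, hidden_size=128)  # 128 = fulltext_encoder_hidden_size; 300 is an assumed embedding size
dropout = nn.Dropout(p=0.2)  # 0.2 = dropout_pct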
github mindsdb / mindsdb / mindsdb / libs / ml_models / pytorch / encoders / rnn / rnn.py View on Github external

from mindsdb.config import *
from mindsdb.libs.constants.mindsdb import *

import torch
import math
import torch.nn as nn


from mindsdb.libs.ml_models.pytorch.libs.base_model import BaseModel



class Rnn(BaseModel):

    #ignore_types = [] # NONE

    def setup(self, sample_batch):
        """
        Here we define the basic building blocks of our model; in forward we define how we put it all together with an input.

        :param sample_batch: this is used to understand the characteristics of the input and target; it is an object of type utils.libs.data_types.batch.Batch
        """

        self.flatTarget = True # True means that we will expect the target to be a flat vector per row, even if it is made of multiple variables
        self.flatInput = False # True means that we will expect the input to be a dictionary of flat vectors per column

        sample_input = sample_batch.getInput(flatten=self.flatInput)
        sample_target = sample_batch.getTarget(flatten=self.flatTarget)
        output_size = sample_target.size()[1]
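With flatInput = False, the comments above say getInput() yields a dictionary of flat vectors per column rather than a single tensor. A hedged sketch of how a forward pass might consume such input; the per-column encoder dict and concatenation strategy are assumptions, not the project's actual code:

import torch

def forward_over_columns(input_dict, column_encoders, output_layer):
    # Encode each column's flat vector separately, then combine them.
    encoded = [column_encoders[name](tensor) for name, tensor in input_dict.items()]
    combined = torch.cat(encoded, dim=1)
    return output_layer(combined)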
github mindsdb / mindsdb / mindsdb / libs / ml_models / pytorch / models / fully_connected_buckets_net / fully_connected_buckets_net.py View on Github external
from mindsdb.config import *
from mindsdb.libs.constants.mindsdb import *

import math
import torch
import torch.nn as nn
from mindsdb.libs.ml_models.pytorch.libs.base_model import BaseModel



class FullyConnectedBucketsNet(BaseModel):


    def setup(self, sample_batch):
        """
        Here we define the basic building blocks of our model; in forward we define how we put it all together with an input.

        :param sample_batch: this is used to understand the characteristics of the input and target; it is an object of type utils.libs.data_types.batch.Batch
        """

        self.flatTarget = True # True means that we will expect the target to be a flat vector per row, even if it is made of multiple variables
        self.flatInput = True # True means that we will expect the input to be a flat vector per row, even if it is made of multiple variables

        self.lossFunctionForBuckets = nn.CrossEntropyLoss() # this is the loss function for buckets

        sample_input = sample_batch.getInput(flatten=self.flatInput)
        sample_target = sample_batch.getTarget(flatten=self.flatTarget)
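FullyConnectedBucketsNet keeps the base LogLoss for the flat target and adds nn.CrossEntropyLoss for bucketed (categorical) outputs. A hedged, standalone sketch of combining two such objectives; the tensor shapes, bucket count, and unweighted sum are illustrative assumptions:

import torch
import torch.nn as nn

main_loss_fn = nn.MSELoss()             # stand-in for the project's LogLoss
bucket_loss_fn = nn.CrossEntropyLoss()  # matches lossFunctionForBuckets above

predictions = torch.randn(8, 4)         # hypothetical flat predictions and targets
targets = torch.randn(8, 4)
bucket_logits = torch.randn(8, 10)      # 10 hypothetical buckets
bucket_labels = torch.randint(0, 10, (8,))

# Combine the two objectives; a real model might weight the terms.
loss = main_loss_fn(predictions, targets) + bucket_loss_fn(bucket_logits, bucket_labels)
print(loss.item())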