How to use the mindsdb.libs.data_types.mindsdb_logger.log.debug function in MindsDB

To help you get started, we’ve selected a few MindsDB examples based on popular ways the log.debug function is used in public projects.

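Before diving into the repository snippets, here is a minimal usage sketch. The import path is read directly off the fully-qualified name above and the message string is borrowed from the examples below; the surrounding variables are purely illustrative.

from mindsdb.libs.data_types.mindsdb_logger import log

# emit a debug-level message; the snippets below build their messages with str.format in the same way
batch_number = 7
log.debug('[EPOCH-BATCH] testing batch: {batch_number}'.format(batch_number=batch_number))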

github mindsdb / mindsdb / mindsdb / libs / ml_models / pytorch / libs / base_model.py View on Github
        self.eval()  # switch the model to evaluation mode
        perm_index = 0

        for batch_number, batch in enumerate(test_sampler):

            # do only one permutation at a time if we have 2 or more columns
            if len(self.input_column_names) > 1:
                perms = [self.col_permutations[perm_index], []]
                perm_index = perm_index + 1 if perm_index + 1 < len(self.col_permutations) else 0
            else:
                perms = [[]]

            for permutation in perms:
                batch.blank_columns = permutation
                #batch.blank_columns = []
                log.debug('[EPOCH-BATCH] testing batch: {batch_number}'.format(batch_number=batch_number))
                # get real and predicted values by running the model with the input of this batch
                predicted_target = self.forwardWrapper(batch)
                real_target = batch.getTarget(flatten=self.flatTarget)
                # append to all targets and all real values
                real_target_all += real_target.data.tolist()
                predicted_target_all += predicted_target.data.tolist()

                if len(permutation) == 0:
                    # append to all targets and all real values
                    real_target_all_ret += real_target.data.tolist()
                    predicted_target_all_ret += predicted_target.data.tolist()

        if batch is None:
            log.error('there is no data in test, we should not be here')
github mindsdb / mindsdb / mindsdb / libs / data_types / sampler.py View on Github
                    # log.debug('Generating: pytorch variables, batch: {column}-[{group_pointer}:{limit}]-{column_type}'.format(column=column, group_pointer=group_pointer, limit=limit, column_type=self.stats[column]['data_type']))
                    # col_start_time = time.time()
                    #if self.stats[column]['data_type'] != DATA_TYPES.FULL_TEXT:
                    ret[column] = self.data[group][column][group_pointer:limit]

                    ext_col_name = EXTENSION_COLUMNS_TEMPLATE.format(column_name=column)
                    if ext_col_name in self.data[group]:
                        ret[ext_col_name] = self.data[group][ext_col_name][group_pointer:limit]
                    # else:
                    #     # Todo: figure out how to deal with full text features here
                    #     ret[column] =[0]*(limit-group_pointer)

                    # log.debug('Generated: {column} [OK] in {time_delta:.2f} seconds'.format(column=column, time_delta=(time.time()-col_start_time)))

                log.debug('Generated: [ALL_COLUMNS] in batch [OK], {time_delta:.2f} seconds'.format(time_delta=(time.time() - allcols_time)))

                yield Batch(self, ret, group=group, column=column, start=group_pointer, end=limit, blank_columns=self.blank_columns)

                ret = {}
                group_pointer = limit
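
In the sampler snippet above, log.debug doubles as a lightweight profiler: time.time() is recorded before the per-column work and the elapsed time is reported in the debug message. A minimal standalone sketch of that pattern, with the message text taken from the snippet and the batch-building step stubbed out:

import time

from mindsdb.libs.data_types.mindsdb_logger import log

allcols_time = time.time()
ret = {}  # the per-column batch dictionary would be built here
log.debug('Generated: [ALL_COLUMNS] in batch [OK], {time_delta:.2f} seconds'.format(
    time_delta=(time.time() - allcols_time)))
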
github mindsdb / mindsdb / mindsdb / libs / helpers / general_helpers.py View on Github
    token = token + '.NO_WRITE'
    extra = urllib.parse.quote_plus(token)
    try:
        r = requests.get('http://mindsdb.com/updates/check/{extra}'.format(extra=extra), headers={'referer': 'http://check.mindsdb.com/?token={token}'.format(token=token)})
    except:
        log.warning('Could not check for updates')
        return
    try:
        # TODO: Extract version, compare with version in version.py
        ret = r.json()

        if 'version' in ret and ret['version'] != __version__:
            pass
            #log.warning("There is a new version of MindsDB {version}, please do:\n    pip3 uninstall mindsdb\n    pip3 install mindsdb --user".format(version=ret['version']))
        else:
            log.debug('MindsDB is up to date!')

    except:
        log.warning('could not check for MindsDB updates')
github mindsdb / mindsdb / mindsdb / libs / ml_models / pytorch / libs / base_model.py View on Github
                # Iterate over permutations in the train loop (the body of this for statement)
                # Interface: Batch.setNullColumns(cols=)

                # do only one permutation at a time if we have 2 or more columns
                if len(self.input_column_names) > 1:
                    perms = [self.col_permutations[perm_index], []]
                    perm_index = perm_index + 1 if perm_index + 1 < len(self.col_permutations) else 0
                else:
                    perms = [[]]
                # here we pass one column per batch to use as a permutation: on every batch
                # we do one run with all columns present ([]) and one run with a single column blanked out (filled with None values)

                for permutation in perms:
                    batch.blank_columns = permutation
                    response.batch = batch_number
                    log.debug('[EPOCH-BATCH] Training on epoch: {epoch}/{num_epochs}, batch: {batch_number}'.format(
                            epoch=epoch + 1, num_epochs=self.total_epochs, batch_number=batch_number))
                    model_object.train() # toggle to train
                    model_object.zeroGradOptimizer()
                    loss, batch_size = model_object.calculateBatchLoss(batch)
                    if batch_size <= 0:
                        break
                    total_samples += batch_size
                    full_set_loss += int(loss.item()) * batch_size # this is because we need to weight the error by the number of samples in the batch
                    average_loss = full_set_loss / total_samples
                    loss.backward()  # retain_graph=True
                    model_object.optimize()
                    response.loss = average_loss

                    yield response
github mindsdb / mindsdb / mindsdb / libs / ml_models / pytorch / libs / base_model.py View on Github
        self.eval()  # switch the model to evaluation mode
        perm_index = 0

        for batch_number, batch in enumerate(test_sampler):

            # do only one permutation at a time if we have 2 or more columns
            if len(self.input_column_names) > 1:
                perms = [self.col_permutations[perm_index], []]
                perm_index = perm_index + 1 if perm_index + 1 < len(self.col_permutations) else 0
            else:
                perms = [[]]

            for permutation in perms:
                batch.blank_columns = permutation
                #batch.blank_columns = []
                log.debug('[EPOCH-BATCH] testing batch: {batch_number}'.format(batch_number=batch_number))
                # get real and predicted values by running the model with the input of this batch
                predicted_target = self.forwardWrapper(batch)
                real_target = batch.getTarget(flatten=self.flatTarget)
                # append to all targets and all real values
                real_target_all += real_target.data.tolist()
                predicted_target_all += predicted_target.data.tolist()

                if len(permutation) == 0:
                    # append to all targets and all real values
                    real_target_all_ret += real_target.data.tolist()
                    predicted_target_all_ret += predicted_target.data.tolist()

        if batch is None:
            log.error('there is no data in test, we should not be here')
            return