How to use the lightwood.config.config.CONFIG object in lightwood

To help you get started, we’ve selected a few lightwood examples based on popular ways CONFIG is used in public projects.

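CONFIG is lightwood's global settings object: the library reads its attributes at run time to decide things like monitoring output, input-feature dropout, and whether to train extra helper mixers. A minimal sketch of importing it and overriding a flag (run-time assignment as the override mechanism is an assumption):

from lightwood.config.config import CONFIG

# CONFIG is a plain settings object; inspect or override attributes
# before training starts.
print(CONFIG.ENABLE_DROPOUT)   # read a flag
CONFIG.ENABLE_DROPOUT = False  # override it for this process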

From mindsdb/lightwood: lightwood/api/predictor.py
                setattr(self._mixer, param, mixer_params[param])
            else:
                logging.warning(
                    'Trying to set mixer param {param} but mixer class {mixerclass} '
                    'does not have such a parameter'.format(
                        param=param, mixerclass=str(type(self._mixer))
                    )
                )

        def callback_on_iter_w_acc(epoch, training_error, test_error, delta_mean):
            # Wrap the user-supplied callback so it also receives test-set accuracy
            if callback_on_iter is not None:
                callback_on_iter(epoch, training_error, test_error, delta_mean, self.calculate_accuracy(test_data_ds))

        self._mixer.fit(
            train_ds=from_data_ds,
            test_ds=test_data_ds,
            callback=callback_on_iter_w_acc,
            stop_training_after_seconds=stop_training_after_seconds,
            eval_every_x_epochs=eval_every_x_epochs
        )
        self.train_accuracy = self.calculate_accuracy(test_data_ds)

        # Train some alternative mixers
        if CONFIG.HELPER_MIXERS and self.has_boosting_mixer and \
                (CONFIG.FORCE_HELPER_MIXERS or len(from_data_ds) < 12 * pow(10, 3)):
            try:
                self._helper_mixers = self.train_helper_mixers(
                    from_data_ds, test_data_ds,
                    self._mixer.quantiles[self._mixer.quantiles_pair[0] + 1:self._mixer.quantiles_pair[1] + 1]
                )
            except Exception as e:
                logging.warning(f'Failed to train helper mixers with error: {e}')

        return self
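
In the excerpt above, two CONFIG flags gate the extra "helper" mixers: CONFIG.HELPER_MIXERS enables them at all, and CONFIG.FORCE_HELPER_MIXERS trains them even when the dataset has 12,000 rows or more (otherwise they are only trained on smaller datasets). A minimal sketch of forcing them on before training (run-time assignment is an assumption):

from lightwood.config.config import CONFIG

# Always train the alternative boosting-based mixers, even on large datasets.
CONFIG.HELPER_MIXERS = True
CONFIG.FORCE_HELPER_MIXERS = True
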
From mindsdb/lightwood: lightwood/mixers/nn/nn.py
                        layer_name = []
                        for index, layer in enumerate(self.net.awareness_net):
                            if 'Linear' in str(type(layer)):
                                weights.append(list(layer.weight.cpu().detach().numpy().ravel()))
                                gradients.append(list(layer.weight.grad.cpu().detach().numpy().ravel()))
                                layer_name.append(f'Layer {index}-{index+1}')
                        self.monitor.weight_map(layer_name, weights, 'Awareness network weights')
                        self.monitor.weight_map(layer_name, gradients, 'Awareness network gradients')

                self.optimizer.step()
                # Both losses have had backward() run, so take a single optimizer step
                # (review: we may need to step the optimizer after each loss instead)

                error = running_loss / (i + 1)

                if CONFIG.MONITORING['batch_loss']:
                    # self.monitor.plot_loss(total_loss.item(), self.total_iterations, 'Total Batch Loss')
                    self.monitor.plot_loss(error, self.total_iterations, 'Mean Total Running Loss')

            if CONFIG.MONITORING['epoch_loss']:
                self.monitor.plot_loss(error, self.total_iterations, 'Train Epoch Error')
                self.monitor.plot_loss(error, self.total_iterations, f'Train Epoch Error - Subset {subset_id}')
            yield error
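
Every plotting call in this training loop is wrapped in a check against the CONFIG.MONITORING dictionary, so monitoring overhead is only paid for the signals you switch on. A minimal sketch of the pattern (treating the dictionary as writable at run time is an assumption):

from lightwood.config.config import CONFIG

# Keep the smoother per-epoch curves, drop the noisy per-batch ones.
CONFIG.MONITORING['batch_loss'] = False
CONFIG.MONITORING['epoch_loss'] = True
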
From mindsdb/lightwood: lightwood/api/data_source.py
    def __getitem__(self, idx):
        sample = {}

        dropout_features = None

        if self.training and random.randint(0, 3) == 1 and self.enable_dropout and CONFIG.ENABLE_DROPOUT:
            dropout_features = [
                feature['name'] for feature in self.configuration['input_features']
                if random.random() > (1 - self.dropout_dict[feature['name']])
            ]

            # Make sure we never drop all the features, since that would make the row meaningless
            if len(dropout_features) >= len(self.configuration['input_features']):
                dropout_features = dropout_features[:-1]
            # logging.debug(f'\n-------------\nDropping out features: {dropout_features}\n-------------\n')

        if not self.disable_cache:
            if self.transformed_cache is None:
                self.transformed_cache = [None] * len(self)

            if dropout_features is None or len(dropout_features) == 0:
                cached_sample = self.transformed_cache[idx]
                if cached_sample is not None:
                    return cached_sample
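
The CONFIG.ENABLE_DROPOUT flag is a global kill switch layered on top of the data source's own enable_dropout attribute; only when both are true (and the one-in-four draw hits) are any input features dropped for a row. The per-feature selection itself is plain Python, sketched below with hypothetical feature names and probabilities:

import random

# Hypothetical per-feature dropout probabilities, mirroring dropout_dict above.
dropout_dict = {'age': 0.1, 'income': 0.2, 'city': 0.1}
input_features = [{'name': name} for name in dropout_dict]

# random.random() > (1 - p) is true with probability p, so each feature
# is dropped independently with its own probability.
dropout_features = [
    f['name'] for f in input_features
    if random.random() > (1 - dropout_dict[f['name']])
]

# Never drop every feature: a row with no inputs would be meaningless.
if len(dropout_features) >= len(input_features):
    dropout_features = dropout_features[:-1]

print(dropout_features)
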
From mindsdb/lightwood: scraps/bayesian_nn/bayesian_nn.py
        data_loader = DataLoader(ds, batch_size=self.batch_size, shuffle=True, num_workers=0)
        running_loss = 0.0
        error = 0

        for i, data in enumerate(data_loader, 0):
            inputs, labels = data
            inputs = inputs.to(self.net.device)
            labels = labels.to(self.net.device)

            if self.is_categorical_output:
                # Convert one-hot encoded labels into class indices for the loss
                target = labels.cpu().numpy()
                target_indexes = np.where(target > 0)[1]
                targets_c = torch.LongTensor(target_indexes)
                labels = targets_c.to(self.net.device)

            # Sample several networks from the Pyro guide and average their
            # predictions to get a posterior-mean output
            sampled_models = [self.pyro_guide(None, None) for _ in range(CONFIG.NUMBER_OF_PROBABILISTIC_MODELS)]
            out_hats = [model(inputs).data for model in sampled_models]
            outputs_mean = torch.mean(torch.stack(out_hats), 0)

            loss = self.criterion(outputs_mean, labels)
            running_loss += loss.item()
            error = running_loss / (i + 1)

        return error
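
CONFIG.NUMBER_OF_PROBABILISTIC_MODELS controls how many networks are sampled from the Pyro guide before their outputs are averaged into a single prediction. The stack-and-mean step is ordinary PyTorch; here is a self-contained sketch of it, with toy linear models standing in for the sampled networks:

import torch
import torch.nn as nn

number_of_models = 5  # stands in for CONFIG.NUMBER_OF_PROBABILISTIC_MODELS

# Toy stand-ins for models sampled from a Pyro guide.
sampled_models = [nn.Linear(4, 3) for _ in range(number_of_models)]

inputs = torch.randn(8, 4)  # a batch of 8 rows with 4 features
with torch.no_grad():
    out_hats = [model(inputs) for model in sampled_models]

# Stack to shape (number_of_models, 8, 3), then average over the model axis.
outputs_mean = torch.mean(torch.stack(out_hats), 0)
print(outputs_mean.shape)  # torch.Size([8, 3])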