How to use the megnet.callbacks.ModelCheckpointMAE callback in megnet

To help you get started, we've selected a megnet example showing how ModelCheckpointMAE is used in a public project.


Source: materialsvirtuallab/megnet, megnet/models.py (view on GitHub). The excerpt below comes from the model-training code, where ModelCheckpointMAE is registered as a training callback whenever validation data are supplied.
        # When validation graphs are supplied, scale their targets and build a
        # validation generator for the checkpoint callback below.
        if validation_graphs is not None:
            val_nb_atoms = [len(i['atom']) for i in validation_graphs]
            validation_targets = [self.target_scaler.transform(i, j)
                                  for i, j in zip(validation_targets, val_nb_atoms)]
            val_inputs = self.graph_converter.get_flat_data(validation_graphs, validation_targets)

            val_generator = self._create_generator(*val_inputs,
                                                   batch_size=batch_size)
            steps_per_val = int(np.ceil(len(validation_graphs) / batch_size))
            # Optionally reduce the learning rate when the loss becomes NaN
            if automatic_correction:
                callbacks.extend([ReduceLRUponNan(filepath=filepath,
                                                  monitor=monitor,
                                                  mode=mode,
                                                  factor=lr_scaling_factor,
                                                  patience=patience,
                                                  )])
            # Save the best model according to the validation MAE
            if save_checkpoint:
                callbacks.extend([ModelCheckpointMAE(filepath=filepath,
                                                     monitor=monitor,
                                                     mode=mode,
                                                     save_best_only=True,
                                                     save_weights_only=False,
                                                     val_gen=val_generator,
                                                     steps_per_val=steps_per_val,
                                                     target_scaler=self.target_scaler)])
        else:
            val_generator = None
            steps_per_val = None
        train_inputs = self.graph_converter.get_flat_data(train_graphs, train_targets)
        # check dimension match
        self.check_dimension(train_graphs[0])
        train_generator = self._create_generator(*train_inputs, batch_size=batch_size)
        steps_per_train = int(np.ceil(len(train_graphs) / batch_size))
        # The registered callbacks (including ModelCheckpointMAE) run during fitting
        self.fit_generator(train_generator, steps_per_epoch=steps_per_train,
                           validation_data=val_generator, validation_steps=steps_per_val,
                           callbacks=callbacks)
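
Outside of this training helper, the same callback can be attached to any Keras-style fit call. The sketch below is illustrative only: model, train_generator, val_generator, steps_per_train, steps_per_val, and scaler are placeholders you would build yourself (for example with the model's graph converter and target scaler), and the monitor/mode/filepath values are assumptions rather than documented defaults.

from megnet.callbacks import ModelCheckpointMAE

# Placeholders (not part of the excerpt above): a compiled MEGNet/Keras model,
# train and validation data generators, their step counts, and the scaler that
# was used to transform the training targets.
checkpoint = ModelCheckpointMAE(
    filepath='best_model.hdf5',    # where the best model is written
    monitor='val_mae',             # assumed metric name; the excerpt passes this in as `monitor`
    mode='min',                    # lower MAE is better
    save_best_only=True,
    save_weights_only=False,
    val_gen=val_generator,         # placeholder validation generator
    steps_per_val=steps_per_val,   # placeholder number of validation batches
    target_scaler=scaler,          # placeholder target scaler
)

model.fit_generator(train_generator,
                    steps_per_epoch=steps_per_train,
                    callbacks=[checkpoint])

Note that the callback takes val_gen, steps_per_val, and target_scaler explicitly rather than relying on Keras' built-in validation metrics, which is presumably so it can evaluate the MAE on targets mapped back to their original scale before deciding which model to save.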