def test_model_trainer_timeout():
    model = CnnGenerator(3, (28, 28, 3)).generate().produce_model()
    timeout = 1
    train_data, test_data = get_classification_data_loaders()
    with pytest.raises(TimeoutError):
        ModelTrainer(model,
                     train_data=train_data,
                     test_data=test_data,
                     metric=Accuracy,
                     loss_function=Backend.classification_loss,
                     verbose=True,
                     path=TEST_TEMP_DIR).train_model(max_iter_num=300, timeout=timeout)
    clean_dir(TEST_TEMP_DIR)
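# The test above relies on train_model raising TimeoutError once the wall-clock
# budget is spent. A minimal sketch of that pattern (hypothetical helper, not
# the project's actual ModelTrainer internals): check a deadline between epochs.
import time

def train_with_timeout(train_one_epoch, max_iter_num, timeout):
    """Run up to max_iter_num epochs, raising TimeoutError past the deadline."""
    deadline = time.time() + timeout
    for _ in range(max_iter_num):
        if time.time() > deadline:
            raise TimeoutError('training exceeded %s seconds' % timeout)
        train_one_epoch()

# Usage: train_with_timeout(lambda: None, max_iter_num=300, timeout=1) finishes
# instantly here, but a real epoch function would eventually trip the deadline.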
def predict(self, x_test):
    """Return the predicted labels for the testing data.

    Args:
        x_test: An instance of numpy.ndarray containing the testing data.

    Returns:
        A numpy.ndarray containing the predicted labels for the testing data.
    """
    if Constant.LIMIT_MEMORY:
        pass
    x_test = self.preprocess(x_test)
    test_loader = self.data_transformer.transform_test(x_test)
    model = self.graph.produce_model()
    model.eval()
    output = Backend.predict(model, test_loader)
    return self.inverse_transform_y(output)
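# Backend.predict is opaque here; the following is a plain-PyTorch sketch of the
# same eval-then-predict pattern (the helper name and toy model are illustrative,
# not part of the project's API).
import torch
from torch.utils.data import DataLoader, TensorDataset

def predict_labels(model, test_loader):
    """Collect argmax class labels batch by batch with gradients disabled."""
    model.eval()  # freeze dropout / batch-norm statistics, as model.eval() does above
    outputs = []
    with torch.no_grad():
        for (batch,) in test_loader:
            outputs.append(model(batch).argmax(dim=1))
    return torch.cat(outputs).numpy()

loader = DataLoader(TensorDataset(torch.randn(8, 4)), batch_size=4)
print(predict_labels(torch.nn.Linear(4, 3), loader))  # 8 predicted labels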
"""
validate_xy(x_train, y_train)
self.resize_shape = compute_image_resize_params(x_train)
x_train = self.preprocess(x_train)
self.y_encoder.fit(y_train)
y_train = self.transform_y(y_train)
# Divide training data into training and testing data.
validation_set_size = int(len(y_train) * Constant.VALIDATION_SET_SIZE)
validation_set_size = min(validation_set_size, 500)
validation_set_size = max(validation_set_size, 1)
x_train_new, x_test, y_train_new, y_test = train_test_split(x_train, y_train,
test_size=validation_set_size,
random_state=42)
# initialize data_transformer
self.data_transformer = Backend.get_image_transformer(x_train)
# Wrap the data into DataLoaders
train_loader = self.data_transformer.transform_train(x_train_new, y_train_new)
test_loader = self.data_transformer.transform_test(x_test, y_test)
self.generator = self._init_generator(self.y_encoder.n_classes, x_train_new.shape[1:])
graph = self.generator.generate()
if time_limit is None:
time_limit = {'max_no_improvement_num': 30}
_, _1, self.graph = train(None, graph, train_loader, test_loader,
time_limit, self.metric, self.loss,
self.verbose, self.path)
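# The three validation_set_size lines above clamp the held-out split to the
# range [1, 500]. A quick self-contained check of that arithmetic (the 0.08
# ratio is an assumed stand-in for Constant.VALIDATION_SET_SIZE):
def clamp_validation_size(n_samples, ratio=0.08):
    size = int(n_samples * ratio)
    return max(min(size, 500), 1)

assert clamp_validation_size(100) == 8        # 8% of 100 samples
assert clamp_validation_size(100000) == 500   # capped at 500 samples
assert clamp_validation_size(5) == 1          # never below 1 sample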
def __init__(self, nz, ngf, ndf, nc, verbose, gen_training_result=None, augment=None):
    """Initialize the GAN.

    Args:
        nz: Size of the latent z vector.
        ngf: Number of generator filters in the first conv layer.
        ndf: Number of discriminator filters in the first conv layer.
        nc: Number of input channels.
        verbose: A boolean of whether the search process will be printed to stdout.
        gen_training_result: A tuple of (path, size) denoting where to output the
            intermediate results and how many samples to generate.
        augment: A boolean value indicating whether the data needs augmentation.
    """
    super().__init__(verbose)
    self.nz = nz
    self.ngf = ngf
    self.ndf = ndf
    self.nc = nc
    self.verbose = verbose
    self.device = Backend.get_device()
    self.gen_training_result = gen_training_result
    self.augment = augment if augment is not None else Constant.DATA_AUGMENTATION
    self.data_transformer = None
    self.net_d = Discriminator(self.nc, self.ndf)
    self.net_g = Generator(self.nc, self.nz, self.ngf)
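# What nz and ngf control, sketched as the canonical first DCGAN generator layer
# in plain PyTorch (a minimal illustration, not the project's Generator class):
import torch
import torch.nn as nn

nz, ngf = 100, 32
first_layer = nn.ConvTranspose2d(nz, ngf * 8, kernel_size=4, stride=1, padding=0)
z = torch.randn(1, nz, 1, 1)   # latent vector reshaped for convolutional input
print(first_layer(z).shape)    # torch.Size([1, 256, 4, 4])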
def fit(self, x_train):
    """Train the GAN on the given unlabeled image data.

    Args:
        x_train: A numpy.ndarray containing the training data.
    """
    # Input sizes stay constant, so enable the cudnn auto-tuner optimization.
    cudnn.benchmark = True
    self.data_transformer = Backend.get_image_transformer(x_train, augment=self.augment)
    train_dataloader = self.data_transformer.transform_train(x_train)
    GANModelTrainer(self.net_g,
                    self.net_d,
                    train_dataloader,
                    Backend.binary_classification_loss,
                    self.verbose,
                    self.gen_training_result,
                    device=Backend.get_device()).train_model()
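# A hedged usage sketch for the GAN fit above: unlabeled images go in, and the
# generator/discriminator pair is trained in place. The array shape is
# illustrative; no labels are passed since GAN training is unsupervised.
import numpy as np

x_train = np.random.rand(64, 32, 32, 3)   # 64 unlabeled 32x32 RGB images
# dcgan.fit(x_train)                      # dcgan: a GAN instance as constructed above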
# Classification metric (e.g. the Accuracy class used in the test above):
@classmethod
def compute(cls, prediction, target):
    return Backend.classification_metric(prediction, target)

# Regression metric:
@classmethod
def compute(cls, prediction, target):
    return Backend.regression_metric(prediction, target)
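# Both compute methods plug a different Backend function into the same metric
# interface. A self-contained sketch of that pattern (class names illustrative):
class Metric:
    @classmethod
    def compute(cls, prediction, target):
        raise NotImplementedError

class SimpleAccuracy(Metric):
    @classmethod
    def compute(cls, prediction, target):
        correct = sum(p == t for p, t in zip(prediction, target))
        return correct / len(target)

print(SimpleAccuracy.compute([1, 0, 1], [1, 1, 1]))  # 0.666...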
def produce_model(self):
    """Build a new torch model based on the current graph."""
    return Backend.produce_model(self)
@property
def loss(self):
    return Backend.classification_loss
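# Why loss is a property: self.loss evaluates to the Backend loss *function*
# itself rather than a computed value, so fit can pass it straight to train()
# as shown earlier. A minimal illustration of that pattern (names illustrative):
class Task:
    @property
    def loss(self):
        return lambda prediction, target: sum((p - t) ** 2 for p, t in zip(prediction, target))

print(Task().loss([1.0, 2.0], [1.0, 1.0]))  # 1.0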