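# NOTE: The snippets below are excerpts that assume the following setup.
# numpy, glob, and os are certain from usage; the module paths for
# SupervisedModel, SupervisedDataLoader, SupervisedDataContainer, and
# util (Evaluator, Normer3) are project-specific, so the import lines
# below are placeholders, not the library's confirmed layout.
import glob
import os

import numpy

# Hypothetical project-local imports -- adjust to the actual codebase:
# from model import SupervisedModel
# from data import SupervisedDataLoader, SupervisedDataContainer
# import util
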
# --- Evaluate every checkpoint and keep the most accurate one ---
# Load model
model = SupervisedModel('evaluation', './')
# Load data
supervised_data_loader = SupervisedDataLoader(dataset_path)
val_data_container = supervised_data_loader.load(1)
val_data_container.X = numpy.float32(val_data_container.X)
val_data_container.X /= 255.0
val_data_container.X *= 2.0
# Construct evaluator
preprocessor = [util.Normer3(filter_size=5, num_channels=1)]
checkpoint_file_list = sorted(
    glob.glob(os.path.join(checkpoint_dir, '*.pkl')))
evaluator = util.Evaluator(model, val_data_container,
                           checkpoint_file_list[0], preprocessor)
# For each checkpoint, compute the overall val accuracy
accuracies = []
for checkpoint in checkpoint_file_list:
    print 'Checkpoint: %s' % os.path.split(checkpoint)[1]
    evaluator.set_checkpoint(checkpoint)
    accuracy = evaluator.run()
    print 'Accuracy: %f\n' % accuracy
    accuracies.append(accuracy)
# Find checkpoint that produced the highest accuracy
max_accuracy = numpy.max(accuracies)
max_index = numpy.argmax(accuracies)
max_checkpoint = checkpoint_file_list[max_index]
print 'Max Checkpoint: %s' % max_checkpoint
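
# For reference, a minimal sketch of the evaluator protocol the snippets
# rely on. This is an assumption about the interface, not the real
# util.Evaluator: set_checkpoint() is assumed to load saved weights into
# the model, and run() to return percent accuracy on the container's data.
class EvaluatorSketch(object):
    def __init__(self, model, data_container, checkpoint, preprocessor):
        self.model = model
        self.data_container = data_container
        self.preprocessor = preprocessor
        self.set_checkpoint(checkpoint)

    def set_checkpoint(self, checkpoint):
        # Assumed: load pickled weights from the checkpoint into the model.
        self.model.load(checkpoint)

    def run(self):
        # Assumed: preprocess X, predict, and score against y in percent.
        X = self.data_container.X
        for p in self.preprocessor:
            X = p.run(X)
        predictions = self.model.predict(X)
        correct = numpy.sum(predictions == self.data_container.y)
        return 100.0 * correct / len(self.data_container.y)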

# --- Evaluate every checkpoint on the val split, keep the best ---
X_val = numpy.float32(X_val)
X_val /= 255.0
X_val *= 2.0
X_test = numpy.float32(X_test)
X_test /= 255.0
X_test *= 2.0
val_data_container = SupervisedDataContainer(X_val, y_val)
test_data_container = SupervisedDataContainer(X_test, y_test)
# Construct evaluator
preprocessor = [util.Normer3(filter_size=5, num_channels=1)]
checkpoint_file_list = sorted(
    glob.glob(os.path.join(checkpoint_dir, '*.pkl')))
val_evaluator = util.Evaluator(model, val_data_container,
                               checkpoint_file_list[0], preprocessor)
test_evaluator = util.Evaluator(model, test_data_container,
                                checkpoint_file_list[0], preprocessor)
# For each checkpoint, compute the overall val accuracy
val_accuracies = []
for checkpoint in checkpoint_file_list:
    print 'Checkpoint: %s' % os.path.split(checkpoint)[1]
    val_evaluator.set_checkpoint(checkpoint)
    val_accuracy = val_evaluator.run()
    print 'Val Accuracy: %f\n' % val_accuracy
    val_accuracies.append(val_accuracy)
# Find checkpoint that produced the highest val accuracy
max_val_accuracy = numpy.max(val_accuracies)
max_index = numpy.argmax(val_accuracies)
max_checkpoint = checkpoint_file_list[max_index]
print 'Max Checkpoint: %s' % max_checkpoint
print 'Max Val Accuracy: %f' % max_val_accuracy
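
# test_evaluator is constructed above but unused in this excerpt; a
# likely follow-up (an assumption, mirroring the single-checkpoint
# snippets below) is to score the best checkpoint on the test split:
test_evaluator.set_checkpoint(max_checkpoint)
test_accuracy = test_evaluator.run()
print 'Test Accuracy: %f\n' % test_accuracy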

# --- Evaluate a single checkpoint on one dataset split ---
# Load model
model = SupervisedModel('evaluation', './')
# Load dataset
supervised_data_loader = SupervisedDataLoader(dataset_path)
data_container = supervised_data_loader.load(set_num)
data_container.X = numpy.float32(data_container.X)
data_container.X /= 255.0
data_container.X *= 2.0
print data_container.X.shape
# Construct evaluator
preprocessor = [util.Normer3(filter_size=5, num_channels=1)]
evaluator = util.Evaluator(model, data_container,
                           checkpoint_file, preprocessor)
# For the inputted checkpoint, compute the overall accuracy
accuracies = []
print 'Checkpoint: %s' % os.path.split(checkpoint_file)[1]
evaluator.set_checkpoint(checkpoint_file)
accuracy = evaluator.run()
print 'Accuracy: %f\n' % accuracy
accuracies.append(accuracy)
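
# The same pixel scaling recurs throughout these snippets: cast to
# float32, map [0, 255] down to [0, 1], then double to [0, 2]. A small
# helper (not part of the original code) capturing the pattern:
def scale_images(X):
    # Cast to float32 and rescale pixel values from [0, 255] to [0, 2].
    X = numpy.float32(X)
    X /= 255.0
    X *= 2.0
    return X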

# --- Evaluate every checkpoint when one split is padded ---
if test_split == 9:
    X_test, y_test = add_padding(X_test, y_test)
elif test_split == 8:
    X_val, y_val = add_padding(X_val, y_val)
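
# add_padding is project-specific and not shown in these excerpts. A
# plausible sketch (assumed behavior and signature, not the original
# function): repeat the trailing sample until the split size divides
# the batch size evenly.
def add_padding_sketch(X, y, batch_size=128):
    remainder = len(X) % batch_size
    if remainder == 0:
        return X, y
    pad = batch_size - remainder
    X_pad = numpy.repeat(X[-1:], pad, axis=0)
    y_pad = numpy.repeat(y[-1:], pad, axis=0)
    return numpy.concatenate([X, X_pad]), numpy.concatenate([y, y_pad])
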
val_data_container = SupervisedDataContainer(X_val, y_val)
test_data_container = SupervisedDataContainer(X_test, y_test)
# Construct evaluator
preprocessor = [util.Normer3(filter_size=5, num_channels=1)]
checkpoint_file_list = sorted(
    glob.glob(os.path.join(checkpoint_dir, '*.pkl')))
val_evaluator = util.Evaluator(model, val_data_container,
                               checkpoint_file_list[0], preprocessor)
test_evaluator = util.Evaluator(model, test_data_container,
                                checkpoint_file_list[0], preprocessor)
# For each checkpoint, compute the overall val accuracy
val_accuracies = []
for checkpoint in checkpoint_file_list:
    print 'Checkpoint: %s' % os.path.split(checkpoint)[1]
    val_evaluator.set_checkpoint(checkpoint)
    if test_split != 8:
        val_accuracy = val_evaluator.run()
    else:
        # Padded val split: score only the original (unpadded) samples.
        val_predictions = val_evaluator._get_predictions()
        val_predictions = val_predictions[0:num_val_samples]
        val_true_labels = val_data_container.y[0:num_val_samples]
        val_accuracy = 100.0 * (1.0 * numpy.sum(
            val_predictions == val_true_labels) / num_val_samples)
    print 'Val Accuracy: %f\n' % val_accuracy
    val_accuracies.append(val_accuracy)
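
# The excerpt ends inside the loop; presumably, as in the earlier
# snippets, the best-scoring checkpoint is then selected:
max_index = numpy.argmax(val_accuracies)
print 'Max Checkpoint: %s' % checkpoint_file_list[max_index]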

# --- Evaluate a single checkpoint on the val and test splits ---
X_val = numpy.float32(X_val)
X_val /= 255.0
X_val *= 2.0
X_test = numpy.float32(X_test)
X_test /= 255.0
X_test *= 2.0
val_data_container = SupervisedDataContainer(X_val, y_val)
test_data_container = SupervisedDataContainer(X_test, y_test)
# Construct evaluator
preprocessor = [util.Normer3(filter_size=5, num_channels=1)]
val_evaluator = util.Evaluator(model, val_data_container,
                               checkpoint_file, preprocessor)
test_evaluator = util.Evaluator(model, test_data_container,
                                checkpoint_file, preprocessor)
# For the inputted checkpoint, compute the overall val accuracy
print 'Checkpoint: %s' % os.path.split(checkpoint_file)[1]
val_evaluator.set_checkpoint(checkpoint_file)
val_accuracy = val_evaluator.run()
print 'Val Accuracy: %f\n' % val_accuracy
# For the inputted checkpoint, compute the overall test accuracy
print 'Checkpoint: %s' % os.path.split(checkpoint_file)[1]
test_evaluator.set_checkpoint(checkpoint_file)
test_accuracy = test_evaluator.run()
print 'Test Accuracy: %f\n' % test_accuracy

# --- Evaluate a single checkpoint when one split is padded ---
print 'Reduced Val Data: ', X_val.shape, y_val.shape
print 'Reduced Test Data: ', X_test.shape, y_test.shape
if test_split == 9:
    X_test, y_test = add_padding(X_test, y_test)
elif test_split == 8:
    X_val, y_val = add_padding(X_val, y_val)
val_data_container = SupervisedDataContainer(X_val, y_val)
test_data_container = SupervisedDataContainer(X_test, y_test)
# Construct evaluator
preprocessor = [util.Normer3(filter_size=5, num_channels=1)]
val_evaluator = util.Evaluator(model, val_data_container,
                               checkpoint_file, preprocessor)
test_evaluator = util.Evaluator(model, test_data_container,
                                checkpoint_file, preprocessor)
# For the inputted checkpoint, compute the overall val accuracy
print 'Checkpoint: %s' % os.path.split(checkpoint_file)[1]
val_evaluator.set_checkpoint(checkpoint_file)
if test_split != 8:
    val_accuracy = val_evaluator.run()
else:
    # Padded val split: score only the original (unpadded) samples.
    val_predictions = val_evaluator._get_predictions()
    val_predictions = val_predictions[0:num_val_samples]
    val_true_labels = val_data_container.y[0:num_val_samples]
    val_accuracy = 100.0 * (1.0 * numpy.sum(
        val_predictions == val_true_labels) / num_val_samples)
print 'Val Accuracy: %f\n' % val_accuracy
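
# The excerpt ends before the test split is scored; a likely follow-up
# (an assumption, mirroring the unpadded single-checkpoint snippet above):
test_evaluator.set_checkpoint(checkpoint_file)
test_accuracy = test_evaluator.run()
print 'Test Accuracy: %f\n' % test_accuracy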