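# The excerpts below come from several related training scripts and share
# this setup, reconstructed here for context. The import paths are
# assumptions based on how the names are used; `train_split`, `CNNModel`,
# `test_data_container`, and any X_/y_ arrays not loaded within an excerpt
# are defined elsewhere in each full script.
import os

import numpy

from anna import util
from anna.datasets import supervised_dataset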
X_train = numpy.load('/data/stl10_matlab/train_splits/train_X_'
                     + str(train_split)+'.npy')
y_train = numpy.load('/data/stl10_matlab/train_splits/train_y_'
                     + str(train_split)+'.npy')
X_test = numpy.load('/data/stl10_matlab/test_X.npy')
y_test = numpy.load('/data/stl10_matlab/test_y.npy')
X_train = numpy.float32(X_train)
X_train /= 255.0
X_train *= 2.0  # scale pixels to [0, 2]
X_test = numpy.float32(X_test)
X_test /= 255.0
X_test *= 2.0
train_dataset = supervised_dataset.SupervisedDataset(X_train, y_train)
test_dataset = supervised_dataset.SupervisedDataset(X_test, y_test)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=45000)
test_iterator = test_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=45000)
# Create an object that applies local contrast normalization to a batch.
# Note: every batch must be normalized before use.
normer = util.Normer2(filter_size=5, num_channels=3)
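# (Normer2 presumably implements local contrast normalization, i.e. each
# pixel has its local 5x5 mean subtracted and is divided by the local
# standard deviation per channel; inferred from the name and the
# filter_size/num_channels arguments, not verified against the library.)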
augmenter = util.DataAugmenter(16, (96, 96))
print('Training Model')
for x_batch, y_batch in train_iterator:
    # Put the batch axis last, (channels, rows, cols, batch), the layout
    # the augmenter and normalizer expect.
    x_batch = x_batch.transpose(1, 2, 3, 0)
    x_batch = augmenter.run(x_batch)
    x_batch = normer.run(x_batch)
    # Uncomment if the labels are one-hot and the model expects integer
    # class indices:
    # y_batch = numpy.int64(numpy.argmax(y_batch, axis=1))
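    # Sketch of the rest of the training step, mirroring the CIFAR-10
    # excerpts below; assumes `model` and `monitor` were created earlier
    # in the script, as in those excerpts.
    monitor.start()
    log_prob, accuracy = model.train(x_batch, y_batch)
    monitor.stop(1 - accuracy)  # the monitor records error, not accuracy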
# Loading STL-10 dataset
print('Loading Data')
X_train = numpy.load('/data/stl10_matlab/train_splits/train_X_'
                     + str(train_split)+'.npy')
y_train = numpy.load('/data/stl10_matlab/train_splits/train_y_'
                     + str(train_split)+'.npy')
X_test = numpy.load('/data/stl10_matlab/test_X.npy')
y_test = numpy.load('/data/stl10_matlab/test_y.npy')
X_train = numpy.float32(X_train)
X_train /= 255.0
X_train *= 1.0  # no-op scale; the other scripts here use 2.0
X_test = numpy.float32(X_test)
X_test /= 255.0
X_test *= 1.0
train_dataset = supervised_dataset.SupervisedDataset(X_train, y_train)
test_dataset = supervised_dataset.SupervisedDataset(X_test, y_test)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=45000)
test_iterator = test_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=45000)
# Create an object that applies local contrast normalization to a batch.
# Note: every batch must be normalized before use.
normer = util.Normer2(filter_size=5, num_channels=3)
augmenter = util.DataAugmenter(16, (96, 96), color_on=True)
print('Training Model')
for x_batch, y_batch in train_iterator:
    x_batch = x_batch.transpose(1, 2, 3, 0)
    x_batch = augmenter.run(x_batch)
    x_batch = normer.run(x_batch)
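    # Optional sanity check (sketch): STL-10 images are 96x96 RGB and the
    # batch axis was moved last above, so the normalized batch should be
    # (channels, rows, cols, batch):
    assert x_batch.shape == (3, 96, 96, 128)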
model = CNNModel('experiment', './', learning_rate=1e-2)
monitor = util.Monitor(model)
# Loading CIFAR-10 dataset
print('Loading Data')
data_path = '/data/cifar10/'
reduced_data_path = os.path.join(data_path, 'reduced', 'cifar10_1000')
train_data = numpy.load(os.path.join(reduced_data_path, 'train_X_split_0.npy'))
train_labels = numpy.load(os.path.join(reduced_data_path,
                                       'train_y_split_0.npy'))
test_data = numpy.load('/data/cifar10/test_X.npy')
test_labels = numpy.load('/data/cifar10/test_y.npy')
train_dataset = supervised_dataset.SupervisedDataset(train_data, train_labels)
test_dataset = supervised_dataset.SupervisedDataset(test_data, test_labels)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=100000)
test_iterator = test_dataset.iterator(mode='random_uniform', batch_size=128,
                                      num_batches=100000)
normer = util.Normer2(filter_size=5, num_channels=3)
print('Training Model')
for x_batch, y_batch in train_iterator:
    x_batch = x_batch.transpose(1, 2, 3, 0)
    x_batch = normer.run(x_batch)
    # y_batch = numpy.int64(numpy.argmax(y_batch, axis=1))
    monitor.start()
    log_prob, accuracy = model.train(x_batch, y_batch)
    monitor.stop(1 - accuracy)  # monitor takes error, not accuracy
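# test_iterator is created above but never consumed in this excerpt. A
# periodic evaluation pass could reuse the same preprocessing; `model.eval`
# below is a hypothetical counterpart to `model.train`, not a confirmed API:
# for x_batch, y_batch in test_iterator:
#     x_batch = normer.run(x_batch.transpose(1, 2, 3, 0))
#     log_prob, accuracy = model.eval(x_batch, y_batch)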
X_train = numpy.load('/data/stl10_matlab/train_splits/train_X_'
                     + str(train_split)+'.npy')
y_train = numpy.load('/data/stl10_matlab/train_splits/train_y_'
                     + str(train_split)+'.npy')
X_test = numpy.load('/data/stl10_matlab/test_X.npy')
y_test = numpy.load('/data/stl10_matlab/test_y.npy')
X_train = numpy.float32(X_train)
X_train /= 255.0
X_train *= 1.0
X_test = numpy.float32(X_test)
X_test /= 255.0
X_test *= 1.0
train_dataset = supervised_dataset.SupervisedDataset(X_train, y_train)
test_dataset = supervised_dataset.SupervisedDataset(X_test, y_test)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=45000)
test_iterator = test_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=45000)
# Create an object that applies local contrast normalization to a batch.
# Note: every batch must be normalized before use.
normer = util.Normer2(filter_size=5, num_channels=3)
augmenter = util.DataAugmenter(16, (96, 96), color_on=True)
print('Training Model')
for x_batch, y_batch in train_iterator:
    x_batch = x_batch.transpose(1, 2, 3, 0)
    x_batch = augmenter.run(x_batch)
    x_batch = normer.run(x_batch)
    # y_batch = numpy.int64(numpy.argmax(y_batch, axis=1))
model = CNNModel('experiment', './', learning_rate=1e-2)
monitor = util.Monitor(model)
# Loading CIFAR-10 dataset
print('Loading Data')
data_path = '/data/cifar10/'
reduced_data_path = os.path.join(data_path, 'reduced', 'cifar10_1000')
train_data = numpy.load(os.path.join(reduced_data_path, 'train_X_split_0.npy'))
train_labels = numpy.load(os.path.join(reduced_data_path,
                                       'train_y_split_0.npy'))
test_data = numpy.load('/data/cifar10/test_X.npy')
test_labels = numpy.load('/data/cifar10/test_y.npy')
train_dataset = supervised_dataset.SupervisedDataset(train_data, train_labels)
test_dataset = supervised_dataset.SupervisedDataset(test_data, test_labels)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=100000)
test_iterator = test_dataset.iterator(mode='random_uniform', batch_size=128,
                                      num_batches=100000)
normer = util.Normer2(filter_size=5, num_channels=3)
augmenter = util.DataAugmenter(2, (32, 32), flip=False)
print('Training Model')
for x_batch, y_batch in train_iterator:
    x_batch = x_batch.transpose(1, 2, 3, 0)
    x_batch = augmenter.run(x_batch)
    x_batch = normer.run(x_batch)
    # y_batch = numpy.int64(numpy.argmax(y_batch, axis=1))
    monitor.start()
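    # The excerpt cuts off here; the step would presumably continue as in
    # the script above:
    # log_prob, accuracy = model.train(x_batch, y_batch)
    # monitor.stop(1 - accuracy)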
X_test = test_data_container.X
y_test = test_data_container.y
X_train = numpy.float32(X_train)
X_train /= 255.0
X_train *= 2.0
X_val = numpy.float32(X_val)
X_val /= 255.0
X_val *= 2.0
X_test = numpy.float32(X_test)
X_test /= 255.0
X_test *= 2.0
train_dataset = supervised_dataset.SupervisedDataset(X_train, y_train)
val_dataset = supervised_dataset.SupervisedDataset(X_val, y_val)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=64, num_batches=31000)
val_iterator = val_dataset.iterator(
    mode='random_uniform', batch_size=64, num_batches=31000)
# Create an object that applies local contrast normalization to a batch.
# Note: every batch must be normalized before use.
normer = util.Normer3(filter_size=5, num_channels=1)
module_list = [normer]
preprocessor = util.Preprocessor(module_list)
print('Training Model')
for x_batch, y_batch in train_iterator:
    x_batch = preprocessor.run(x_batch)
    monitor.start()
X_test = test_data_container.X
y_test = test_data_container.y
X_train = numpy.float32(X_train)
X_train /= 255.0
X_train *= 2.0
X_val = numpy.float32(X_val)
X_val /= 255.0
X_val *= 2.0
X_test = numpy.float32(X_test)
X_test /= 255.0
X_test *= 2.0
train_dataset = supervised_dataset.SupervisedDataset(X_train, y_train)
val_dataset = supervised_dataset.SupervisedDataset(X_val, y_val)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=64, num_batches=31000)
val_iterator = val_dataset.iterator(
    mode='random_uniform', batch_size=64, num_batches=31000)
# Do data augmentation (crops, flips, rotations, scales, intensity)
data_augmenter = util.DataAugmenter2(crop_shape=(96, 96),
                                     flip=True, gray_on=True)
normer = util.Normer3(filter_size=5, num_channels=1)
module_list_train = [data_augmenter, normer]
module_list_val = [normer]
preprocessor_train = util.Preprocessor(module_list_train)
preprocessor_val = util.Preprocessor(module_list_val)
print('Training Model')
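# The training loop itself is omitted from this excerpt; a minimal sketch
# following the pattern of the previous script (assumes `model` and
# `monitor` were created as in the other excerpts):
for x_batch, y_batch in train_iterator:
    x_batch = preprocessor_train.run(x_batch)
    monitor.start()
    log_prob, accuracy = model.train(x_batch, y_batch)
    monitor.stop(1 - accuracy)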
model = CNNModel('experiment', './', learning_rate=1e-2)
monitor = util.Monitor(model)
# Loading CIFAR-10 dataset
print('Loading Data')
data_path = '/data/cifar10/'
reduced_data_path = os.path.join(data_path, 'reduced', 'cifar10_500')
train_data = numpy.load(os.path.join(reduced_data_path, 'train_X_split_0.npy'))
train_labels = numpy.load(os.path.join(reduced_data_path,
                                       'train_y_split_0.npy'))
test_data = numpy.load('/data/cifar10/test_X.npy')
test_labels = numpy.load('/data/cifar10/test_y.npy')
train_dataset = supervised_dataset.SupervisedDataset(train_data, train_labels)
test_dataset = supervised_dataset.SupervisedDataset(test_data, test_labels)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=100000)
test_iterator = test_dataset.iterator(mode='random_uniform', batch_size=128,
                                      num_batches=100000)
normer = util.Normer2(filter_size=5, num_channels=3)
augmenter = util.DataAugmenter(2, (32, 32), flip=False)
print('Training Model')
for x_batch, y_batch in train_iterator:
    x_batch = x_batch.transpose(1, 2, 3, 0)
    x_batch = augmenter.run(x_batch)
    x_batch = normer.run(x_batch)
    # y_batch = numpy.int64(numpy.argmax(y_batch, axis=1))
    monitor.start()
# Raise dropout on the fc4 layer, then recompile so the change takes effect.
model.fc4.dropout = 0.5
model._compile()
# Loading CIFAR-10 dataset
print('Loading Data')
data_path = '/data/cifar10/'
reduced_data_path = os.path.join(data_path, 'reduced', 'cifar10_100')
train_data = numpy.load(os.path.join(reduced_data_path, 'train_X_split_0.npy'))
train_labels = numpy.load(os.path.join(reduced_data_path,
                                       'train_y_split_0.npy'))
test_data = numpy.load('/data/cifar10/test_X.npy')
test_labels = numpy.load('/data/cifar10/test_y.npy')
train_dataset = supervised_dataset.SupervisedDataset(train_data, train_labels)
test_dataset = supervised_dataset.SupervisedDataset(test_data, test_labels)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=100000)
test_iterator = test_dataset.iterator(mode='random_uniform', batch_size=128,
                                      num_batches=100000)
normer = util.Normer2(filter_size=5, num_channels=3)
augmenter = util.DataAugmenter(2, (32, 32))
print('Training Model')
for x_batch, y_batch in train_iterator:
    x_batch = x_batch.transpose(1, 2, 3, 0)
    x_batch = augmenter.run(x_batch)
    x_batch = normer.run(x_batch)
    # y_batch = numpy.int64(numpy.argmax(y_batch, axis=1))
    monitor.start()
    log_prob, accuracy = model.train(x_batch, y_batch)
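    # Sketch: close out the step as in the earlier excerpts (the monitor
    # records error rather than accuracy):
    monitor.stop(1 - accuracy)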
model = CNNModel('experiment', './', learning_rate=1e-2)
monitor = util.Monitor(model)
# Loading CIFAR-10 dataset
print('Loading Data')
data_path = '/data/cifar10/'
reduced_data_path = os.path.join(data_path, 'reduced', 'cifar10_1000')
train_data = numpy.load(os.path.join(reduced_data_path, 'train_X_split_0.npy'))
train_labels = numpy.load(os.path.join(reduced_data_path,
                                       'train_y_split_0.npy'))
test_data = numpy.load('/data/cifar10/test_X.npy')
test_labels = numpy.load('/data/cifar10/test_y.npy')
train_dataset = supervised_dataset.SupervisedDataset(train_data, train_labels)
test_dataset = supervised_dataset.SupervisedDataset(test_data, test_labels)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=100000)
test_iterator = test_dataset.iterator(mode='random_uniform', batch_size=128,
                                      num_batches=100000)
normer = util.Normer2(filter_size=5, num_channels=3)
augmenter = util.DataAugmenter(2, (32, 32), flip=False)
print('Training Model')
for x_batch, y_batch in train_iterator:
    x_batch = x_batch.transpose(1, 2, 3, 0)
    x_batch = augmenter.run(x_batch)
    x_batch = normer.run(x_batch)
    # y_batch = numpy.int64(numpy.argmax(y_batch, axis=1))
    monitor.start()
    log_prob, accuracy = model.train(x_batch, y_batch)
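    monitor.stop(1 - accuracy)  # sketch: close the step, as in the excerpts above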