How to use the anna.datasets.unsupervised_dataset module in anna

To help you get started, we've selected a few anna examples based on popular ways the module is used in public projects.
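Every example below follows the same basic pattern: wrap a NumPy array of images in an UnsupervisedDataset and ask it for a batch iterator. Here is a minimal sketch of that pattern; the array contents and sizes are illustrative assumptions, while the iterator arguments are the ones the projects below actually use:

import numpy

from anna.datasets import unsupervised_dataset

# Hypothetical image array, shaped (num_examples, channels, rows, cols).
data = numpy.random.rand(500, 3, 32, 32).astype(numpy.float32)

dataset = unsupervised_dataset.UnsupervisedDataset(data)

# 'random_uniform' draws random batches for training;
# 'sequential' walks the data in order, e.g. for evaluation.
train_iterator = dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=1000)
eval_iterator = dataset.iterator(mode='sequential', batch_size=128)

x_batch = next(train_iterator)  # Draw one training batch.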


github ifp-uiuc / an-analysis-of-unsupervised-pre-training-iclr-2015 / cifar10 / cae / layer1 / train.py
pid = os.getpid()
print('PID: {}'.format(pid))
# Write the PID to a file so the run can be monitored or killed later.
with open('pid', 'w') as f:
    f.write(str(pid) + '\n')

model = CAELayer1Model('experiment', './', learning_rate=1e-4)
monitor = util.Monitor(model, save_steps=200)


# Loading CIFAR-10 dataset
print('Loading Data')
train_data = numpy.load('/data/cifar10/train_X.npy')
test_data = numpy.load('/data/cifar10/test_X.npy')

train_dataset = unsupervised_dataset.UnsupervisedDataset(train_data)
test_dataset = unsupervised_dataset.UnsupervisedDataset(test_data)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=100000)
test_iterator = test_dataset.iterator(mode='sequential', batch_size=128)

normer = util.Normer2(filter_size=5, num_channels=3)

# Grab batch for patch extraction.
x_batch = next(train_iterator)
x_batch = x_batch.transpose(1, 2, 3, 0)
x_batch = normer.run(x_batch)
# Grab some patches to initialize weights.
patch_grabber = util.PatchGrabber(96, 5)
patches = patch_grabber.run(x_batch) * 0.01  # Scale down for small initial weights.
model.conv1.W.set_value(patches)

# Grab test data to give to NormReconVisualizer.
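Two preprocessing details recur in every script on this page: each batch pulled from an iterator is transposed with transpose(1, 2, 3, 0), moving the batch axis to the end, and is then contrast-normalized with normer.run() before it reaches the model. The script above also initializes the first layer's weights from real image patches (scaled down by 0.01) rather than random noise. As a sketch, the same preprocessing applied while draining the sequential test iterator would look like this, assuming the iterator supports standard Python iteration rather than only explicit next() calls:

for x_batch in test_iterator:
    # (batch, channels, rows, cols) -> (channels, rows, cols, batch)
    x_batch = x_batch.transpose(1, 2, 3, 0)
    # Every batch must be normalized before use.
    x_batch = normer.run(x_batch)
    # ... feed x_batch to the model ...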
github ifp-uiuc / an-analysis-of-unsupervised-pre-training-iclr-2015 / cifar10 / cae / layer2 / train.py
model = CAELayer2Model('experiment', './', learning_rate=1e-5)
checkpoint = checkpoints.unsupervised_layer1
util.set_parameters_from_unsupervised_model(model, checkpoint)
monitor = util.Monitor(model, save_steps=200)

# Freeze the pretrained first layer and recompile the model.
model.conv1.trainable = False
model._compile()

# Loading CIFAR-10 dataset
print('Loading Data')
train_data = numpy.load('/data/cifar10/train_X.npy')
test_data = numpy.load('/data/cifar10/test_X.npy')

train_dataset = unsupervised_dataset.UnsupervisedDataset(train_data)
test_dataset = unsupervised_dataset.UnsupervisedDataset(test_data)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=100000)
test_iterator = test_dataset.iterator(mode='sequential', batch_size=128)

normer = util.Normer2(filter_size=5, num_channels=3)

# Orthogonalize second layer weights.
W2 = model.conv2.W.get_value()
W2 = conv_orthogonalize(W2)
# Scale second layer weights.
s = 5.0
model.conv2.W.set_value(W2 * s)

# Grab test data to give to NormReconVisualizer.
test_x_batch = next(test_iterator)
test_x_batch = test_x_batch.transpose(1, 2, 3, 0)
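This layer-2 script illustrates the greedy layer-wise recipe used throughout these projects: restore the previous layer's weights from a checkpoint with util.set_parameters_from_unsupervised_model(), freeze them by setting trainable = False on the corresponding layer, recompile the model with _compile(), and only then train the new layer. The layer-3 scripts below repeat the same steps with both conv1 and conv2 frozen.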
github ifp-uiuc / an-analysis-of-unsupervised-pre-training-iclr-2015 / stl10 / cae / unsupervised_layer3 / train.py
monitor = util.Monitor(model, save_steps=200)

model.conv1.trainable = False
model.conv2.trainable = False
model._compile()

# Loading STL-10 dataset
print('Loading Data')
data = numpy.load('/data/stl10_matlab/unsupervised.npy')
data = numpy.float32(data)
# Scale pixel values from [0, 255] to [0, 2].
data /= 255.0
data *= 2.0
train_data = data[:90000]
test_data = data[90000:]

train_dataset = unsupervised_dataset.UnsupervisedDataset(train_data)
test_dataset = unsupervised_dataset.UnsupervisedDataset(test_data)
train_iterator = train_dataset.iterator(mode='random_uniform', batch_size=128,
                                        num_batches=100000)
test_iterator = test_dataset.iterator(mode='sequential', batch_size=128)

# Create object to local contrast normalize a batch.
# Note: Every batch must be normalized before use.
normer = util.Normer2(filter_size=5, num_channels=3)

# Orthogonalize third layer weights.
W3 = model.conv3.W.get_value()
W3 = conv_orthogonalize(W3)
# Scale third layer weights.
s = 3.0
model.conv3.W.set_value(W3 * s)
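conv_orthogonalize is a helper defined in the repository above; its implementation is not shown on this page. Purely as an illustration of the idea, and not the repository's actual function, a filter bank can be orthogonalized by flattening each filter and replacing the resulting matrix with its nearest orthonormal-row counterpart via the SVD:

import numpy

def orthogonalize_filters(W):
    # Assumed layout: (num_filters, channels, rows, cols).
    num_filters = W.shape[0]
    flat = W.reshape(num_filters, -1)
    # u.dot(vt) is the orthogonal polar factor: the closest matrix with
    # orthonormal rows when num_filters <= flat.shape[1].
    u, _, vt = numpy.linalg.svd(flat, full_matrices=False)
    return numpy.dot(u, vt).reshape(W.shape).astype(W.dtype)

After orthogonalization, the scripts scale the weights by a hand-picked constant s (3.0 here, 5.0 in the CIFAR-10 scripts) before loading them with set_value().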
github ifp-uiuc / anna / anna / scripts / zeiler_plotter.py
    print('Loading Model')
    model = UnsupervisedModel('xxx', './')
    print('Loading Checkpoint')
    util.load_checkpoint(model, checkpoint)

    # weight_layer and feature_layer arrive as strings naming model
    # attributes (e.g. 'conv1'); eval resolves them to the layer objects.
    weight_layer = eval('model.' + weight_layer)
    feature_layer = eval('model.' + feature_layer)

    print('Loading Data')
    data = numpy.load('/data/stl10_matlab/unsupervised.npy')
    data = numpy.float32(data)
    data /= 255.0
    data *= 2.0
    train_data = data[:90000]
    test_data = data[90000:]
    train_dataset = unsupervised_dataset.UnsupervisedDataset(train_data)
    test_dataset = unsupervised_dataset.UnsupervisedDataset(test_data)
    # test_x_batch = test_x_batch.transpose(1, 2, 3, 0)
    normer = util.Normer(filter_size=7)
    print('Done')

    print('Computing top activations for each filter.')
    acts_array = get_activations(model, weight_layer, test_dataset, normer)
    best_acts = numpy.sort(acts_array, axis=1)
    best_index = numpy.argsort(acts_array, axis=1)
    print(acts_array.shape)
    print(numpy.max(acts_array, axis=1))

    num_filters = acts_array.shape[0]
    zeiler_plotter = ZeilerMaxPlotter(model, weight_layer, feature_layer,
                                      test_dataset, normer)

    print('Extracting top 25 activations for each filter.')
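numpy.argsort sorts in ascending order, so the strongest activations for each filter end up in the last columns of best_acts and best_index. Continuing from the variables above, one way to pull out the top 25 per filter:

k = 25
# The last k columns hold the largest values; reverse them for descending order.
top_acts = best_acts[:, -k:][:, ::-1]
top_index = best_index[:, -k:][:, ::-1]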
github ifp-uiuc / an-analysis-of-unsupervised-pre-training-iclr-2015 / cifar10 / cae / layer3 / train.py
model = CAELayer3Model('experiment', './', learning_rate=1e-4)
checkpoint = checkpoints.unsupervised_layer2
util.set_parameters_from_unsupervised_model(model, checkpoint)
monitor = util.Monitor(model, save_steps=200)

model.conv1.trainable = False
model.conv2.trainable = False
model._compile()

# Loading CIFAR-10 dataset
print('Loading Data')
train_data = numpy.load('/data/cifar10/train_X.npy')
test_data = numpy.load('/data/cifar10/test_X.npy')

train_dataset = unsupervised_dataset.UnsupervisedDataset(train_data)
test_dataset = unsupervised_dataset.UnsupervisedDataset(test_data)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=100000)
test_iterator = test_dataset.iterator(mode='sequential', batch_size=128)

normer = util.Normer2(filter_size=5, num_channels=3)

# Orthogonalize third layer weights.
W3 = model.conv3.W.get_value()
W3 = conv_orthogonalize(W3)
# Scale third layer weights.
s = 5.0
model.conv3.W.set_value(W3 * s)

# Grab test data to give to NormReconVisualizer.
test_x_batch = next(test_iterator)
github ifp-uiuc / an-analysis-of-unsupervised-pre-training-iclr-2015 / stl10 / cae / unsupervised_layer2 / train.py
monitor = util.Monitor(model, save_steps=200)

model.conv1.trainable = False
model._compile()

# Loading STL-10 dataset
print('Loading Data')
data = numpy.load('/data/stl10_matlab/unsupervised.npy')
data = numpy.float32(data)
data /= 255.0
data *= 2.0
train_data = data[:90000]
test_data = data[90000:]

train_dataset = unsupervised_dataset.UnsupervisedDataset(train_data)
test_dataset = unsupervised_dataset.UnsupervisedDataset(test_data)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=128, num_batches=100000)
test_iterator = test_dataset.iterator(mode='sequential', batch_size=128)

# Create object to local contrast normalize a batch.
# Note: Every batch must be normalized before use.
normer = util.Normer2(filter_size=5, num_channels=3)

# Orthogonalize second layer weights.
W2 = model.conv2.W.get_value()
W2 = conv_orthogonalize(W2)
# Scale second layer weights.
s = 2.5
model.conv2.W.set_value(W2 * s)

# Grab test data to give to NormReconVisualizer.