How to use anna - 10 common examples

To help you get started, we've selected ten anna examples based on popular ways the library is used in public projects; all ten below come from ifp-uiuc/do-neural-networks-learn-faus-iccvw-2015, which uses anna to train and evaluate convolutional networks.

Example 1: ifp-uiuc/do-neural-networks-learn-faus-iccvw-2015 (tfd/tfd_single_checkpoint_evaluator.py)

    print('Evaluating on split %d' % fold)
    print('Using %s set\n' % args.which_set)

    # Load model
    model = SupervisedModel('evaluation', './')

    # Load dataset
    supervised_data_loader = SupervisedDataLoader(dataset_path)
    data_container = supervised_data_loader.load(set_num)
    data_container.X = numpy.float32(data_container.X)
    data_container.X /= 255.0
    data_container.X *= 2.0
    print(data_container.X.shape)

    # Construct evaluator
    preprocessor = [util.Normer3(filter_size=5, num_channels=1)]

    evaluator = util.Evaluator(model, data_container,
                               checkpoint_file, preprocessor)

    # For the inputted checkpoint, compute the overall accuracy
    accuracies = []
    print('Checkpoint: %s' % os.path.split(checkpoint_file)[1])
    evaluator.set_checkpoint(checkpoint_file)
    accuracy = evaluator.run()
    print('Accuracy: %f\n' % accuracy)
    accuracies.append(accuracy)
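
Nearly every snippet on this page repeats the same three lines to map pixel values from [0, 255] into float32 values in [0, 2] before anna's local contrast normalization. A small helper makes that intent explicit; scale_pixels is our name for illustration, not part of anna:

import numpy


def scale_pixels(X):
    # Map images from [0, 255] to float32 in [0, 2], the input range
    # these examples use ahead of util.Normer3.
    X = numpy.float32(X)
    X /= 255.0
    X *= 2.0
    return X
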
Example 2: ifp-uiuc/do-neural-networks-learn-faus-iccvw-2015 (ck_plus/ck_plus_checkpoint_checker.py)

    print(X_val.shape, y_val.shape)
    print(X_test.shape, y_test.shape)

    X_val = numpy.float32(X_val)
    X_val /= 255.0
    X_val *= 2.0

    X_test = numpy.float32(X_test)
    X_test /= 255.0
    X_test *= 2.0

    val_data_container = SupervisedDataContainer(X_val, y_val)
    test_data_container = SupervisedDataContainer(X_test, y_test)

    # Construct evaluator
    preprocessor = [util.Normer3(filter_size=5, num_channels=1)]

    checkpoint_file_list = sorted(
        glob.glob(os.path.join(checkpoint_dir, '*.pkl')))
    val_evaluator = util.Evaluator(model, val_data_container,
                                   checkpoint_file_list[0], preprocessor)
    test_evaluator = util.Evaluator(model, test_data_container,
                                    checkpoint_file_list[0], preprocessor)

    # For each checkpoint, compute the overall val accuracy
    val_accuracies = []
    for checkpoint in checkpoint_file_list:
        print('Checkpoint: %s' % os.path.split(checkpoint)[1])
        val_evaluator.set_checkpoint(checkpoint)
        val_accuracy = val_evaluator.run()
        print('Val Accuracy: %f\n' % val_accuracy)
        val_accuracies.append(val_accuracy)
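
The loop above only collects validation accuracies. A plausible continuation, built solely from the set_checkpoint/run calls already shown, is to pick the checkpoint that scored best on validation and evaluate that one checkpoint on the test set (the variable names below are ours):

    # Select the best checkpoint by validation accuracy, then score it
    # once on the held-out test set.
    best_index = int(numpy.argmax(val_accuracies))
    best_checkpoint = checkpoint_file_list[best_index]
    print('Best checkpoint: %s' % os.path.split(best_checkpoint)[1])

    test_evaluator.set_checkpoint(best_checkpoint)
    test_accuracy = test_evaluator.run()
    print('Test Accuracy: %f' % test_accuracy)
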
Example 3: ifp-uiuc/do-neural-networks-learn-faus-iccvw-2015 (tfd/tfd_checkpoint_checker.py)

    print('Checkpoint directory: %s' % checkpoint_dir)
    print('Testing on split %d\n' % fold)

    # Load model
    model = SupervisedModel('evaluation', './')

    # Load data
    supervised_data_loader = SupervisedDataLoader(dataset_path)
    val_data_container = supervised_data_loader.load(1)
    val_data_container.X = numpy.float32(val_data_container.X)
    val_data_container.X /= 255.0
    val_data_container.X *= 2.0

    # Construct evaluator
    preprocessor = [util.Normer3(filter_size=5, num_channels=1)]

    checkpoint_file_list = sorted(
        glob.glob(os.path.join(checkpoint_dir, '*.pkl')))
    evaluator = util.Evaluator(model, val_data_container,
                               checkpoint_file_list[0], preprocessor)

    # For each checkpoint, compute the overall val accuracy
    accuracies = []
    for checkpoint in checkpoint_file_list:
        print('Checkpoint: %s' % os.path.split(checkpoint)[1])
        evaluator.set_checkpoint(checkpoint)
        accuracy = evaluator.run()
        print('Accuracy: %f\n' % accuracy)
        accuracies.append(accuracy)

    # Find checkpoint that produced the highest accuracy
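
The snippet is truncated at that final comment; the selection step it announces is presumably just an argmax over the accuracies collected above, along these lines:

    best_checkpoint = checkpoint_file_list[int(numpy.argmax(accuracies))]
    print('Best checkpoint: %s' % os.path.split(best_checkpoint)[1])
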
Example 4: ifp-uiuc/do-neural-networks-learn-faus-iccvw-2015 (tfd/cnn/train.py)

X_val *= 2.0

X_test = numpy.float32(X_test)
X_test /= 255.0
X_test *= 2.0

train_dataset = supervised_dataset.SupervisedDataset(X_train, y_train)
val_dataset = supervised_dataset.SupervisedDataset(X_val, y_val)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=64, num_batches=31000)
val_iterator = val_dataset.iterator(
    mode='random_uniform', batch_size=64, num_batches=31000)

# Create object to local contrast normalize a batch.
# Note: Every batch must be normalized before use.
normer = util.Normer3(filter_size=5, num_channels=1)
module_list = [normer]
preprocessor = util.Preprocessor(module_list)

print('Training Model')
for x_batch, y_batch in train_iterator:
    x_batch = preprocessor.run(x_batch)
    monitor.start()
    log_prob, accuracy = model.train(x_batch, y_batch)
    monitor.stop(1-accuracy)

    if monitor.test:
        monitor.start()
        x_val_batch, y_val_batch = next(val_iterator)
        x_val_batch = preprocessor.run(x_val_batch)
        val_accuracy = model.eval(x_val_batch, y_val_batch)
        monitor.stop_test(1-val_accuracy)
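
This monitor-driven loop reappears verbatim in the remaining train.py examples, differing only in which preprocessor each batch passes through. It factors naturally into a single helper built from the calls shown above; train_with_monitor is a hypothetical name, not an anna API:

def train_with_monitor(model, monitor, train_iterator, val_iterator,
                       preprocessor_train, preprocessor_val):
    # Preprocess and train on each batch, reporting error rates
    # (1 - accuracy) to the monitor; whenever the monitor requests a
    # test, run one preprocessed validation batch through model.eval.
    for x_batch, y_batch in train_iterator:
        x_batch = preprocessor_train.run(x_batch)
        monitor.start()
        log_prob, accuracy = model.train(x_batch, y_batch)
        monitor.stop(1 - accuracy)

        if monitor.test:
            monitor.start()
            x_val_batch, y_val_batch = next(val_iterator)
            x_val_batch = preprocessor_val.run(x_val_batch)
            val_accuracy = model.eval(x_val_batch, y_val_batch)
            monitor.stop_test(1 - val_accuracy)

For the scripts without augmentation, pass the same preprocessor for both arguments.
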
Example 5: ifp-uiuc/do-neural-networks-learn-faus-iccvw-2015 (tfd/cnn_d/train.py)

X_val *= 2.0

X_test = numpy.float32(X_test)
X_test /= 255.0
X_test *= 2.0

train_dataset = supervised_dataset.SupervisedDataset(X_train, y_train)
val_dataset = supervised_dataset.SupervisedDataset(X_val, y_val)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=64, num_batches=31000)
val_iterator = val_dataset.iterator(
    mode='random_uniform', batch_size=64, num_batches=31000)

# Create object to local contrast normalize a batch.
# Note: Every batch must be normalized before use.
normer = util.Normer3(filter_size=5, num_channels=1)
module_list = [normer]
preprocessor = util.Preprocessor(module_list)

print('Training Model')
for x_batch, y_batch in train_iterator:
    x_batch = preprocessor.run(x_batch)
    monitor.start()
    log_prob, accuracy = model.train(x_batch, y_batch)
    monitor.stop(1-accuracy)

    if monitor.test:
        monitor.start()
        x_val_batch, y_val_batch = next(val_iterator)
        x_val_batch = preprocessor.run(x_val_batch)
        val_accuracy = model.eval(x_val_batch, y_val_batch)
        monitor.stop_test(1-val_accuracy)

Example 6: ifp-uiuc/do-neural-networks-learn-faus-iccvw-2015 (tfd/cnn_ad/train.py)

train_dataset = supervised_dataset.SupervisedDataset(X_train, y_train)
val_dataset = supervised_dataset.SupervisedDataset(X_val, y_val)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=64, num_batches=31000)
val_iterator = val_dataset.iterator(
    mode='random_uniform', batch_size=64, num_batches=31000)

# Do data augmentation (crops, flips, rotations, scales, intensity)
data_augmenter = util.DataAugmenter2(crop_shape=(96, 96),
                                     flip=True, gray_on=True)
normer = util.Normer3(filter_size=5, num_channels=1)
module_list_train = [data_augmenter, normer]
module_list_val = [normer]
preprocessor_train = util.Preprocessor(module_list_train)
preprocessor_val = util.Preprocessor(module_list_val)

print('Training Model')
for x_batch, y_batch in train_iterator:
    x_batch = preprocessor_train.run(x_batch)
    monitor.start()
    log_prob, accuracy = model.train(x_batch, y_batch)
    monitor.stop(1-accuracy)

    if monitor.test:
        monitor.start()
        x_val_batch, y_val_batch = next(val_iterator)
        x_val_batch = preprocessor_val.run(x_val_batch)
        val_accuracy = model.eval(x_val_batch, y_val_batch)
        monitor.stop_test(1-val_accuracy)
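
Compared with the plain cnn/train.py script (example 4), the only change here is the preprocessing split: augmentation is applied to training batches only, while local contrast normalization is applied everywhere. A small factory captures that split; build_preprocessors is our name, and the assumption that util.Preprocessor applies its modules in list order is inferred from these snippets rather than from documented API:

def build_preprocessors(augment=True):
    # Augmentation (random 96x96 crops, flips, etc.) runs on training
    # batches only; Normer3 contrast normalization runs on every batch.
    normer = util.Normer3(filter_size=5, num_channels=1)
    module_list_train = [normer]
    if augment:
        augmenter = util.DataAugmenter2(crop_shape=(96, 96),
                                        flip=True, gray_on=True)
        module_list_train = [augmenter, normer]
    return (util.Preprocessor(module_list_train),
            util.Preprocessor([normer]))
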
Example 7: ifp-uiuc/do-neural-networks-learn-faus-iccvw-2015 (ck_plus_six_class/cnn_ad/train.py)

train_dataset = supervised_dataset.SupervisedDataset(X_train, y_train)
val_dataset = supervised_dataset.SupervisedDataset(X_val, y_val)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=64, num_batches=31000)
val_iterator = val_dataset.iterator(
    mode='random_uniform', batch_size=64, num_batches=31000)

# Do data augmentation (crops, flips, rotations, scales, intensity)
data_augmenter = util.DataAugmenter2(crop_shape=(96, 96),
                                     flip=True, gray_on=True)
normer = util.Normer3(filter_size=5, num_channels=1)
module_list_train = [data_augmenter, normer]
module_list_val = [normer]
preprocessor_train = util.Preprocessor(module_list_train)
preprocessor_val = util.Preprocessor(module_list_val)

print('Training Model')
for x_batch, y_batch in train_iterator:
    x_batch = preprocessor_train.run(x_batch)
    monitor.start()
    log_prob, accuracy = model.train(x_batch, y_batch)
    monitor.stop(1-accuracy)

    if monitor.test:
        monitor.start()
        x_val_batch, y_val_batch = next(val_iterator)
        x_val_batch = preprocessor_val.run(x_val_batch)
        val_accuracy = model.eval(x_val_batch, y_val_batch)
        monitor.stop_test(1-val_accuracy)

Example 8: ifp-uiuc/do-neural-networks-learn-faus-iccvw-2015 (ck_plus/cnn_ad/train.py)

train_dataset = supervised_dataset.SupervisedDataset(X_train, y_train)
val_dataset = supervised_dataset.SupervisedDataset(X_val, y_val)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=64, num_batches=31000)
val_iterator = val_dataset.iterator(
    mode='random_uniform', batch_size=64, num_batches=31000)


# Do data augmentation (crops, flips, rotations, scales, intensity)
data_augmenter = util.DataAugmenter2(crop_shape=(96, 96),
                                     flip=True, gray_on=True)
normer = util.Normer3(filter_size=5, num_channels=1)
module_list_train = [data_augmenter, normer]
module_list_val = [normer]
preprocessor_train = util.Preprocessor(module_list_train)
preprocessor_val = util.Preprocessor(module_list_val)

print('Training Model')
for x_batch, y_batch in train_iterator:
    x_batch = preprocessor_train.run(x_batch)
    monitor.start()
    log_prob, accuracy = model.train(x_batch, y_batch)
    monitor.stop(1-accuracy)

    if monitor.test:
        monitor.start()
        x_val_batch, y_val_batch = next(val_iterator)
        x_val_batch = preprocessor_val.run(x_val_batch)
        val_accuracy = model.eval(x_val_batch, y_val_batch)
        monitor.stop_test(1-val_accuracy)

Example 9: ifp-uiuc/do-neural-networks-learn-faus-iccvw-2015 (tfd/cnn_a/train.py)

X_test *= 2.0

train_dataset = supervised_dataset.SupervisedDataset(X_train, y_train)
val_dataset = supervised_dataset.SupervisedDataset(X_val, y_val)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=64, num_batches=31000)
val_iterator = val_dataset.iterator(
    mode='random_uniform', batch_size=64, num_batches=31000)

# Do data augmentation (crops, flips, rotations, scales, intensity)
data_augmenter = util.DataAugmenter2(crop_shape=(96, 96),
                                     flip=True, gray_on=True)
normer = util.Normer3(filter_size=5, num_channels=1)
module_list_train = [data_augmenter, normer]
module_list_val = [normer]
preprocessor_train = util.Preprocessor(module_list_train)
preprocessor_val = util.Preprocessor(module_list_val)

print('Training Model')
for x_batch, y_batch in train_iterator:
    x_batch = preprocessor_train.run(x_batch)
    monitor.start()
    log_prob, accuracy = model.train(x_batch, y_batch)
    monitor.stop(1-accuracy)

    if monitor.test:
        monitor.start()
        x_val_batch, y_val_batch = next(val_iterator)
        x_val_batch = preprocessor_val.run(x_val_batch)
        val_accuracy = model.eval(x_val_batch, y_val_batch)
        monitor.stop_test(1-val_accuracy)

Example 10: ifp-uiuc/do-neural-networks-learn-faus-iccvw-2015 (tfd/cnn_d/train.py)

X_test = numpy.float32(X_test)
X_test /= 255.0
X_test *= 2.0

train_dataset = supervised_dataset.SupervisedDataset(X_train, y_train)
val_dataset = supervised_dataset.SupervisedDataset(X_val, y_val)
train_iterator = train_dataset.iterator(
    mode='random_uniform', batch_size=64, num_batches=31000)
val_iterator = val_dataset.iterator(
    mode='random_uniform', batch_size=64, num_batches=31000)

# Create object to local contrast normalize a batch.
# Note: Every batch must be normalized before use.
normer = util.Normer3(filter_size=5, num_channels=1)
module_list = [normer]
preprocessor = util.Preprocessor(module_list)

print('Training Model')
for x_batch, y_batch in train_iterator:
    x_batch = preprocessor.run(x_batch)
    monitor.start()
    log_prob, accuracy = model.train(x_batch, y_batch)
    monitor.stop(1-accuracy)

    if monitor.test:
        monitor.start()
        x_val_batch, y_val_batch = next(val_iterator)
        x_val_batch = preprocessor.run(x_val_batch)
        val_accuracy = model.eval(x_val_batch, y_val_batch)
        monitor.stop_test(1-val_accuracy)