# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): this file appears to be a concatenation of fragments from
# several different nnabla example scripts. The body of meta_train below has
# lost all indentation, and MonitorSeries / load_omniglot / DataGenerator / os
# are used without visible imports, so the file does not parse as-is.
# Restore indentation and imports before attempting to run it.
def meta_train(exp_string, monitor, args):
# Set monitors
# Series monitors for training loss and validation error; emission cadence
# is controlled by args.print_interval / args.test_print_interval.
monitor_loss = MonitorSeries(
'Training loss', monitor, interval=args.print_interval, verbose=False)
monitor_valid_err = MonitorSeries(
'Validation error', monitor, interval=args.test_print_interval, verbose=False)
# Load data
# Only the 'omniglot' data source is supported; anything else is rejected.
if args.datasource == 'omniglot':
# Single-channel 28x28 inputs (1, 28, 28).
shape_x = (1, 28, 28)
# Test split is discarded here; only train/valid are used in this fragment.
train_data, valid_data, _ = load_omniglot(
os.path.join(args.dataset_root, 'omniglot/data/'), shape_x)
else:
raise ValueError('Unrecognized data source.')
# Episode generators: num_classes-way tasks; training uses its own
# shot/query counts, validation uses the evaluation-time counts.
train_data_generator = DataGenerator(
args.num_classes, args.train_num_shots, args.train_num_queries, shape_x, train_data, args.meta_batch_size)
valid_data_generator = DataGenerator(
args.num_classes, args.num_shots, args.num_queries, shape_x, valid_data, args.meta_batch_size)
# NOTE(review): fragment from a second (episode-based/prototypical) example
# script; it redefines monitor_loss/monitor_valid_err created above and uses
# names with no visible definition here (logger, nn, iter_per_epoch,
# iter_per_valid, dataset_root, n_class_tr, n_shot_tr, n_query_tr, n_class,
# n_shot, n_query, EpisodeGenerator) -- TODO confirm against the source repo.
# Build training models
n_episode_for_test = args.n_episode_for_test
work_dir = args.work_dir
# Set context
# Select the compute backend (cpu/cudnn, device id, float/half) and make it
# the default for subsequently created variables/functions.
from nnabla.ext_utils import get_extension_context
logger.info("Running in %s" % args.context)
ctx = get_extension_context(
args.context, device_id=args.device_id, type_config=args.type_config)
nn.set_default_context(ctx)
# Monitor outputs
from nnabla.monitor import Monitor, MonitorSeries
monitor = Monitor(args.work_dir)
monitor_loss = MonitorSeries(
"Training loss", monitor, interval=iter_per_epoch)
monitor_valid_err = MonitorSeries(
"Validation error", monitor, interval=iter_per_valid)
# Test-time monitors emit on every add() call (default interval).
monitor_test_err = MonitorSeries("Test error", monitor)
monitor_test_conf = MonitorSeries("Test error confidence", monitor)
# Output files
# NOTE(review): plain string concatenation -- unless work_dir already ends
# with a path separator these become e.g. "workdirparams.h5"; verify intent.
param_file = work_dir + "params.h5"
tsne_file = work_dir + "tsne.png"
# Load data
shape_x = (1, 28, 28)
# Unlike the fragment above, this call keeps the test split and passes no
# shape argument -- a different load_omniglot signature; confirm which one
# this file is meant to use.
train_data, valid_data, test_data = load_omniglot(
dataset_root + "/omniglot/data/")
# Episode generators for n-way/k-shot sampling (train vs. eval settings).
train_episode_generator = EpisodeGenerator(
n_class_tr, n_shot_tr, n_query_tr, shape_x, train_data)
valid_episode_generator = EpisodeGenerator(
n_class, n_shot, n_query, shape_x, valid_data)
# NOTE(review): fragment from the nnabla MNIST classification example.
# mnist_cnn_prediction, S, categorical_error and data_iterator_mnist have no
# visible definitions/imports in this file.
# TEST
# Create input variables.
# Batched single-channel 28x28 images and integer labels of shape (B, 1).
vimage = nn.Variable([args.batch_size, 1, 28, 28])
vlabel = nn.Variable([args.batch_size, 1])
# Create prediction graph.
# test=True builds the inference-mode graph (e.g. no dropout updates).
vpred = mnist_cnn_prediction(vimage, test=True)
# Create Solver.
solver = S.Adam(args.learning_rate)
solver.set_parameters(nn.get_parameters())
# Create monitor.
from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
monitor = Monitor(args.monitor_path)
monitor_loss = MonitorSeries("Training loss", monitor, interval=10)
monitor_err = MonitorSeries("Training error", monitor, interval=10)
monitor_time = MonitorTimeElapsed("Training time", monitor, interval=100)
monitor_verr = MonitorSeries("Test error", monitor, interval=10)
# Initialize DataIterator for MNIST.
# True/False presumably select the train vs. test split -- confirm against
# the data_iterator_mnist signature.
data = data_iterator_mnist(args.batch_size, True)
vdata = data_iterator_mnist(args.batch_size, False)
# Training loop.
for i in range(args.max_iter):
if i % args.val_interval == 0:
# Validation
# Accumulate categorical error over args.val_iter validation batches.
ve = 0.0
for j in range(args.val_iter):
vimage.d, vlabel.d = vdata.next()
# clear_buffer frees intermediate buffers during the forward pass.
vpred.forward(clear_buffer=True)
ve += categorical_error(vpred.d, vlabel.d)
# NOTE(review): fragment from a generic CNN classification example; c, h, w,
# maps, model_prediction, data_iterator and n_valid are undefined in this
# file. It redefines vimage/vlabel/solver/monitors from the fragment above.
# TEST
# Create input variables.
vimage = nn.Variable([args.batch_size, c, h, w])
vlabel = nn.Variable([args.batch_size, 1])
# Create prediction graph.
vpred = model_prediction(vimage, maps=maps, test=True)
# Create Solver.
solver = S.Adam(args.learning_rate)
solver.set_parameters(nn.get_parameters())
# Create monitor.
from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
monitor = Monitor(args.monitor_path)
monitor_loss = MonitorSeries("Training loss", monitor, interval=10)
monitor_err = MonitorSeries("Training error", monitor, interval=10)
monitor_time = MonitorTimeElapsed("Training time", monitor, interval=100)
monitor_verr = MonitorSeries("Test error", monitor, interval=1)
# Initialize DataIterator
data = data_iterator(args.batch_size, True)
vdata = data_iterator(args.batch_size, False)
# Best-so-far and current validation error, initialized to worst case (1.0).
best_ve = 1.0
ve = 1.0
# Training loop.
for i in range(args.max_iter):
if i % args.val_interval == 0:
# Validation
ve = 0.0
# Full pass over the validation set in whole batches; a remainder of
# n_valid % batch_size samples is silently dropped.
for j in range(int(n_valid / args.batch_size)):
vimage.d, vlabel.d = vdata.next()
# NOTE(review): near-duplicate of the MNIST fragment earlier in this file
# (same undefined helpers); only the trailing monitor_verr.add differs.
# TEST
# Create input variables.
vimage = nn.Variable([args.batch_size, 1, 28, 28])
vlabel = nn.Variable([args.batch_size, 1])
# Create prediction graph.
vpred = mnist_cnn_prediction(vimage, test=True)
# Create Solver.
solver = S.Adam(args.learning_rate)
solver.set_parameters(nn.get_parameters())
# Create monitor.
from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
monitor = Monitor(args.monitor_path)
monitor_loss = MonitorSeries("Training loss", monitor, interval=10)
monitor_err = MonitorSeries("Training error", monitor, interval=10)
monitor_time = MonitorTimeElapsed("Training time", monitor, interval=100)
monitor_verr = MonitorSeries("Test error", monitor, interval=10)
# Initialize DataIterator for MNIST.
data = data_iterator_mnist(args.batch_size, True)
vdata = data_iterator_mnist(args.batch_size, False)
# Training loop.
for i in range(args.max_iter):
if i % args.val_interval == 0:
# Validation
ve = 0.0
for j in range(args.val_iter):
vimage.d, vlabel.d = vdata.next()
vpred.forward(clear_buffer=True)
ve += categorical_error(vpred.d, vlabel.d)
# Record mean validation error at iteration i.
monitor_verr.add(i, ve / args.val_iter)
# NOTE(review): fragment from a StarGAN-style image-translation example;
# get_data_dict, random and the beta/log_step args are undefined/unimported
# in this file.
# -------------------- Solver Setup ---------------------
# NOTE(review): d_lr/g_lr are bound but the solvers below read args.d_lr /
# args.g_lr directly, so these two locals appear unused in this fragment.
d_lr = args.d_lr # initial learning rate for Discriminator
g_lr = args.g_lr # initial learning rate for Generator
solver_dis = S.Adam(alpha=args.d_lr, beta1=args.beta1, beta2=args.beta2)
solver_gen = S.Adam(alpha=args.g_lr, beta1=args.beta1, beta2=args.beta2)
# register parameters to each solver.
# parameter_scope limits get_parameters() to each network's own weights.
with nn.parameter_scope("dis"):
solver_dis.set_parameters(nn.get_parameters())
with nn.parameter_scope("gen"):
solver_gen.set_parameters(nn.get_parameters())
# -------------------- Create Monitors --------------------
monitor = Monitor(args.monitor_path)
monitor_d_cls_loss = MonitorSeries(
'real_classification_loss', monitor, args.log_step)
monitor_g_cls_loss = MonitorSeries(
'fake_classification_loss', monitor, args.log_step)
monitor_loss_dis = MonitorSeries(
'discriminator_loss', monitor, args.log_step)
monitor_recon_loss = MonitorSeries(
'reconstruction_loss', monitor, args.log_step)
monitor_loss_gen = MonitorSeries('generator_loss', monitor, args.log_step)
monitor_time = MonitorTimeElapsed("Training_time", monitor, args.log_step)
# -------------------- Prepare / Split Dataset --------------------
using_attr = args.selected_attrs
dataset, attr2idx, idx2attr = get_data_dict(args.attr_path, using_attr)
# Fixed seed + shuffle makes the train/test split below reproducible.
random.seed(313) # use fixed seed.
random.shuffle(dataset) # shuffle dataset.
test_dataset = dataset[-2000:] # extract 2000 images for test
# NOTE(review): duplicate of the earlier generic-model fragment, followed by
# two orphaned lines of a GAN discriminator loss (x, discriminator, F and
# loss_dis are all undefined here) -- the splice point of yet another script.
# TEST
# Create input variables.
vimage = nn.Variable([args.batch_size, c, h, w])
vlabel = nn.Variable([args.batch_size, 1])
# Create prediction graph.
vpred = model_prediction(vimage, maps=maps, test=True)
# Create Solver.
solver = S.Adam(args.learning_rate)
solver.set_parameters(nn.get_parameters())
# Create monitor.
from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
monitor = Monitor(args.monitor_path)
monitor_loss = MonitorSeries("Training loss", monitor, interval=10)
monitor_err = MonitorSeries("Training error", monitor, interval=10)
monitor_time = MonitorTimeElapsed("Training time", monitor, interval=100)
monitor_verr = MonitorSeries("Test error", monitor, interval=1)
# Initialize DataIterator
data = data_iterator(args.batch_size, True)
vdata = data_iterator(args.batch_size, False)
best_ve = 1.0
ve = 1.0
# Training loop.
for i in range(args.max_iter):
if i % args.val_interval == 0:
# Validation
ve = 0.0
for j in range(int(n_valid / args.batch_size)):
vimage.d, vlabel.d = vdata.next()
vpred.forward(clear_buffer=True)
# Orphaned GAN lines: real samples should be classified as real (label 1)
# by the discriminator; loss_dis is accumulated but never initialized here.
pred_real = discriminator(x)
loss_dis += F.mean(F.sigmoid_cross_entropy(pred_real,
F.constant(1, pred_real.shape)))
# NOTE(review): fragment from the nnabla DCGAN-on-MNIST example.
# Create Solver.
# beta1=0.5 is the Adam momentum setting commonly used for GAN training.
solver_gen = S.Adam(args.learning_rate, beta1=0.5)
solver_dis = S.Adam(args.learning_rate, beta1=0.5)
# Register each network's parameters with its own solver via scopes.
with nn.parameter_scope("gen"):
solver_gen.set_parameters(nn.get_parameters())
with nn.parameter_scope("dis"):
solver_dis.set_parameters(nn.get_parameters())
# Create monitor.
import nnabla.monitor as M
monitor = M.Monitor(args.monitor_path)
monitor_loss_gen = M.MonitorSeries("Generator loss", monitor, interval=10)
monitor_loss_dis = M.MonitorSeries(
"Discriminator loss", monitor, interval=10)
monitor_time = M.MonitorTimeElapsed("Time", monitor, interval=100)
# NOTE(review): `x + 1 / 2.` parses as `x + 0.5`; to map tanh output
# [-1, 1] to [0, 1] the intended expression is almost certainly
# `(x + 1) / 2.` -- verify against the upstream example.
monitor_fake = M.MonitorImageTile(
"Fake images", monitor, normalize_method=lambda x: x + 1 / 2.)
data = data_iterator_mnist(args.batch_size, True)
# Training loop.
for i in range(args.max_iter):
# Periodically snapshot both networks' weights, one file per scope.
if i % args.model_save_interval == 0:
with nn.parameter_scope("gen"):
nn.save_parameters(os.path.join(
args.model_save_path, "generator_param_%06d.h5" % i))
with nn.parameter_scope("dis"):
nn.save_parameters(os.path.join(
args.model_save_path, "discriminator_param_%06d.h5" % i))
# NOTE(review): fragment from a multi-device (distributed) training example;
# pred_valid, label_valid, image_valid, n_train_samples, n_devices, np and
# device_id are undefined in this file.
# Top-1 error (top_n_error over axis=1) averaged across the valid batch.
error_valid = F.mean(F.top_n_error(pred_valid, label_valid, axis=1))
input_image_valid = {"image": image_valid, "label": label_valid}
# Solvers
solver = S.Adam()
solver.set_parameters(nn.get_parameters())
base_lr = args.learning_rate
# Linear learning-rate warmup over warmup_epoch epochs; the `1.` forces
# float division before the int() truncation.
warmup_iter = int(1. * n_train_samples /
args.batch_size / n_devices) * args.warmup_epoch
# Per-iteration LR increment used during warmup (scales with device count).
warmup_slope = base_lr * (n_devices - 1) / warmup_iter
solver.set_learning_rate(base_lr)
# Create monitor
from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
monitor = Monitor(args.monitor_path)
monitor_loss = MonitorSeries("Training loss", monitor, interval=10)
monitor_err = MonitorSeries("Training error", monitor, interval=10)
monitor_time = MonitorTimeElapsed("Training time", monitor, interval=10)
monitor_verr = MonitorSeries("Test error", monitor, interval=1)
monitor_vtime = MonitorTimeElapsed("Validation time", monitor, interval=1)
# Data Iterator
# Seeding with device_id gives each worker a distinct shuffle stream.
rng = np.random.RandomState(device_id)
_, tdata = data_iterator(args.batch_size, True, rng)
vsource, vdata = data_iterator(args.batch_size, False)
# Training-loop
# ve as an nn.Variable suggests the validation error is all-reduced across
# devices elsewhere -- TODO confirm in the full script.
ve = nn.Variable()
# Iteration budget is divided evenly among the n_devices workers.
for i in range(int(args.max_iter / n_devices)):
# Validation
# Runs once per (per-device) epoch.
if i % int(n_train_samples / args.batch_size / n_devices) == 0:
ve_local = 0.
# NOTE(review): the first line below is the truncated tail of a call from
# yet another fragment (its opening, and `contents`, are not in this file);
# the rest is from a checkpointed SGD training example (load_checkpoint /
# save_checkpoint undefined here).
'Imagenet_result_epoch0.nnp'), contents)
# Create Solver.
# SGD with momentum 0.9.
solver = S.Momentum(args.learning_rate, 0.9)
solver.set_parameters(nn.get_parameters())
# Resume point defaults to 0 (fresh run) unless a checkpoint is supplied.
start_point = 0
if args.checkpoint is not None:
# load weights and solver state info from specified checkpoint file.
start_point = load_checkpoint(args.checkpoint, solver)
# Create monitor.
import nnabla.monitor as M
monitor = M.Monitor(args.monitor_path)
monitor_loss = M.MonitorSeries("Training loss", monitor, interval=10)
monitor_err = M.MonitorSeries("Training error", monitor, interval=10)
monitor_vloss = M.MonitorSeries("Validation loss", monitor, interval=10)
monitor_verr = M.MonitorSeries("Validation error", monitor, interval=10)
monitor_time = M.MonitorTimeElapsed("Training time", monitor, interval=10)
monitor_vtime = M.MonitorTimeElapsed(
"Validation time", monitor, interval=10)
# Training loop.
# Starts at start_point so a resumed run continues where it left off.
for i in range(start_point, args.max_iter):
# Save parameters
if i % args.model_save_interval == 0:
# save checkpoint file
save_checkpoint(args.model_save_path, i, solver)
# Validation
# Skipped at iteration 0 (nothing trained yet).
if i % args.val_interval == 0 and i != 0: