if mpi_local_rank == 0:
    # Monitor
    for mon, loss in monitor_losses:
        mon.add(ii, loss.d)
    # Save
    if (i + 1) % (args.model_save_interval // n_devices) == 0:
        for mon, x in monitor_images:
            mon.add(ii, x.d)
        nn.save_parameters(os.path.join(
            args.monitor_path, "param_{:05d}.h5".format(i)))
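The `mpi_local_rank` guard above restricts monitoring and checkpointing to one process per node. A minimal sketch of how that rank and `n_devices` are typically obtained with NNabla's multi-process data-parallel communicator (an assumption; the setup code is not part of this snippet):

import nnabla as nn
import nnabla.communicators as C
from nnabla.ext_utils import get_extension_context

ctx = get_extension_context("cudnn")
comm = C.MultiProcessDataParallelCommunicator(ctx)
comm.init()
n_devices = comm.size              # total number of processes
mpi_local_rank = comm.local_rank   # rank within this node
ctx.device_id = str(mpi_local_rank)
nn.set_default_context(ctx)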
logger.info("Mode:Test,Epoch:{}".format(epoch))
# Monitor for train
for monitor, v in monitor_train_list:
    monitor.add(i, v.d)
# Use the training graph since there is no test mode
x_data, _ = di_test_B.next()
y_data, _ = di_test_A.next()
x_real_test.d = x_data
y_real_test.d = y_data
x_recon_test.forward()
y_recon_test.forward()
# Monitor for test
for monitor, v in monitor_test_list:
    monitor.add(i, v.d)
# Save model
nn.save_parameters(os.path.join(
    args.model_save_path, 'params_%06d.h5' % i))
# Learning rate decay (see the sketch of linear_decay below)
for solver in [solver_gen, solver_dis_x, solver_dis_y]:
    linear_decay(solver, base_lr, epoch, args.max_epoch)
epoch += 1
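`linear_decay` is not defined in this excerpt. A plausible sketch, assuming the common CycleGAN-style schedule that holds the base learning rate for the first half of training and then anneals it linearly to zero (the actual schedule may differ):

def linear_decay(solver, base_lr, epoch, max_epoch):
    # Hold base_lr for the first half, then decay linearly to zero.
    # A sketch under assumed scheduling; only set_learning_rate is NNabla API.
    decay_start = max_epoch // 2
    if epoch < decay_start:
        lr = base_lr
    else:
        lr = base_lr * (max_epoch - epoch) / float(max_epoch - decay_start)
    solver.set_learning_rate(lr)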
# Get data
x_data, _ = di_train_B.next()
y_data, _ = di_train_A.next()
x_raw.d = x_data
y_raw.d = y_data
# Train Generators
loss_gen.forward(clear_no_need_grad=False)
solver_gen.zero_grad()
loss_gen.backward(clear_buffer=True)
vdata = data_iterator_cifar10(args.batch_size, False)
# Training loop
for i in range(int(args.max_iter / n_devices)):
    # Validation
    if i % int(n_train_samples / args.batch_size / n_devices) == 0:
        ve = 0.
        for j in range(args.val_iter):
            image, label = vdata.next()
            input_image_valid["image"].d = image
            pred_valid.forward()
            ve += categorical_error(pred_valid.d, label)
        ve /= args.val_iter
        monitor_verr.add(i * n_devices, ve)
    if i % int(args.model_save_interval / n_devices) == 0:
        nn.save_parameters(os.path.join(
            args.model_save_path, 'params_%06d.h5' % i))
    # Forward / zero_grad / backward, dispatched to one worker pool per device
    fb_results = []
    for device_id in range(n_devices):
        image, label = tdata.next()
        res = pools[device_id].apply_async(
            forward_backward,
            (input_image_train[device_id]["image"], image,
             input_image_train[device_id]["label"], label,
             losses_train[device_id], solvers[device_id]))
        fb_results.append(res)
    for device_id in range(n_devices):
        fb_results[device_id].get()
    # In-place allreduce
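`forward_backward` is the per-device worker dispatched through `apply_async` above; its definition is not shown in the excerpt. A minimal sketch matching the argument tuple (the body is an assumption):

def forward_backward(x, image, y, label, loss, solver):
    # Feed this device's mini-batch, then run one forward/backward pass.
    x.d = image
    y.d = label
    loss.forward(clear_no_need_grad=True)
    solver.zero_grad()
    loss.backward(clear_buffer=True)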
# Define the network first
x = nn.Variable((1, 3, 224, 224))
y = darknet19.darknet19_feature(x / 255, test=True)
# Get NNabla parameters
params = nn.get_parameters(grad_only=False)
# Parse Darknet weights and store them in the NNabla params
dn_weights = parser.load_weights_raw(args.input)
cursor = 0
for i in range(1, 19):  # layers 1 to 18
    print("Layer", i)
    cursor = parser.load_convolutional_and_get_next_cursor(
        dn_weights, cursor, params, 'c{}'.format(i))
nn.save_parameters(args.output)
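`parser.load_convolutional_and_get_next_cursor` copies one convolution block out of the flat Darknet weight array into the NNabla parameters. A hypothetical sketch, assuming each block is a batch-normalized convolution stored as [beta, gamma, mean, variance, W] and that the parameters follow the usual NNabla naming under each `c{i}` scope (both are assumptions, not confirmed by the excerpt):

def load_convolutional_and_get_next_cursor(weights, cursor, params, name):
    # Darknet stores a conv+BN block as consecutive flat float32 chunks.
    # Parameter key layout is assumed here.
    for key in ('{}/bn/beta', '{}/bn/gamma', '{}/bn/mean', '{}/bn/var',
                '{}/conv/W'):
        p = params[key.format(name)]
        p.d = weights[cursor:cursor + p.size].reshape(p.shape)
        cursor += p.size
    return cursor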
monitor_loss_gen = M.MonitorSeries("Generator loss", monitor, interval=10)
monitor_loss_dis = M.MonitorSeries(
    "Discriminator loss", monitor, interval=10)
monitor_time = M.MonitorTimeElapsed("Time", monitor, interval=100)
monitor_fake = M.MonitorImageTile(
    "Fake images", monitor,
    normalize_method=lambda x: (x + 1) / 2.)  # [-1, 1] -> [0, 1]
data = data_iterator_mnist(args.batch_size, True)
# Training loop.
for i in range(args.max_iter):
    if i % args.model_save_interval == 0:
        # Save generator and discriminator parameters separately.
        with nn.parameter_scope("gen"):
            nn.save_parameters(os.path.join(
                args.model_save_path, "generator_param_%06d.h5" % i))
        with nn.parameter_scope("dis"):
            nn.save_parameters(os.path.join(
                args.model_save_path, "discriminator_param_%06d.h5" % i))
    # Training forward
    image, _ = data.next()
    x.d = image / 255. - 0.5  # [0, 255] to [-0.5, 0.5]
    z.d = np.random.randn(*z.shape)
    # Generator update.
    solver_gen.zero_grad()
    loss_gen.forward(clear_no_need_grad=True)
    loss_gen.backward(clear_buffer=True)
    solver_gen.weight_decay(args.weight_decay)
    solver_gen.update()
    monitor_fake.add(i, fake)
    monitor_loss_gen.add(i, loss_gen.d.copy())
    # Discriminator update.
    solver_dis.zero_grad()
    loss_dis.forward(clear_no_need_grad=True)
    loss_dis.backward(clear_buffer=True)
    solver_dis.weight_decay(args.weight_decay)
    solver_dis.update()
    monitor_loss_dis.add(i, loss_dis.d.copy())
    monitor_time.add(i)
with nn.parameter_scope("gen"):
    nn.save_parameters(os.path.join(
        args.model_save_path, "generator_param_%06d.h5" % args.max_iter))
with nn.parameter_scope("dis"):
    nn.save_parameters(os.path.join(
        args.model_save_path, "discriminator_param_%06d.h5" % args.max_iter))
data = data_iterator(args.batch_size, True)
vdata = data_iterator(args.batch_size, False)
best_ve = 1.0
# Training loop.
for i in range(args.max_iter):
    if i % args.val_interval == 0:
        # Validation
        ve = 0.0
        for j in range(int(n_valid / args.batch_size)):
            vimage.d, vlabel.d = vdata.next()
            vpred.forward(clear_buffer=True)
            ve += categorical_error(vpred.d, vlabel.d)
        ve /= int(n_valid / args.batch_size)
        monitor_verr.add(i, ve)
        # Save the best model so far by validation error
        if ve < best_ve:
            nn.save_parameters(os.path.join(
                args.model_save_path, 'params_%06d.h5' % i))
            best_ve = ve
    # Training forward
    image.d, label.d = data.next()
    solver.zero_grad()
    loss.forward(clear_no_need_grad=True)
    # Training backward & update
    loss.backward(clear_buffer=True)
    solver.weight_decay(args.weight_decay)
    solver.update()
    # Monitor
    e = categorical_error(pred.d, label.d)
    monitor_loss.add(i, loss.d.copy())
    monitor_err.add(i, e)
    monitor_time.add(i)
# Final validation after training
ve = 0.0
for j in range(int(n_valid / args.batch_size)):
    vimage.d, vlabel.d = vdata.next()
    vpred.forward(clear_buffer=True)
    ve += categorical_error(vpred.d, vlabel.d)
ve /= int(n_valid / args.batch_size)
monitor_verr.add(i, ve)
# Save the final model
parameter_file = os.path.join(
    args.model_save_path, 'params_{:06}.h5'.format(args.max_iter))
nn.save_parameters(parameter_file)
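`categorical_error` is the helper used for both the training and validation error above; it is not defined in the excerpt. A minimal sketch, assuming `pred` is a (batch, classes) score array and `label` holds integer class indices:

import numpy as np

def categorical_error(pred, label):
    # Mean rate of argmax predictions that disagree with the labels.
    pred_label = pred.argmax(1)
    return (pred_label != label.flat).mean()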