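# --- Super-resolution validation: accumulate SSIM/PSNR, save restored/HR/SR comparison grids, and log averages to TensorBoard. ---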
cache['ssim'] += ssim
cache['psnr'] += psnr
# Avoid out of memory crash on 8G GPU
if len(dev_images) < 60:
    dev_images.extend([to_image()(val_hr_restore.squeeze(0)),
                       to_image()(hr.data.cpu().squeeze(0)),
                       to_image()(sr.data.cpu().squeeze(0))])
dev_images = torch.stack(dev_images)
dev_images = torch.chunk(dev_images, dev_images.size(0) // 3)
dev_save_bar = tqdm(dev_images, desc='[saving training results]')
index = 1
for image in dev_save_bar:
    image = utils.make_grid(image, nrow=3, padding=5)
    utils.save_image(image, out_path + 'epoch_%d_index_%d.png' % (epoch, index), padding=5)
    index += 1
if use_tensorboard:
    log_value('ssim', cache['ssim'] / len(dev_loader), epoch)
    log_value('psnr', cache['psnr'] / len(dev_loader), epoch)
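# --- DCGAN-style training step: discriminator update, generator update, and periodic saving of real/fake sample grids. ---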
errD = errD_real + errD_fake
errD.backward()
optimizerD.step()
netG.zero_grad()
target = Variable(torch.ones(input.size()[0]))
output = netD(fake)
errG = criterion(output, target)
errG.backward()
optimizerG.step()
# .item() replaces the pre-0.4 tensor indexing errD.data[0] / errG.data[0]
print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f' % (epoch, 25, i, len(dataloader), errD.item(), errG.item()))
if i % 100 == 0:
    vutils.save_image(real, '%s/real_samples.png' % "./results", normalize=True)
    fake = netG(noise)
    vutils.save_image(fake.data, '%s/fake_samples_epoch_%03d.png' % ("./results", epoch), normalize=True)
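# --- Image generation script: sample from a trained model, save a grid of outputs, and render the generation steps as a GIF. ---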
start_time = time.time()
print('*'*25)
print("Generating Image...")
# Generate images.
with torch.no_grad():
    x = model.generate(int(args.num_output))
time_elapsed = time.time() - start_time
print('\nDONE!')
print('Time taken to generate image: %.2fs' % (time_elapsed))
print('\nSaving generated image...')
fig = plt.figure(figsize=(int(np.sqrt(int(args.num_output)))*2, int(np.sqrt(int(args.num_output)))*2))
plt.axis("off")
plt.imshow(np.transpose(vutils.make_grid(
x[-1], nrow=int(np.sqrt(int(args.num_output))), padding=1, normalize=True, pad_value=1).cpu(), (1, 2, 0)))
plt.savefig("Generated_Image")
plt.close('all')
# Create animation for the generation.
fig = plt.figure(figsize=(int(np.sqrt(int(args.num_output)))*2, int(np.sqrt(int(args.num_output)))*2))
plt.axis("off")
ims = [[plt.imshow(np.transpose(i,(1,2,0)), animated=True)] for i in x]
anim = animation.ArtistAnimation(fig, ims, interval=200, repeat_delay=2000, blit=True)
anim.save('draw_generate.gif', dpi=100, writer='imagemagick')
print('DONE!')
print('-'*50)
plt.show()
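# --- Latent-code recovery test: optimize codes per batch, save sample/reconstruction pairs, and report the averaged test loss. ---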
batch_code.grad.data.zero_()
best_code[i * opt.batch_size : i * opt.batch_size + batch_size].copy_(batch_code.data)
generated, _ = gen(batch_code)
loss = testfunc(generated, batch_target)
test_loss = test_loss + loss.item() * batch_size  # .item() replaces the deprecated loss.data[0]
if opt.final_test:
    print('batch loss = {0}'.format(loss.item()))
    sample_rec_pair = torch.Tensor(2, 3, opt.height, opt.width)
    for j in range(batch_size):
        sample_rec_pair[0].copy_(get_data(test_index[i * opt.batch_size + j]))
        sample_rec_pair[1].copy_(generated.data[j])
        if opt.output_scale:
            torchvision.utils.save_image(
                sample_rec_pair * 2 - 1,
                os.path.join(opt.load_path, '{0}_test'.format(opt.net), '{0}.png'.format(i * opt.batch_size + j)),
                nrow=2)  # nrow passed by keyword; the positional form breaks on current torchvision
        else:
            torchvision.utils.save_image(
                sample_rec_pair,
                os.path.join(opt.load_path, '{0}_test'.format(opt.net), '{0}.png'.format(i * opt.batch_size + j)),
                nrow=2)
for param in gen.parameters():
    param.requires_grad = True
gen.train()
if not opt.final_test:
    visualize(
        best_code[0 : min(test_index.size(0), opt.vis_row * opt.vis_col)],
        filename=os.path.join(opt.save_path, 'running_test', 'test_{0}.jpg'.format(current_iter)),
        filename_r=os.path.join(opt.save_path, 'running_test', 'r{0}_test_%d.jpg' % (current_iter,)),
        filename_all=os.path.join(opt.save_path, 'running_test', 'all_test_{0}.jpg'.format(current_iter))
    )
test_loss = test_loss / test_index.size(0)
print('loss = {0}'.format(test_loss))
return test_loss
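# --- Anomaly-detection test loop (GANomaly-style): collect per-sample scores and latents, optionally dump images, then min-max scale the scores and compute AUC. ---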
time_o = time.time()
self.an_scores[i*self.opt.batchsize : i*self.opt.batchsize+error.size(0)] = error.reshape(error.size(0))
self.gt_labels[i*self.opt.batchsize : i*self.opt.batchsize+error.size(0)] = self.gt.reshape(error.size(0))
self.latent_i[i*self.opt.batchsize : i*self.opt.batchsize+error.size(0), :] = latent_i.reshape(error.size(0), self.opt.nz)
self.latent_o[i*self.opt.batchsize : i*self.opt.batchsize+error.size(0), :] = latent_o.reshape(error.size(0), self.opt.nz)
self.times.append(time_o - time_i)
# Save test images.
if self.opt.save_test_images:
    dst = os.path.join(self.opt.outf, self.opt.name, 'test', 'images')
    if not os.path.isdir(dst):
        os.makedirs(dst)
    real, fake, _ = self.get_current_images()
    vutils.save_image(real, '%s/real_%03d.eps' % (dst, i+1), normalize=True)
    vutils.save_image(fake, '%s/fake_%03d.eps' % (dst, i+1), normalize=True)
# Measure inference time.
self.times = np.array(self.times)
self.times = np.mean(self.times[:100] * 1000)
# Scale error vector between [0, 1]
self.an_scores = (self.an_scores - torch.min(self.an_scores)) / (torch.max(self.an_scores) - torch.min(self.an_scores))
# auc, eer = roc(self.gt_labels, self.an_scores)
auc = evaluate(self.gt_labels, self.an_scores, metric=self.opt.metric)
performance = OrderedDict([('Avg Run Time (ms/batch)', self.times), ('AUC', auc)])
if self.opt.display_id > 0 and self.opt.phase == 'test':
    counter_ratio = float(epoch_iter) / len(self.dataloader['test'].dataset)
    self.visualizer.plot_performance(self.epoch, counter_ratio, performance)
return performance
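# --- Staged GAN training: log scalar losses, periodically log real and EMA-generator image grids, and checkpoint after each stage. ---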
self.writer.add_scalar('train/G_loss', G_loss, global_step)
self.writer.add_scalar('train/D_loss', D_loss, global_step)
self.writer.add_scalar('train/Wasserstein_Dist', Wasserstein_Dist, global_step)
print("[stage {}/{}][epoch {}/{}][aug {}/{}][iter {}/{}] G_loss {:.4f} D_loss {:.4f} W_Dist {:.4f}"
      .format(stage, total_stages, epoch+1, M, aug+1, opt.num_aug, i+1, len(self.dataloader), G_loss, D_loss, Wasserstein_Dist))
global_step += 1
ticker += 1
global_epoch += 1
if epoch % disp_circle == disp_circle-1:
    print('\nlog images...\n')
    I_real = utils.make_grid(real_data, nrow=4, normalize=True, scale_each=True)
    self.writer.add_image('stage_{}/real'.format(stage), I_real, epoch)
    with torch.no_grad():
        self.G_EMA.eval()
        fake_data = self.G_EMA.forward(fixed_z)
        I_fake = utils.make_grid(fake_data, nrow=4, normalize=True, scale_each=True)
        self.writer.add_image('stage_{}/fake'.format(stage), I_fake, epoch)
# after each stage: save checkpoints
print('\nsaving checkpoints...\n')
checkpoint = {
    'G_state_dict': self.G.module.state_dict(),
    'G_EMA_state_dict': self.G_EMA.state_dict(),
    'D_state_dict': self.D.module.state_dict(),
    'opt_G_state_dict': self.opt_G.state_dict(),
    'opt_D_state_dict': self.opt_D.state_dict(),
    'stage': stage
}
torch.save(checkpoint, os.path.join(opt.outf, 'stage{}.tar'.format(stage)))
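# --- Depth-supervised GAN training: log loss scalars and, every few steps, grids of predicted depth maps for real and fake inputs. ---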
info = {  # opening of the loss dict; entries before 'Loss_G' are truncated in this excerpt
    'Loss_G': Loss_G.item(),
    'loss_critic1': loss_critic1.item(),
    'loss_generator1': loss_generator1.item(),
    'loss_critic2': loss_critic2.item(),
    'loss_generator2': loss_generator2.item(),
    'loss_critic3': loss_critic3.item(),
    'loss_generator3': loss_generator3.item(),
}
for tag, value in info.items():
    summary_writer.add_scalar(tag, value, global_step)
if (step+1) % args.tst_step == 0:
    depth_Pre_real = torch.cat([depth_Pre[0:args.batchsize], depth_Pre[2*args.batchsize:3*args.batchsize], depth_Pre[4*args.batchsize:5*args.batchsize]], 0)
    depth_Pre_fake = torch.cat([depth_Pre[args.batchsize:2*args.batchsize], depth_Pre[3*args.batchsize:4*args.batchsize], depth_Pre[5*args.batchsize:6*args.batchsize]], 0)
    depth_Pre_all = vutils.make_grid(depth_Pre, normalize=True, scale_each=True)
    depth_Pre_real = vutils.make_grid(depth_Pre_real, normalize=True, scale_each=True)
    depth_Pre_fake = vutils.make_grid(depth_Pre_fake, normalize=True, scale_each=True)
    summary_writer.add_image('Depth_Image_all', depth_Pre_all, global_step)
    summary_writer.add_image('Depth_Image_real', depth_Pre_real, global_step)
    summary_writer.add_image('Depth_Image_fake', depth_Pre_fake, global_step)
#============ print the log info ============#
if (step+1) % args.log_step == 0:
    errors = OrderedDict([
        ('Loss_depth', Loss_depth.item()),
        ('Loss_triplet', Loss_triplet.item()),
        ('Loss_cls', Loss_cls.item()),
        ('Loss_G', Loss_G.item()),
        ('loss_critic1', loss_critic1.item()),
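# --- Solver helper: periodically save an HR/SR image grid to disk for visual comparison. ---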
def save_current_visual(self, epoch, iter):
    """
    Save HR/SR visual results side by side for comparison.
    """
    if epoch % self.save_vis_step == 0:
        visuals_list = []
        visuals = self.get_current_visual(need_np=False)
        visuals_list.extend([util.quantize(visuals['HR'].squeeze(0), self.opt['rgb_range']),
                             util.quantize(visuals['SR'].squeeze(0), self.opt['rgb_range'])])
        visual_images = torch.stack(visuals_list)
        visual_images = thutil.make_grid(visual_images, nrow=2, padding=5)
        visual_images = visual_images.byte().permute(1, 2, 0).numpy()
        # Note: scipy.misc.imsave was removed in SciPy 1.3; imageio.imwrite is a drop-in replacement here.
        misc.imsave(os.path.join(self.visual_dir, 'epoch_%d_img_%d.png' % (epoch, iter + 1)),
                    visual_images)
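# --- Progressive GAN step: discriminator/generator updates with periodic dumps of real and generated image grids. ---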
# loss_d.backward()
self.d_optim.step()
# update g_net
self.g_net.zero_grad()
d_fake = self.d_net(work_fake)
loss_g = self.criterion(d_fake, target_real)
loss_g.backward()
self.g_optim.step()
if cur_step % SAVE_IMG_STEP == 0:
    vutils.save_image(work_real,
                      "./output/real_netlevel%02d.jpg" % net_level,
                      nrow=4, normalize=True, padding=0)
    fake_work = self.g_net(g_data)
    vutils.save_image(fake_work.detach(),
                      './output/fake_%s_netlevel%02d_%07d.jpg' % (net_status, net_level, cur_step),
                      nrow=4, normalize=True, padding=0)
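# --- The common thread in the snippets above is torchvision's grid utilities. ---
# A minimal, self-contained sketch of that pattern; the tensor, tag, and file
# names below are illustrative placeholders, not taken from any project above.
import torch
import torchvision.utils as vutils
from torch.utils.tensorboard import SummaryWriter

samples = torch.rand(16, 3, 64, 64)                  # stand-in for a batch of generated images in [0, 1]
grid = vutils.make_grid(samples, nrow=4, padding=2)  # tile the batch into one CHW image, 4 per row

writer = SummaryWriter('runs/grid_sketch')
writer.add_image('samples', grid, 0)                 # log the grid to TensorBoard
writer.close()

# save_image builds the same grid internally before writing it to disk.
vutils.save_image(samples, 'samples_grid.png', nrow=4, padding=2)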