# Setup assumed by this excerpt (not shown in the source): imports, a writer,
# and the audio constants the loop references. The `freqs` table is an
# assumption chosen so that freqs[n_iter // 10] stays in range for n_iter in [0, 100).
import numpy as np
import torch
import torchvision.models as models
import torchvision.utils as vutils
from tensorboardX import SummaryWriter

resnet18 = models.resnet18(False)
writer = SummaryWriter()
sample_rate = 44100
freqs = [262, 294, 330, 349, 392, 440, 440, 440, 440, 440, 440]
recall = [1.0, 0.8533334, 0.28, 0.0666667, 0.0]  # sample recall values for a raw PR curve (unused below)

for n_iter in range(100):
    s1 = torch.rand(1)  # value to keep
    s2 = torch.rand(1)
    # data grouping by `slash`: write the tags as plain strings, since
    # os.path.join would produce backslashes on Windows and break the grouping
    writer.add_scalar('data/scalar_systemtime', s1[0], n_iter)
    writer.add_scalar('data/scalar_customtime', s1[0], n_iter, walltime=n_iter)
    writer.add_scalars('data/scalar_group', {"xsinx": n_iter * np.sin(n_iter),
                                             "xcosx": n_iter * np.cos(n_iter),
                                             "arctanx": np.arctan(n_iter)}, n_iter)
    x = torch.rand(32, 3, 64, 64)  # output from network
    if n_iter % 10 == 0:
        x = vutils.make_grid(x, normalize=True, scale_each=True)
        writer.add_image('Image', x, n_iter)  # Tensor
        # writer.add_image('astronaut', skimage.data.astronaut(), n_iter)  # numpy
        # writer.add_image('imread',
        #                  skimage.io.imread('screenshots/audio.png'), n_iter)  # numpy
        x = torch.zeros(sample_rate * 2)
        for i in range(x.size(0)):
            # sound amplitude should be in [-1, 1]
            x[i] = np.cos(freqs[n_iter // 10] * np.pi *
                          float(i) / float(sample_rate))
        writer.add_audio('myAudio', x, n_iter)
        writer.add_text('Text', 'text logged at step:' + str(n_iter), n_iter)
        writer.add_text('markdown Text', '''a|b\n-|-\nc|d''', n_iter)
        for name, param in resnet18.named_parameters():
            if 'bn' not in name:
                writer.add_histogram(name, param, n_iter)
        # the source truncated this call; 100 predictions paired with the
        # 100 binary labels is the natural completion
        writer.add_pr_curve('xoxo', np.random.randint(2, size=100),
                            np.random.rand(100), n_iter)
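
# Not shown in the excerpt above: a tensorboardX SummaryWriter is typically
# closed once logging finishes; export_scalars_to_json is optional and dumps
# the logged scalars for external processing.
writer.export_scalars_to_json("./all_scalars.json")
writer.close()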
import kornia
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision

def imshow(input: torch.Tensor):
    out: torch.Tensor = torchvision.utils.make_grid(input, nrow=2, padding=5)
    out_np: np.ndarray = kornia.tensor_to_image(out)  # CHW tensor -> HWC numpy array
    plt.imshow(out_np)
    plt.axis('off')
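
# Hypothetical usage of the helper above: any float NCHW batch works, e.g. a
# random one; make_grid tiles it two images per row with 5px padding.
batch = torch.rand(4, 3, 64, 64)
imshow(batch)
plt.show()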
def save_img_results(imgs_tcpu, fake_imgs, num_imgs,
                     count, image_dir, summary_writer):
    num = cfg.TRAIN.VIS_COUNT
    # real images from the last stage of the pipeline
    real_img = imgs_tcpu[-1][0:num]
    vutils.save_image(
        real_img, '%s/real_samples%09d.png' % (image_dir, count),
        normalize=True)
    real_img_set = vutils.make_grid(real_img).numpy()
    real_img_set = np.transpose(real_img_set, (1, 2, 0))  # CHW -> HWC
    real_img_set = real_img_set * 255
    real_img_set = real_img_set.astype(np.uint8)

    for i in range(len(fake_imgs)):
        fake_img = fake_imgs[i][0:num]
        vutils.save_image(
            fake_img.data, '%s/count_%09d_fake_samples%d.png' %
            (image_dir, count, i), normalize=True)
        fake_img_set = vutils.make_grid(fake_img.data).cpu().numpy()
        fake_img_set = np.transpose(fake_img_set, (1, 2, 0))  # CHW -> HWC
        # the arithmetic below implies generator outputs in [-1, 1]; map to [0, 255]
        fake_img_set = (fake_img_set + 1) * 255 / 2
        fake_img_set = fake_img_set.astype(np.uint8)
    # (excerpt begins mid-function) assemble the RGB segmentation map from the
    # per-class colour lookups computed above; the red-channel line is restored
    # here by symmetry with the green and blue ones
    rgb[:, :, 0] = r
    rgb[:, :, 1] = g
    rgb[:, :, 2] = b
    if plot:
        plt.imshow(rgb)
        plt.show()
    else:
        return rgb
if __name__ == '__main__':
    local_path = '/home/neuron/Desktop/Donghao/cellsegmentation/normalCV/camvid-master'
    dst = camvidLoader(local_path, is_transform=True)
    trainloader = data.DataLoader(dst, batch_size=4)
    for i, batch in enumerate(trainloader):  # 'batch', not 'data', to avoid shadowing the module
        imgs, labels = batch
        if i == 0:
            img = torchvision.utils.make_grid(imgs).numpy()
            img = np.transpose(img, (1, 2, 0))  # CHW -> HWC
            img = img[:, :, ::-1]  # BGR -> RGB for matplotlib
            plt.imshow(img)
            plt.show()
            plt.imshow(dst.decode_segmap(labels.numpy()[i]))
            plt.show()
if iter_num % 2 == 0:
    # log five axial slices (z = 20, 30, 40, 50, 60) of the first case; each
    # slice is repeated to 3 channels so make_grid treats it as an RGB image
    image = volume_batch[0, 0:1, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
    grid_image = make_grid(image, 5, normalize=True)
    writer.add_image('train/Image', grid_image, iter_num)

    image = outputs_soft[0, 0:1, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
    grid_image = make_grid(image, 5, normalize=False)
    writer.add_image('train/Predicted_label', grid_image, iter_num)

    image = label_batch[0, :, :, 20:61:10].unsqueeze(0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
    grid_image = make_grid(image, 5, normalize=False)
    writer.add_image('train/Groundtruth_label', grid_image, iter_num)

    out_dis_slice = out_dis[0, 0, :, :, 20:61:10].unsqueeze(0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
    grid_image = make_grid(out_dis_slice, 5, normalize=False)
    writer.add_image('train/out_dis_map', grid_image, iter_num)

    gt_dis_slice = gt_dis[0, 0, :, :, 20:61:10].unsqueeze(0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
    grid_image = make_grid(gt_dis_slice, 5, normalize=False)
    writer.add_image('train/gt_dis_map', grid_image, iter_num)
## change lr
if iter_num % 2500 == 0:
    lr_ = base_lr * 0.1 ** (iter_num // 1000)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr_
if iter_num % 1000 == 0:
    save_mode_path = os.path.join(snapshot_path, 'iter_' + str(iter_num) + '.pth')
    torch.save(net.state_dict(), save_mode_path)
    logging.info("save model to {}".format(save_mode_path))
if iter_num > max_iterations:
    break  # the body was truncated in the source; exiting the loop is the usual pattern
def _transform_image(output_tensor):
    # move to CPU, then tile the batch into one grid, rescaling each image to [0, 1]
    output_tensor = output_tensor.cpu()
    return torchvision.utils.make_grid(output_tensor, normalize=True, scale_each=True)
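
# Hypothetical call site for the helper above (the names are examples, not
# from the original source):
# grid = _transform_image(model_output)          # CHW tensor in [0, 1]
# writer.add_image('outputs', grid, global_step)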
def show_batch(batch):
    img_batch = batch['X']
    img_batch[:, 0, ...].mul_(1)  # in-place no-op, left as a hook for per-channel scaling
    grid = utils.make_grid(img_batch)
    plt.imshow(grid.numpy().transpose((1, 2, 0)))  # CHW -> HWC
    plt.title('Batch from dataloader')
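
# Hypothetical usage of show_batch (assumed names): visualise the first batch
# of a dataloader that yields dicts with an 'X' image tensor.
# show_batch(next(iter(dataloader)))
# plt.show()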
def drawing(images, results1, results2, mode='train'):
    images = images.data.cpu()[:16]
    # binarise both predictions at 0.5 before rendering
    results1 = (results1 > 0.5).float()[:16]
    results2 = (results2 > 0.5).float()[:16]
    images = vutils.make_grid(images, normalize=True, scale_each=True)
    results1 = vutils.make_grid(results1, normalize=True, scale_each=True)
    results2 = vutils.make_grid(results2, normalize=True, scale_each=True)
    # overlay each mask on the images at 50% opacity and stack the two
    # overlays vertically (dim 1 of a CHW grid is height)
    writer.add_image('image/{}'.format(mode),
                     torch.cat([images * 0.5 + results1 * 0.5,
                                images * 0.5 + results2 * 0.5], 1))
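
# Hypothetical call site for drawing (assumed names): probabilities from a
# sigmoid head pair naturally with the 0.5 threshold used above.
# drawing(inputs, logits1.sigmoid(), logits2.sigmoid(), mode='val')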
    print('Training Complete with best validation loss {:.4f}'.format(BEST_VAL))  # end of the training branch
else:
    # reload the best checkpoint instead of retraining
    conv_autoencoder.load_state_dict(torch.load('./history/conv_autoencoder.pt'))

evaluation(conv_autoencoder, test_loader)

conv_autoencoder.cpu()
dataiter = iter(train_loader)
images, _ = next(dataiter)
images = Variable(images[:32])
outputs = conv_autoencoder(images)

# plot and save original and reconstructed images for comparison
plt.figure()
plt.subplot(121)
plt.title('Original MNIST Images')
data_utils.imshow(torchvision.utils.make_grid(images))
plt.subplot(122)
plt.title('Autoencoder Reconstruction')
data_utils.imshow(torchvision.utils.make_grid(outputs.data))
plt.savefig('./images/conv_autoencoder.png')