How to use the imageio.imwrite function in imageio

To help you get started, we’ve selected a few imageio.imwrite examples based on popular ways it is used in public projects.
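As a quick orientation before the project snippets below, here is a minimal sketch of the call itself: imageio.imwrite takes a target path and an array-like image, and infers the output format from the file extension. The file names and array values below are purely illustrative.

import numpy as np
import imageio

# Write a random 64x64 RGB image; the ".png" extension selects the format,
# and uint8 data is written as-is.
img = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
imageio.imwrite('example.png', img)

# Read it back to confirm the round trip.
restored = imageio.imread('example.png')
print(restored.shape, restored.dtype)  # (64, 64, 3) uint8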


github ZhangMenghe / rgbd-processor-python / demo.py View on Github external
        cameraMatrix = processCamMat(camf.readlines())

        depthAddr_root  = rootpath + '/'.join(p for p in split_items[:-2]) + '/depth_bfx/' #+ split_items[-1].split('.')[0]+'_abs.png'
        rawDepthAddr_root = rootpath + '/'.join(p for p in split_items[:-2]) + '/depth/' #+ split_items[-1].split('.')[0]+'_abs.png'

        depthAddr = [depthAddr_root + f for f in listdir(depthAddr_root) if isfile(join(depthAddr_root,f ))][0]
        rawDepthAddr = [rawDepthAddr_root  +  f for f in listdir(rawDepthAddr_root) if isfile(join(rawDepthAddr_root,f ))][0]

        depthImage = imageio.imread(depthAddr).astype(float)/10000
        rawDepth = imageio.imread(rawDepthAddr).astype(float)/100000
        missingMask = (rawDepth == 0)

        HHA = getHHAImg(depthImage, missingMask, cameraMatrix)

        imageio.imwrite(outputpath + chooseSplit + '/hha/' + str(idx+1) + '.png',HHA)
        imageio.imwrite(outputpath + chooseSplit + '/height/' + str(idx+1) + '.png', HHA[:,:,1])
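The snippet above writes the computed HHA encoding and one of its channels as PNGs. When the array you want to save is floating point, as the depth maps read earlier in the snippet are, it helps to quantise it explicitly before calling imwrite, since PNG stores integer samples and imageio typically warns about a lossy conversion otherwise. A minimal sketch, assuming a hypothetical depth map in metres with a known maximum range:

import numpy as np
import imageio

# Hypothetical float depth map in metres (values and shape are illustrative).
depth_m = np.random.rand(480, 640).astype(np.float32) * 10.0

# Normalise against the assumed 10 m range, then quantise to 16 bits;
# uint16 arrays are saved as 16-bit PNGs.
depth_norm = np.clip(depth_m / 10.0, 0.0, 1.0)
imageio.imwrite('depth_preview.png', (depth_norm * 65535).astype(np.uint16))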
github shaohua0116 / Multiview2Novelview / evaler.py View on Github external
                    step, loss, img, batch_id, step_time = \
                        self.run_single_step(batch_chunk, step=s)

                    # plot images
                    if self.config.plot_image:
                        if use_test_id_list:
                            for i in range(self.batch_size):
                                for img_key in img.keys():
                                    model_name = batch_id_list[i][0].split('_')[0]
                                    target_id = '_'.join(batch_id_list[i][0].split('_')[1:])
                                    source_id = '-'.join(['_'.join(id.split('_')[1:])
                                                          for id in batch_id_list[i][1:]])
                                    img_name = '{}_target_{}_source_{}_{}.png'.format(
                                        model_name, target_id, source_id, img_key)
                                    if self.config.plot_image:
                                        imageio.imwrite(os.path.join(
                                            self.config.output_dir, img_name),
                                            img[img_key][i])
                        else:
                            raise ValueError('Plotting images requires an id list.')

                    loss_all.append(np.array(loss.values()))
                    time_all += step_time

                    s += 1
                    if use_test_id_list:
                        continue_evaluate = s < len(self.id_list)/self.batch_size
                    else:
                        continue_evaluate = s < self.config.max_eval_steps

                    # report loss
                    if not self.config.quiet:
github tensorlayer / tensorlayer / tensorlayer / visualize.py View on Github external
def save_image(image, image_path='_temp.png'):
    """Save a image.

    Parameters
    -----------
    image : numpy array
        [w, h, c]
    image_path : str
        path

    """
    try:  # RGB
        imageio.imwrite(image_path, image)
    except Exception:  # Greyscale
        imageio.imwrite(image_path, image[:, :, 0])
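A short usage example for the helper above; the array and file name are hypothetical. The try branch handles the usual [w, h, c] case, and the except branch falls back to writing only the first channel.

import numpy as np
import imageio  # imageio must be importable before save_image is called

# Hypothetical input for save_image as defined above.
rgb = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
save_image(rgb, image_path='rgb_example.png')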
github jornpeters / integer_discrete_flows / experiment_progressive_loading.py View on Github external
                images.append(x_recon.float())

                if i == 10:
                    break
            break

    for j in range(len(ys) + 1):

        grid = make_grid(
            torch.stack(images[j::len(ys) + 1], dim=0).squeeze(),
            nrow=11, padding=0,
            normalize=True, range=None,
            scale_each=False, pad_value=0)

        imageio.imwrite(
            exp_dir + 'loaded{j}.png'.format(j=j),
            grid.cpu().numpy().transpose(1, 2, 0))
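Note the transpose(1, 2, 0) in the snippet above: PyTorch image tensors are channels-first (C, H, W), while imageio.imwrite expects a height x width x channels array. A minimal sketch of that conversion, with an explicit cast to uint8 since make_grid with normalize=True produces floats in [0, 1] (tensor shape and file name are illustrative):

import numpy as np
import torch
import imageio

# Hypothetical CHW float tensor in [0, 1], e.g. a normalized image grid.
grid = torch.rand(3, 64, 64)
hwc = grid.cpu().numpy().transpose(1, 2, 0)  # CHW -> HWC
imageio.imwrite('grid.png', (hwc * 255).astype(np.uint8))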
github MarvinTeichmann / ConvCRF / demo.py View on Github external
        plt.show()
    else:
        if args.output is None:
            args.output = "out.png"

        logging.warning("Matplotlib not found.")
        logging.info("Saving output to {} instead".format(args.output))

    if args.output is not None:
        # Save results to disk
        out_img = np.concatenate(
            (image, coloured_label, coloured_unary, coloured_crf),
            axis=1)

        imageio.imwrite(args.output, out_img.astype(np.uint8))

        logging.info("Plot has been saved to {}".format(args.output))

    return
github NekoApocalypse / road-extraction-d-linknet / data_loader.py View on Github external
        self.data_pipeline.join()


def dummy_consumer(img, mask):
    print('Dummy Consumer Working...')
    print(img.shape, img.dtype)
    print(mask.shape, mask.dtype)


if __name__ == '__main__':
    # unit test
    image_loader = ImageLoader(shuffle=True)
    img_list, mask_list = image_loader.serve_data(5)
    for i in range(5):
        imageio.imwrite('test_img_{}.png'.format(i), img_list[i])
        imageio.imwrite('test_mask_{}.png'.format(i), mask_list[i])
    '''
    while not image_loader.eof:
github rajeswar18 / pix2shape / diffrend / torch / GAN / scaffold.py View on Github external
print(f"Min in image: {torch.min(output_image)}, Max: {torch.max(output_image)}, Avg: {torch.mean(output_image)}")

        output_image_normalized = (output_image - torch.min(output_image)) /\
                (torch.max(output_image) - torch.min(output_image) + 1e-10)
                
        loss = criterion(output_image_normalized, res['image'])
        loss.backward()
        optimizer.step()
        # TODO add other losses on the saved_outputs

        print(f"Loss at step {i}: {loss}")

        # Log outputs
        folder = 'tmp_outputs'
        imwrite(f'{folder}/input_image_{i}.png', res['image'].cpu().numpy())
        imwrite(f'{folder}/input_image_normalized_{i}.png', ((res['image'] - torch.min(res['image'])) /
                (torch.max(res['image']) - torch.min(res['image']) + 1e-10)).cpu().numpy())
        imwrite(f'{folder}/output_image_{i}.png', output_image.detach().cpu().numpy())
        imwrite(f'{folder}/output_image_normalized_{i}.png', output_image_normalized.detach().cpu().numpy())
github phuang17 / DeepMVS / python / download_training_datasets.py View on Github external
"extrinsic": [[img[5],img[8],img[11],img[14]], [img[6],img[9],img[12],img[15]], [img[7],img[10],img[13],img[16]], [0.0,0.0,0.0,1.0]],
							"f_x": img[0],
							"f_y": img[1],
							"c_x": img[3],
							"c_y": img[4]
						})
						with open(os.path.join(path, dataset_name, "{:04d}".format(seq_idx), "poses", "{:04d}.json".format(f_idx)), "w") as output_file:
							json.dump(camera, output_file)
					elif dt_type == "depth":
						dimension = dataset.attrs["extents"]
						depth = np.array(np.frombuffer(decompress(img.tobytes(), dimension[0] * dimension[1] * 2), dtype = np.float16)).astype(np.float32)
						depth = depth.reshape(dimension[0], dimension[1])
						imageio.imwrite(os.path.join(path, dataset_name, "{:04d}".format(seq_idx), "depths", "{:04d}.exr".format(f_idx)), depth, flags = freeimage.IO_FLAGS.EXR_ZIP)
					elif dt_type == "image":
						img = imageio.imread(img.tobytes(), format = "RAW-FI")
						imageio.imwrite(os.path.join(path, dataset_name, "{:04d}".format(seq_idx), "images", "{:04d}.png".format(f_idx)), img)
		with open(os.path.join(path, dataset_name, "num_images.json"), "w") as output_file:
			json.dump(num_images, output_file)
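This snippet writes float32 depth maps to OpenEXR through imageio's freeimage plugin (note the EXR_ZIP compression flag). A stripped-down sketch of the basic case, assuming the native FreeImage library is available; on a fresh install, imageio.plugins.freeimage.download() fetches it:

import numpy as np
import imageio

# Fetch the FreeImage binary needed by the EXR writer on first use.
imageio.plugins.freeimage.download()

# Hypothetical float32 depth map; the ".exr" extension selects the EXR writer.
depth = np.random.rand(192, 256).astype(np.float32)
imageio.imwrite('depth.exr', depth)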
github golmschenk / sr-gan / age / data.py View on Github external
            crop_x_start = center_x - half_crop_size
            crop_x_end = center_x + half_crop_size
            crop_y_start = center_y - half_crop_size
            crop_y_end = center_y + half_crop_size
            unchecked_crop_box = [crop_y_start, crop_x_start, crop_y_end, crop_x_end]
            crop_x_start = max(crop_x_start, 0)
            crop_y_start = max(crop_y_start, 0)
            crop_x_end = min(crop_x_end, image.shape[1])
            crop_y_end = min(crop_y_end, image.shape[0])
            crop_box = [crop_y_start, crop_x_start, crop_y_end, crop_x_end]
            if unchecked_crop_box != crop_box:
                print('Bad crop for {}. Cropped image is stretched.'.format(image_path))
            cropped_image = image[crop_y_start:crop_y_end, crop_x_start:crop_x_end]
        cropped_image = transform.resize(cropped_image, (self.preprocessed_image_size, self.preprocessed_image_size),
                                         preserve_range=True)
        imageio.imwrite(os.path.join(output_directory, image_name), cropped_image.astype(np.uint8))
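In the last line above, the cropped image is cast back to uint8 because skimage.transform.resize returns a floating-point array even when preserve_range=True keeps the original value range. A small sketch of the same pattern with hypothetical shapes and file names:

import numpy as np
import imageio
from skimage import transform

img = np.random.randint(0, 256, (100, 80, 3), dtype=np.uint8)
resized = transform.resize(img, (64, 64), preserve_range=True)  # float output
imageio.imwrite('resized.png', resized.astype(np.uint8))        # back to 8-bit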