How to use the imageio.imread function in imageio

To help you get started, we've selected a few imageio.imread examples, based on popular ways it is used in public projects.
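
If you just need the basic call: imageio.imread takes a filename or URI and returns the image as a numpy ndarray. Here is a minimal sketch, using imageio's bundled chelsea.png sample image so it runs without any local files:

import imageio

# read the bundled sample image into a numpy ndarray
im = imageio.imread("imageio:chelsea.png")
print(im.shape, im.dtype)  # e.g. (300, 451, 3) uint8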


github keplerlab / katna / tests / image_similarity.py
def get_img(self, path, norm_size=True, norm_exposure=False):
    """
    Prepare an image for image-processing tasks.

    :param path: the input image path
    :type path: str
    :return: the image
    :rtype: numpy.ndarray
    """
    # as_gray=True returns a 2D grayscale array
    img = imageio.imread(path, as_gray=True).astype(int)
    # resizing returns float values in 0:255; convert to ints for downstream tasks
    if norm_size:
        img = skimage.transform.resize(
            img, (self.height, self.width), mode="constant", preserve_range=True
        )
    if norm_exposure:
        img = self.normalize_exposure(img)
    return img
github imageio / imageio / tests / test_pillow.py
def test_png():

    for isfloat in (False, True):
        for crop in (0, 1, 2):
            for colors in (0, 1, 3, 4):
                fname = fnamebase + "%i.%i.%i.png" % (isfloat, crop, colors)
                rim = get_ref_im(colors, crop, isfloat)
                imageio.imsave(fname, rim)
                im = imageio.imread(fname)
                mul = 255 if isfloat else 1
                assert_close(rim * mul, im, 0.1)  # lossless

    # Parameters
    im = imageio.imread("imageio:chelsea.png", ignoregamma=True)
    imageio.imsave(fnamebase + ".png", im, interlaced=True)

    # Parameter fail
    raises(TypeError, imageio.imread, "imageio:chelsea.png", notavalidk=True)
    raises(TypeError, imageio.imsave, fnamebase + ".png", im, notavalidk=True)

    # Compression
    imageio.imsave(fnamebase + "1.png", im, compression=0)
    imageio.imsave(fnamebase + "2.png", im, compression=9)
    s1 = os.stat(fnamebase + "1.png").st_size
    s2 = os.stat(fnamebase + "2.png").st_size
    assert s2 < s1
    # Fail
    raises(ValueError, imageio.imsave, fnamebase + ".png", im, compression=12)

    # Quantize
github JuheonYi / VESPCN-PyTorch / data / vsrdata.py
def _load_file(self, idx):
        """
        Read images from the given image directory.
        Return: an n_seq * H * W * C numpy array and a list of corresponding filenames
        """

        if self.train:
            f_hrs = self.images_hr[idx]
            f_lrs = self.images_lr[idx]
            start = self._get_index(random.randint(0, self.n_frames_video[idx] - self.n_seq))
            filenames = [os.path.splitext(os.path.basename(file))[0] for file in f_hrs[start:start+self.n_seq]]
            hrs = np.array([imageio.imread(hr_name) for hr_name in f_hrs[start:start+self.n_seq]])
            lrs = np.array([imageio.imread(lr_name) for lr_name in f_lrs[start:start+self.n_seq]])

        else:
            n_poss_frames = [n - self.n_seq + 1 for n in self.n_frames_video]
            video_idx, frame_idx = self._find_video_num(idx, n_poss_frames)
            f_hrs = self.images_hr[video_idx][frame_idx:frame_idx+self.n_seq]
            f_lrs = self.images_lr[video_idx][frame_idx:frame_idx+self.n_seq]
            filenames = [os.path.split(os.path.dirname(file))[-1] + '.' + os.path.splitext(os.path.basename(file))[0] for file in f_hrs]
            hrs = np.array([imageio.imread(hr_name) for hr_name in f_hrs])
            lrs = np.array([imageio.imread(lr_name) for lr_name in f_lrs])
        return lrs, hrs, filenames
github johannah / trajectories / trajectories / train_pixel_cnn.py
def generate(frame_num, gen_latents, orig_img_path, save_img_path):
    z_q_x = vmodel.embedding(gen_latents.view(gen_latents.size(0),-1))
    z_q_x = z_q_x.view(gen_latents.shape[0],6,6,-1).permute(0,3,1,2)
    x_d = vmodel.decoder(z_q_x)
    if save_img_path is not None:
        x_tilde = sample_from_discretized_mix_logistic(x_d, nr_logistic_mix)
        pred = (((np.array(x_tilde.cpu().data)[0,0]+1.0)/2.0)*float(max_pixel-min_pixel)) + min_pixel
        # input x is between 0 and 1
        real = imread(orig_img_path)
        f, ax = plt.subplots(1,3, figsize=(10,3))
        ax[0].imshow(real, vmin=0, vmax=max_pixel)
        ax[0].set_title("original frame %s"%frame_num)
        ax[1].imshow(pred, vmin=0, vmax=max_pixel)
        ax[1].set_title("pred")
        ax[2].imshow((pred-real)**2, cmap='gray')
        ax[2].set_title("error")
        f.tight_layout()
        plt.savefig(save_img_path)
        plt.close()
        print("saving example image")
        print("rsync -avhp jhansen@erehwon.cim.mcgill.ca://%s" %os.path.abspath(save_img_path))
github salu133445 / musegan / v1 / musegan / libs / utils.py
def make_gif(imgs_filter, gen_dir='./', stop_frame_num=10):
    img_list = glob.glob(imgs_filter)
    images = []
    for filename in img_list:
        images.append(imageio.imread(filename))
    print('%d imgs' % len(img_list))

    # repeat a black frame (same shape and dtype as the loaded images) so the gif pauses at the end
    stop_frame = np.zeros(images[0].shape, dtype=images[0].dtype)
    images = images + [stop_frame] * stop_frame_num

    imageio.mimsave(os.path.join(gen_dir, 'movie.gif'), images, duration=0.3)
github itdxer / neupy / examples / cnn / deeplab / data.py
def read_data(filepath):
    with open(filepath) as f:
        for line in f:
            image_path = os.path.join(IMAGES, line.strip() + '.jpg')
            annotation_path = os.path.join(ANNOTATIONS, line.strip() + '.png')

            yield (
                imread(image_path),
                imread(annotation_path),
            )
github ly015 / intrinsic_flow / scripts / data_generation / create_seg.py
            return 1
        elif x in torso:
            return 2
        elif x in larm:
            return 3
        elif x in rarm:
            return 4
        elif x in lleg:
            return 5
        elif x in rleg:
            return 6
    
    _vfunc_merge = np.vectorize(_func_merge, otypes=[np.uint8])

    for sid in tqdm.tqdm(id_list):
        silh = imageio.imread(input_dir+'%s.bmp'%sid)
        silhm = _vfunc_merge(silh)
        imageio.imwrite(output_dir+'%s.bmp'%sid, silhm)
github golmschenk / sr-gan / age / data.py
            if np.isnan(genders[index]):
                continue
            try:
                image = imageio.imread(os.path.join(dataset_base, image_path))
            except FileNotFoundError:
                continue
            if image.shape[0] < 256 or image.shape[1] < 256 or abs(image.shape[0] - image.shape[1]) > 5:
                continue
            indexes.append(index)
        image_paths = image_paths[indexes]
        ages = ages[indexes].astype(np.float32).tolist()
        genders = genders[indexes].tolist()
        # Preprocess images and create JSON.
        json_list = []
        for image_path, age, gender in zip(image_paths, ages, genders):
            image = imageio.imread(os.path.join(dataset_base, image_path))
            image = transform.resize(image, (self.preprocessed_image_size, self.preprocessed_image_size),
                                     preserve_range=True)
            if len(image.shape) == 2:
                image = color.gray2rgb(image)
            image_name = os.path.basename(image_path)
            imageio.imsave(os.path.join(preprocessed_directory, image_name), image.astype(np.uint8))
            gender = {0: 'female', 1: 'male'}[gender]
            json_list.append([image_name, age, gender])
        with open(os.path.join(preprocessed_directory, 'meta.json'), 'w+') as json_file:
            json.dump(json_list, json_file)
github martinResearch / PyIPOL / ipol / wrappers / DCT_Image_Denoising_a_Simple_and_Effective_Image_Denoising_Algorithm / examples.py
def example():

    from matplotlib import pyplot as plt
    import numpy as np

    # scipy.misc.imread uses PIL, which gives an error for this bmp file (Unsupported BMP compression)
    noise_free = imread(wrapper.source_directory + '/cinput.jpg')
    noisy = imread(wrapper.source_directory + '/cnoisy.jpg')
    output = wrapper.DCTdenoising(noisy, sigma=3, noise_free=noise_free)
    plt.subplot(2, 2, 1)
    plt.imshow(noise_free)
    plt.subplot(2, 2, 2)
    plt.imshow(noisy)
    plt.subplot(2, 2, 3)
    plt.imshow(output)
    plt.subplot(2, 2, 4)
    plt.imshow(np.sum(np.abs(output.astype(float) - noise_free.astype(float)), axis=2) / 5, cmap='Greys_r')
    plt.show()
    print('done')
github Parallel-in-Time / pySDC / pySDC / projects / AllenCahn_Bayreuth / visualize_temp.py
    Produces one png file per time step; combine them into a movie via e.g.
      > ffmpeg -i data/name_%08d.png name.mp4

    Args:
        path (str): path to data files
        name (str): name of the simulation (expects data to be in data path)
        output (str): path to output
    """

    img_files = sorted(glob.glob(f'{path}/{name}_*.png'))
    print(f'{path}{name}')

    images = []
    for fimg in img_files:
        img = imageio.imread(fimg)
        print(fimg, img.shape)
        images.append(img)  # reuse the frame already read above instead of reading the file twice
    fname = f'{output}/{name}.mp4'
    imageio.mimsave(fname, images, fps=8)