How to use the moviepy.editor.ImageSequenceClip function in moviepy

To help you get started, we’ve selected a few examples of ImageSequenceClip, drawn from popular ways moviepy is used in public projects.

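If you just want the core pattern these snippets share, here is a minimal, self-contained sketch (the frame data is synthetic and the file names are illustrative): build a list of equally sized uint8 arrays, wrap them in ImageSequenceClip with an fps, and write the clip out as a GIF or a video.

import numpy as np
import moviepy.editor as mpy

# 24 synthetic frames: a white square sliding across a black background.
frames = []
for t in range(24):
    frame = np.zeros((64, 64, 3), dtype=np.uint8)
    frame[24:40, 2 * t:2 * t + 16] = 255
    frames.append(frame)

clip = mpy.ImageSequenceClip(frames, fps=12)   # all frames must share one size
clip.write_gif("slide.gif")                    # animated GIF
clip.write_videofile("slide.mp4")              # codec inferred from extension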

github metalbubble / moments_models / test_video.py
with torch.no_grad():
    logits = model(input)
    h_x = F.softmax(logits, 1).mean(dim=0)
    probs, idx = h_x.sort(0, True)

# Output the prediction.
video_name = args.frame_folder if args.frame_folder is not None else args.video_file
print('RESULT ON ' + video_name)
for i in range(0, 5):
    print('{:.3f} -> {}'.format(probs[i], categories[idx[i]]))

# Render output frames with prediction text.
if args.rendered_output is not None:
    prediction = categories[idx[0]]
    rendered_frames = render_frames(frames, prediction)
    clip = mpy.ImageSequenceClip(rendered_frames, fps=4)
    clip.write_videofile(args.rendered_output)
github febert / visual_mpc / python_visual_mpc / video_prediction / utils_vpred / create_gif_lib.py
def npy_to_gif(im_list, filename):

    save_dir = '/'.join(str.split(filename, '/')[:-1])

    if not os.path.exists(save_dir):
        print('creating directory:', save_dir)
        os.mkdir(save_dir)

    clip = mpy.ImageSequenceClip(im_list, fps=4)
    clip.write_gif(filename + '.gif')
    return
github febert / visual_mpc / python_visual_mpc / data_preparation / create_gif.py
def npy_to_gif(im_list, filename):

    # save_dir = '/'.join(str.split(filename, '/')[:-1])
    #
    # if not os.path.exists(save_dir):
    #     print 'creating directory: ', save_dir
    #     os.mkdir(save_dir)

    clip = mpy.ImageSequenceClip(im_list, fps=4)
    clip.write_gif(filename + '.gif')
    return
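Both npy_to_gif variants above create the output directory by hand before writing. On Python 3 the same guard is usually written with os.makedirs, which also handles nested paths; a sketch under the same assumptions (im_list is a list of uint8 frames, as in the originals):

import os
import moviepy.editor as mpy

def npy_to_gif(im_list, filename):
    # makedirs creates intermediate directories; exist_ok=True makes it a
    # no-op when the directory already exists.
    save_dir = os.path.dirname(filename)
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)
    clip = mpy.ImageSequenceClip(im_list, fps=4)
    clip.write_gif(filename + '.gif')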
github naruya / tecnets-pytorch / resize.py
def resize(path_from, path_to):
    os.mkdir(path_to)

    D0 = natsorted(glob.glob(os.path.join(path_from, "object_*")))

    for d0 in tqdm(D0): # 0~769
        D1 = natsorted(glob.glob(d0+"/*"))
        d0 = d0.split("/")[-1]
        os.mkdir(os.path.join(path_to, d0))

        for d1 in D1: # 0~24
            gif = vread(d1)
            gif = [cv2.resize(frame, (64,64)) for frame in gif]
            clip = mpy.ImageSequenceClip(gif, fps=20)
            d1 = d1.split("/")[-1]
            clip.write_gif(os.path.join(path_to, d0, d1), fps=20)
github jonasrothfuss / ProMP / meta-policy-search / samplers / utils.py
            break
        o = next_o
        if animated:
            env.render()
            time.sleep(timestep*frame_skip / speedup)
            if save_video:
                from PIL import Image
                image = env.wrapped_env.wrapped_env.get_viewer().get_image()
                pil_image = Image.frombytes('RGB', (image[1], image[2]), image[0])
                images.append(np.flipud(np.array(pil_image)))

    if animated:
        if save_video:
            import moviepy.editor as mpy
            fps = int(speedup/timestep * frame_skip)
            clip = mpy.ImageSequenceClip(images, fps=fps)
            if video_filename[-3:] == 'gif':
                clip.write_gif(video_filename, fps=fps)
            else:
                clip.write_videofile(video_filename, fps=fps)
        #return

    return dict(
        observations=observations,
        actions=actions,
        rewards=rewards,
        agent_infos=agent_infos,
        env_infos=env_infos
        )
github jonasrothfuss / DeepEpisodicMemory / utils / io_handler.py
def convert_frames_to_gif(frames_dir, gif_file_name=None, image_type='.png', fps=15, gif_file_path=None):
  """ converts a folder with images to a gif file"""
  assert gif_file_name or gif_file_path
  file_names = sorted((os.path.join(frames_dir, fn) for fn in os.listdir(frames_dir) if fn.endswith(image_type)))
  print(file_names)
  if gif_file_path:
    filename = gif_file_path + '.gif'
  else:
    filename = os.path.join(frames_dir, os.path.basename(gif_file_name)) + '.gif'
  clip = mpy.ImageSequenceClip(file_names, fps=fps).to_RGB()
  clip.write_gif(filename, program='ffmpeg')
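Note that this snippet passes file paths rather than arrays: ImageSequenceClip also accepts a list of image file names (or a folder name) and plays the frames in the order given, so sorting matters. A sketch of that calling convention (the glob pattern is illustrative):

import glob
import moviepy.editor as mpy

# Frames are read from disk in the order of the (sorted) list.
file_names = sorted(glob.glob('frames/*.png'))
clip = mpy.ImageSequenceClip(file_names, fps=15)
clip.write_gif('frames.gif', program='ffmpeg')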
github edbeeching / 3d_control_deep_rl / 3dcdrl / create_rollout_videos.py
        obss = []

        obs = env.reset().astype(np.float32)
        done = False
        while not done:
            obss.append(obs*2)
            result = policy(torch.from_numpy(obs).unsqueeze(0), state, mask)

            action = result['actions']
            state = result['states']

            obs, reward, done, _ = env.step(action.item())
            obs = obs.astype(np.float32)
            
    observations = [o.transpose(1,2,0) for o in obss]
    clip = ImageSequenceClip(observations, fps=int(30/args.frame_skip))
    clip.write_videofile(filename)
github febert / visual_mpc / python_visual_mpc / visual_mpc_core / Datasets / analyze_dataset.py
    batch_size = 5
    good_dataset, bad_dataset = BaseVideoDataset('{}/good'.format(args.base_path), batch_size), BaseVideoDataset('{}/bad'.format(args.base_path), batch_size)
    good_images, bad_images = good_dataset['images'], bad_dataset['images']
    good_states, good_actions = good_dataset['state'], good_dataset['actions']

    sess = tf.InteractiveSession()
    tf.train.start_queue_runners(sess)
    sess.run(tf.global_variables_initializer())

    good_images, bad_images, good_states, good_actions = sess.run([good_images, bad_images, good_states, good_actions])
    T = good_images.shape[1]
    for i in range(batch_size):
        clip = mpy.ImageSequenceClip([good_images[i, t, args.view, :, :] for t in range(T)], fps = 5)
        clip.write_gif('good_{}.gif'.format(i))

        clip = mpy.ImageSequenceClip([bad_images[i, t, args.view, :, :] for t in range(T)], fps = 5)
        clip.write_gif('bad_{}.gif'.format(i))

    for i in range(batch_size):
        print('actions')
        print(good_actions[i])
        print('states')
        print(good_states[i])
github pascalxia / driver_attention_prediction / ut.py
def make_turing_movie(camera_images, gazemaps, thresh, fps):
    if len(gazemaps.shape)==3:
        gazemaps = np.expand_dims(gazemaps, axis=-1)
    blurred_images = filters.gaussian_filter(camera_images, sigma=(0,5,5,0))
    
    camera_clip = mpy.ImageSequenceClip([im for im in camera_images], fps=fps)
    blurred_clip = mpy.ImageSequenceClip([im for im in blurred_images], fps=fps)
    
    masks = (gazemaps>thresh).astype(np.float)*255
    blurred_masks = filters.gaussian_filter(masks, sigma=(0,2,2,0))
    mask_clip = mpy.ImageSequenceClip(list(blurred_masks), fps=fps).to_mask()
    fovea_clip = camera_clip.set_mask(mask_clip)
    mix_clip = mpy.CompositeVideoClip([blurred_clip, fovea_clip])
    return mix_clip
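The fovea effect above leans on moviepy's mask machinery: to_mask() reinterprets a clip's pixel intensities as per-pixel opacity, set_mask() attaches that mask to another clip, and CompositeVideoClip stacks the layers. A stripped-down sketch of the same idea, with all inputs synthetic:

import numpy as np
import moviepy.editor as mpy

fps = 10
sharp = [np.full((64, 64, 3), 200, dtype=np.uint8) for _ in range(20)]
blurry = [np.full((64, 64, 3), 50, dtype=np.uint8) for _ in range(20)]

# White mask pixels (255) stay visible, black (0) are hidden.
mask_frame = np.zeros((64, 64, 3), dtype=np.uint8)
mask_frame[16:48, 16:48] = 255
mask_clip = mpy.ImageSequenceClip([mask_frame] * 20, fps=fps).to_mask()

fovea_clip = mpy.ImageSequenceClip(sharp, fps=fps).set_mask(mask_clip)
mix_clip = mpy.CompositeVideoClip([mpy.ImageSequenceClip(blurry, fps=fps), fovea_clip])
mix_clip.write_videofile('mix.mp4', fps=fps)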
github pytorch / pytorch / torch / utils / tensorboard / summary.py
    try:
        import moviepy  # noqa: F401
    except ImportError:
        print('add_video needs package moviepy')
        return
    try:
        from moviepy import editor as mpy
    except ImportError:
        print("moviepy is installed, but can't import moviepy.editor.",
              "Some packages could be missing [imageio, requests]")
        return
    import tempfile

    t, h, w, c = tensor.shape

    # encode sequence of images into gif string
    clip = mpy.ImageSequenceClip(list(tensor), fps=fps)

    filename = tempfile.NamedTemporaryFile(suffix='.gif', delete=False).name
    try:  # older version of moviepy does not support progress_bar argument.
        clip.write_gif(filename, verbose=False, progress_bar=False)
    except TypeError:
        clip.write_gif(filename, verbose=False)

    with open(filename, 'rb') as f:
        tensor_string = f.read()

    try:
        os.remove(filename)
    except OSError:
        logging.warning('The temporary file used by moviepy cannot be deleted.')

    return Summary.Image(height=h, width=w, colorspace=c, encoded_image_string=tensor_string)
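The temp-file round trip in this snippet exists because write_gif needs a real path, while TensorBoard wants the encoded bytes in memory. The pattern isolates cleanly into a helper; a sketch under the same assumptions (frames is a sequence of HxWxC uint8 arrays):

import os
import tempfile
import moviepy.editor as mpy

def encode_gif(frames, fps=4):
    # Write to a named temp file, read the encoded bytes back, then clean up.
    clip = mpy.ImageSequenceClip(list(frames), fps=fps)
    path = tempfile.NamedTemporaryFile(suffix='.gif', delete=False).name
    try:
        clip.write_gif(path, verbose=False)
        with open(path, 'rb') as f:
            return f.read()
    finally:
        try:
            os.remove(path)
        except OSError:
            pass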