How to use the moviepy.editor module in moviepy

To help you get started, we’ve selected a few moviepy examples based on popular ways it is used in public projects.

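Before the project excerpts, here is a minimal sketch of the module's core pattern; the file paths are placeholders, not files from the projects below.

import moviepy.editor as mpy

# Load a video, trim the first three seconds, and export them as a GIF.
clip = mpy.VideoFileClip('input.mp4')
clip.subclip(0, 3).write_gif('preview.gif', fps=10)
clip.close()  # release the ffmpeg reader behind the clip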

sniklaus/sepconv-slomo – run.py (view on GitHub)
    if arguments_strOut.split('.')[-1] in ['bmp', 'jpg', 'jpeg', 'png']:
        tensorFirst = torch.FloatTensor(numpy.array(PIL.Image.open(arguments_strFirst))[:, :, ::-1].transpose(2, 0, 1).astype(numpy.float32) * (1.0 / 255.0))
        tensorSecond = torch.FloatTensor(numpy.array(PIL.Image.open(arguments_strSecond))[:, :, ::-1].transpose(2, 0, 1).astype(numpy.float32) * (1.0 / 255.0))

        tensorOutput = estimate(tensorFirst, tensorSecond)

        PIL.Image.fromarray((tensorOutput.clamp(0.0, 1.0).numpy().transpose(1, 2, 0)[:, :, ::-1] * 255.0).astype(numpy.uint8)).save(arguments_strOut)

    elif arguments_strOut.split('.')[-1] in ['avi', 'mp4', 'webm', 'wmv']:
        import moviepy
        import moviepy.editor

        # Write intermediate frames into a randomly named temporary directory.
        strTempdir = tempfile.gettempdir() + '/' + str.join('', [random.choice('abcdefghijklmnopqrstuvwxyz0123456789') for intCount in range(20)])
        os.makedirs(strTempdir + '/')

        intFrames = 0
        tensorFrames = [None, None, None, None, None]

        # moviepy yields RGB frames; [:, :, ::-1] converts them to the BGR layout the network expects.
        for intFrame, numpyFrame in enumerate(numpyFrame[:, :, ::-1] for numpyFrame in moviepy.editor.VideoFileClip(filename=arguments_strVideo).iter_frames()):
            tensorFrames[4] = torch.FloatTensor(numpyFrame.transpose(2, 0, 1).astype(numpy.float32) * (1.0 / 255.0))

            if tensorFrames[0] is not None:
                # Interpolate the midpoint, then the two quarter points, quadrupling the frame rate.
                tensorFrames[2] = estimate(tensorFrames[0], tensorFrames[4])
                tensorFrames[1] = estimate(tensorFrames[0], tensorFrames[2])
                tensorFrames[3] = estimate(tensorFrames[2], tensorFrames[4])

                PIL.Image.fromarray((tensorFrames[0].clamp(0.0, 1.0).numpy().transpose(1, 2, 0)[:, :, ::-1] * 255.0).astype(numpy.uint8)).save(strTempdir + '/' + str(intFrames).zfill(5) + '.png'); intFrames += 1
                PIL.Image.fromarray((tensorFrames[1].clamp(0.0, 1.0).numpy().transpose(1, 2, 0)[:, :, ::-1] * 255.0).astype(numpy.uint8)).save(strTempdir + '/' + str(intFrames).zfill(5) + '.png'); intFrames += 1
                PIL.Image.fromarray((tensorFrames[2].clamp(0.0, 1.0).numpy().transpose(1, 2, 0)[:, :, ::-1] * 255.0).astype(numpy.uint8)).save(strTempdir + '/' + str(intFrames).zfill(5) + '.png'); intFrames += 1
                PIL.Image.fromarray((tensorFrames[3].clamp(0.0, 1.0).numpy().transpose(1, 2, 0)[:, :, ::-1] * 255.0).astype(numpy.uint8)).save(strTempdir + '/' + str(intFrames).zfill(5) + '.png'); intFrames += 1
            # end

            tensorFrames[0] = torch.FloatTensor(numpyFrame.transpose(2, 0, 1).astype(numpy.float32) * (1.0 / 255.0))
        # end
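The excerpt stops after the interpolated frames have been written as numbered PNGs; the remainder of run.py re-encodes them into the output video. A minimal sketch of that step (the directory name and frame rate are placeholders, not values from the original script):

import moviepy.editor as mpy

# Re-encode a directory of numbered PNG frames as a video file.
# ImageSequenceClip also accepts a folder path, provided it contains only images.
clip = mpy.ImageSequenceClip('frames_dir', fps=30)
clip.write_videofile('out.mp4')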
febert/visual_mpc – python_visual_mpc/sawyer/visual_mpc_rospkg/src/utils/robot_dualcam_recorder.py (view on GitHub)
            clip = []
            for i in range(self.sequence_length):
                cv2.imwrite('{}/im{}.png'.format(folder, i), self.images[i, f, :, :, ::-1],
                            [cv2.IMWRITE_PNG_STRATEGY_DEFAULT, 1])
                if 'save_large_gifs' in self._agent_conf:
                    if 'opencv_tracking' in self._agent_conf:
                        raw_images_bgr = copy.deepcopy(self.raw_images[i, f])
                        clip.append(render_bbox(raw_images_bgr, self.track_bbox[i, f]))
                    else:
                        clip.append(self.raw_images[i, f])
                else:
                    clip.append(self.images[i, f])
                if self._save_raw:
                    cv2.imwrite('{}/im_med{}.png'.format(folder, i), self.raw_images[i, f, :, :, ::-1],
                                [cv2.IMWRITE_PNG_STRATEGY_DEFAULT, 1])
            clip = mpy.ImageSequenceClip(clip, fps = 5)
            clip.write_gif('{}/diag.gif'.format(folder))
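ImageSequenceClip, used above, also accepts a list of in-memory numpy arrays (height x width x 3, uint8) rather than filenames. A self-contained sketch with synthetic frames (all names are placeholders):

import numpy as np
import moviepy.editor as mpy

# Ten 64x64 frames fading from black to light gray, written as a GIF.
frames = [np.full((64, 64, 3), i * 25, dtype=np.uint8) for i in range(10)]
clip = mpy.ImageSequenceClip(frames, fps=5)
clip.write_gif('fade.gif')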
SenorPez/project-cars-replay-enhancer – replayenhancer/ReplayEnhancer.py (view on GitHub)
def _load_configuration(self, json_data):
        """
        Commenting out all elements. Enable as needed by development.
        We'll see what we're not using, that way.
        """
        try:
            self._video = mpy.VideoFileClip(json_data['source_video'])
            self._video = self._video.subclip(json_data['video_skipstart'], json_data['video_skipend'])
        except KeyError:
            pass
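The load-then-subclip pattern above is the usual way to trim a clip by timestamps. A minimal sketch (the path and times are placeholders):

import moviepy.editor as mpy

# Keep only the segment between 5 s and 10 s; subclip also accepts
# (minutes, seconds) tuples or 'HH:MM:SS' strings.
clip = mpy.VideoFileClip('race.mp4').subclip(5, 10)
clip.write_videofile('race_trimmed.mp4')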
febert/visual_mpc – python_visual_mpc/region_proposal_networks/rpn_tracker.py (view on GitHub)
def make_gif(im_list):
    clip = mpy.ImageSequenceClip(im_list, fps=4)
    clip.write_gif('modeldata/tracking.gif')
rl-lang-grounding/rl-lang-ground – helper.py (view on GitHub)
def make_gif(images, fname, duration=2, true_image=False, salience=False, salIMGS=None):
    import moviepy.editor as mpy

    def make_frame(t):
        try:
            x = images[int(len(images) / duration * t)]
        except IndexError:
            x = images[-1]

        if true_image:
            return x.astype(np.uint8)
        else:
            return ((x + 1) / 2 * 255).astype(np.uint8)

    def make_mask(t):
        try:
            x = salIMGS[int(len(salIMGS) / duration * t)]
        except IndexError:
            x = salIMGS[-1]
        return x

    clip = mpy.VideoClip(make_frame, duration=duration)
    if salience:
        # When salience is set, write the (faint) saliency mask as the GIF.
        mask = mpy.VideoClip(make_mask, ismask=True, duration=duration)
        clipB = clip.set_mask(mask).set_opacity(0)
        mask = mask.set_opacity(0.1)
        mask.write_gif(fname, fps=len(images) / duration, verbose=False)
        #clipB.write_gif(fname, fps=len(images) / duration, verbose=False)
    else:
        clip.write_gif(fname, fps=len(images) / duration, verbose=False)
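For reference, a mask in moviepy is itself a clip built with ismask=True whose frames are per-pixel opacities in [0, 1]. A self-contained sketch of the masking pattern used above (frame contents are arbitrary):

import numpy as np
import moviepy.editor as mpy

def make_frame(t):
    # A solid gray frame that brightens over time.
    return np.full((64, 64, 3), int(t * 100) % 256, dtype=np.uint8)

def make_mask(t):
    # Uniform opacity ramping from 0 to 1 over the first second.
    return np.full((64, 64), min(t, 1.0))

clip = mpy.VideoClip(make_frame, duration=2).set_mask(
    mpy.VideoClip(make_mask, ismask=True, duration=2))
background = mpy.ColorClip((64, 64), color=(255, 255, 255), duration=2)
mpy.CompositeVideoClip([background, clip]).write_gif('masked.gif', fps=10)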
uber-research/atari-model-zoo – atari_zoo/utils.py (view on GitHub)
def load_clip_from_cache(algo, env, run_id, tag="final", video_cache="."):

    i_video_fn = "%s/%s-%s-%d-%s.mp4" % (video_cache, algo, env, run_id, tag)

    return mpy.VideoFileClip(i_video_fn)
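One caveat with this pattern: VideoFileClip keeps an ffmpeg reader process open, so callers that load many cached clips should close each one when finished. In recent moviepy releases the clip can be used as a context manager (the path below is a placeholder):

import moviepy.editor as mpy

# Release the underlying ffmpeg reader as soon as the clip is no longer needed.
with mpy.VideoFileClip('cached.mp4') as clip:
    print(clip.duration, clip.fps)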
MaDFolking/AI_BIG_DATAS_ALGORITHM – DeepLearningProject/DGGANProject/utils.py (view on GitHub)
def make_gif(images, fname, duration=2, true_image=False):
  import moviepy.editor as mpy

  def make_frame(t):
    try:
      x = images[int(len(images)/duration*t)]
    except IndexError:
      x = images[-1]

    if true_image:
      return x.astype(np.uint8)
    else:
      return ((x+1)/2*255).astype(np.uint8)

  clip = mpy.VideoClip(make_frame, duration=duration)
  clip.write_gif(fname, fps=len(images) / duration)
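A hypothetical call to this function, with frames scaled to [-1, 1] as the true_image=False branch expects (all values are illustrative):

import numpy as np

# Twenty random frames over a 2-second GIF, i.e. an effective 10 fps.
images = [np.random.uniform(-1, 1, (64, 64, 3)) for _ in range(20)]
make_gif(images, 'samples.gif', duration=2)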
K-Du/Generative-Adversarial-Network – srez_demo.py (view on GitHub)
def demo1(sess):
    """Demo based on images dumped during training"""

    # Get images that were dumped during training
    filenames = tf.gfile.ListDirectory(FLAGS.train_dir)
    filenames = sorted(filenames)
    filenames = [os.path.join(FLAGS.train_dir, f) for f in filenames if f.endswith('.png')]

    assert len(filenames) >= 1

    fps = 5

    # Create video file from PNGs
    print("Producing video file...")
    filename  = os.path.join(FLAGS.train_dir, 'demo1.mp4')
    clip      = mpe.ImageSequenceClip(filenames, fps=fps)
    clip.write_videofile(filename)
    print("Done!")