How to use the moviepy.editor.VideoFileClip function in moviepy

To help you get started, we’ve selected a few moviepy examples based on popular ways it is used in public projects.
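
Before the project examples below, here is a minimal sketch of the pattern they all share. It assumes moviepy 1.x (where moviepy.editor is available) and uses placeholder file names:

from moviepy.editor import VideoFileClip

# Open a video file; "input.mp4" is a placeholder path, not a real asset.
clip = VideoFileClip("input.mp4")
print(clip.size, clip.fps, clip.duration)  # (width, height), frames per second, seconds

# Cut out the first five seconds and encode the result; "output.mp4" is also a placeholder.
short = clip.subclip(0, 5)
short.write_videofile("output.mp4")

clip.close()  # release the underlying ffmpeg reader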

github AlexEMG / DeepLabCut / Analysis-tools / AnalyzeVideos.py
for video in videos:
    vname=video.split('.')[0]
    dataname = vname+ scorer +'.h5'
    try:
        # Attempt to load data.
        pd.read_hdf(dataname)
        print("Video already analyzed!", dataname)
    except FileNotFoundError:
        
        cfg['batch_size']=batchsize
        sess, inputs, outputs = predict.setup_pose_prediction(cfg)
        
        print("Loading ", video)
        start0=time.time()
        
        clip = VideoFileClip(video)
        ny, nx = clip.size  # clip.size is (width, height)
        fps = clip.fps
        if cropping:
            clip = clip.crop(
                y1=y1, y2=y2, x1=x1, x2=x2)  # one might want to adjust

        nframes_approx = int(np.ceil(clip.duration * clip.fps) + frame_buffer)
        print("Duration of video [s]: ", clip.duration, ", recorded with ", fps,
              "fps!")
        print("Overall # of frames: ", nframes_approx,"with cropped frame dimensions: ", clip.size)

        start = time.time()
        clip.reader.initialize()
        print("Starting to extract posture")
        if batchsize>1:
            PredicteData,nframes=CrankVideo(cfg, sess, inputs, outputs,clip,nframes_approx,batchsize)

github ogniK5377 / DiscordEmojify / emojify.py
def main(argc, argv):
    if argc < 4:
        print('%s <input> <output>   [max height emojis]' % argv[0])
        return 1
    
    try:
        if not os.path.exists(argv[2]):
            os.makedirs(argv[2])
        max_w = int(argv[4])
        clip = VideoFileClip(argv[1])
        max_h = 0
        if argc == 6:
            max_h = int(argv[5])
            if max_h*max_w > MAX_EMOJI_COUNT:
                print('Emojis exceed limit of %d, emoji count is %d!' % (MAX_EMOJI_COUNT, max_h*max_w))
                option = get_user_input('Would you like to continue? (y/N): ').lower()
                if len(option) == 0 or option[0] != 'y':
                    return 1
        else:
            max_h = int(math.floor(MAX_EMOJI_COUNT / max_w))

        new_w = EMOJI_BLOCK_SZ * max_w
        new_h = EMOJI_BLOCK_SZ * max_h
        print('Rescaled size:', new_w, new_h)
        discord_cmd = ''
        idx = 0

github pelednoam / mmvt / src / utils / movies_utils.py
    from moviepy import editor

    def annotate(clip, txt, txt_color=txt_color, fontsize=fontsize):
        """ Writes a text at the bottom of the clip. """
        # To make this code work, the policy.xml file should be edited:
        #  identify -list policy
        # sudo gedit /etc/ImageMagick/policy.xml &
        # Put under comment the TEXT and LABEL lines
        txtclip = editor.TextClip(txt, fontsize=fontsize, color=txt_color)  # font=font
        # txtclip = txtclip.on_color((clip.w, txtclip.h + 6), color=(0, 0, 255), pos=(6, 'center'))
        cvc = editor.CompositeVideoClip([clip, txtclip.set_pos(('center', 'bottom'))])
        return cvc.set_duration(clip.duration)

    if isinstance(subs, str):
        subs = import_subs(movie_fol, subs, subs_delim)
    video = editor.VideoFileClip(op.join(movie_fol, movie_name))
    annotated_clips = [annotate(video.subclip(from_t, to_t), txt) for (from_t, to_t), txt in subs]
    final_clip = editor.concatenate_videoclips(annotated_clips)
    final_clip.write_videofile(op.join(movie_fol, out_movie_name))

github zo7 / deep-features-video / extract_features.py
    #mot_existing = [os.path.splitext(x)[0] for x in os.listdir(motion_dir)]
    #flo_existing = [os.path.splitext(x)[0] for x in os.listdir(opflow_dir)]

    video_filenames = [x for x in sorted(os.listdir(input_dir))
                       if is_video(x) and os.path.splitext(x)[0] not in vis_existing]


    # Go through each video and extract features

    from keras.applications.imagenet_utils import preprocess_input

    for video_filename in tqdm(video_filenames):

        # Open video clip for reading
        try:
            clip = VideoFileClip( os.path.join(input_dir, video_filename) )
        except Exception as e:
            sys.stderr.write("Unable to read '%s'. Skipping...\n" % video_filename)
            sys.stderr.write("Exception: {}\n".format(e))
            continue

        # Sample frames at 1fps
        fps = int( np.round(clip.fps) )
        frames = [scipy.misc.imresize(crop_center(x.astype(np.float32)), shape)
                  for idx, x in enumerate(clip.iter_frames()) if idx % fps == fps//2]


        n_frames = len(frames)

        frames_arr = np.empty((n_frames,)+shape+(3,), dtype=np.float32)
        for idx, frame in enumerate(frames):
            frames_arr[idx,:,:,:] = frame

github alex000kim / video_frame_remover / video_frame_remover.py
def remove_frame(in_fname, out_fname, n_sample_frames=100):
    sample_frames = get_frames(in_fname, n_sample_frames)
    input_frame = get_median_frame(sample_frames)
    res = get_frame_box_coords(input_frame)
    if res is None:
        print("No border was detected in {}".format(in_fname))
        return None
    else:
        x, y, w, h = res
    clip = VideoFileClip(in_fname)
    crop_clip = crop(clip, x1=x, y1=y, x2=x + w, y2=y + h)
    crop_clip.write_videofile(out_fname)

github cvalenzuela / scenescoop / make_scene.py
  # get the largest continuous scene
  largest_continuous_scene = []
  for scene in scenes:
    if (len(scene) > len(largest_continuous_scene)):
      largest_continuous_scene = scene

  start_frame = largest_continuous_scene[0]
  end_frame = largest_continuous_scene[-1]
  frames_duration = end_frame - start_frame
  if (frames_duration == 0):
    start_frame = start_frame - 1
    end_frame = end_frame + 1

  # create the video
  clip = mp.VideoFileClip(transform_video).subclip(start_frame,end_frame)
  composition = mp.concatenate([clip])
  video_name = "/{}.mp4".format(str(time()))
  composition.write_videofile(output_dir + video_name)
  if (api == True):
    return {"name": video_name, "scene_closest_meaning": closest_meaning}

github theriley106 / Nightchord / main.py
def GetDuration(clip):
	if '.mp4' in str(clip): return VideoFileClip(clip).duration
	elif '.mp3' in str(clip): return eyed3.load('{}'.format(clip)).info.time_secs

github abinashmeher999 / voice-data-extract / srtvoiceext / extractor.py
        # Downloads ffmpeg binary if it is absent
        imageio.plugins.ffmpeg.download()

        video_path = os.path.join(os.getcwd(), video_name)
        logger.info("video path: {}".format(video_path))
        subtitle_path = os.path.join(os.getcwd(), subtitle_name)
        logger.info("subtitles path: {}".format(subtitle_path))
        output_path = os.path.join(os.getcwd(), relative_outdir)
        logger.info("output path: {}".format(output_path))
        if not mkdir_p(output_path):
            logger.debug("Output directory created at {}".format(output_path))

        subs = pysrt.open(subtitle_path, encoding='utf-8')

        clip = mp.VideoFileClip(video_path)
        for line, num in zip(subs, itertools.count()):
            if '\n' in line.text:
                continue

            time_convert = lambda t: (t.hours, t.minutes, t.seconds + t.milliseconds / 1000)
            start = time_convert(line.start)
            end = time_convert(line.end)
            subclip = clip.subclip(t_start=start, t_end=end)

            audio_filename = "{}-{}-{}.mp3".format(num, format_filename(line.text[:100]),
                                                   suuid.ShortUUID().random(length=6))
            audio_filepath = os.path.join(output_path, audio_filename)
            subclip.audio.write_audiofile(audio_filepath, verbose=False, progress_bar=False)

            audio = EasyID3(audio_filepath)
            audio['title'] = line.text

github ddddwee1 / sul / sul_tool.py
def combine_audio(vidname, audname, outname, fps=25):
	import moviepy.editor as mpe
	my_clip = mpe.VideoFileClip(vidname)
	audio_background = mpe.AudioFileClip(audname)
	final_clip = my_clip.set_audio(audio_background)
	final_clip.write_videofile(outname,fps=fps)