How to use the moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip class in moviepy

To help you get started, we’ve selected a few moviepy examples based on popular ways CompositeVideoClip is used in public projects.

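CompositeVideoClip layers a list of clips on top of one another: the first clip (or a background color) forms the bottom layer, and later clips in the list are drawn over earlier ones. Below is a minimal sketch, assuming the MoviePy 1.x API (set_duration, set_position, write_videofile); the output file name is only an illustration.

from moviepy.editor import ColorClip, CompositeVideoClip

# A 640x480 red background lasting 3 seconds.
background = ColorClip((640, 480), color=(255, 0, 0)).set_duration(3)

# A smaller green clip, centered on the background.
overlay = (ColorClip((200, 200), color=(0, 255, 0))
           .set_duration(3)
           .set_position("center"))

# Clips later in the list are blitted over earlier ones.
final = CompositeVideoClip([background, overlay])
final.write_videofile("composite_demo.mp4", fps=24)  # hypothetical output path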

github Zulko / moviepy / tests / test_Videos.py View on Github
def test_afterimage():
    ai = ImageClip("media/afterimage.png")
    masked_clip = mask_color(ai, color=[0,255,1]) # for green
    some_background_clip = ColorClip((800,600), color=(255,255,255))
    final_clip = CompositeVideoClip([some_background_clip, masked_clip],
                                    use_bgclip=True)
    final_clip.duration = 5
    final_clip.write_videofile(
        os.path.join(TMP_DIR, "afterimage.mp4"), fps=30)
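
In this test, mask_color gives the image a mask that makes its green pixels transparent, and use_bgclip=True tells CompositeVideoClip to use the first clip in the list as an opaque background onto which the remaining clips are blitted, instead of creating a new color background of its own.
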
github Zulko / moviepy / tests / test_PR.py View on Github
def test_PR_610():
    """
    Test that the max fps of the video clips is used for the composite video clip
    """
    clip1 = ColorClip((640, 480), color=(255, 0, 0)).set_duration(1)
    clip2 = ColorClip((640, 480), color=(0, 255, 0)).set_duration(1)
    clip1.fps = 24
    clip2.fps = 25
    composite = CompositeVideoClip([clip1, clip2])
    assert composite.fps == 25
github Zulko / moviepy / tests / test_misc.py View on Github
def test_subtitles():
    red = ColorClip((800, 600), color=(255, 0, 0)).set_duration(10)
    green = ColorClip((800, 600), color=(0, 255, 0)).set_duration(10)
    blue = ColorClip((800, 600), color=(0, 0, 255)).set_duration(10)
    myvideo = concatenate_videoclips([red, green, blue])
    assert myvideo.duration == 30

    generator = lambda txt: TextClip(txt, font=FONT,
                                     size=(800, 600), fontsize=24,
                                     method='caption', align='South',
                                     color='white')

    subtitles = SubtitlesClip("media/subtitles1.srt", generator)
    final = CompositeVideoClip([myvideo, subtitles])
    final.write_videofile(os.path.join(TMP_DIR, "subtitles1.mp4"), fps=30)

    data = [([0.0, 4.0], 'Red!'), ([5.0, 9.0], 'More Red!'),
            ([10.0, 14.0], 'Green!'), ([15.0, 19.0], 'More Green!'),
            ([20.0, 24.0], 'Blue'), ([25.0, 29.0], 'More Blue!')]

    assert subtitles.subtitles == data

    subtitles = SubtitlesClip(data, generator)
    assert subtitles.subtitles == data
    close_all_clips(locals())
github Zulko / moviepy / moviepy / video / fx / freeze_region.py View on Github
        x1, y1, x2, y2 = region
        freeze = (clip.fx(crop, *region)
                      .to_ImageClip(t=t)
                      .set_duration(clip.duration)
                      .set_position((x1,y1)))
        return CompositeVideoClip([clip, freeze])
    
    elif outside_region is not None:
        
        x1, y1, x2, y2 = outside_region
        animated_region = (clip.fx(crop, *outside_region)
                               .set_position((x1,y1)))
        freeze = (clip.to_ImageClip(t=t)
                      .set_duration(clip.duration))
        return CompositeVideoClip([freeze, animated_region])
    
    elif mask is not None:
        freeze = (clip.to_ImageClip(t=t)
                      .set_duration(clip.duration)
                      .set_mask(mask))
        return CompositeVideoClip([clip, freeze])
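
The excerpt above is from MoviePy's freeze_region effect, which freezes part of the frame as an ImageClip taken at time t and composites it with the rest of the clip. Below is a hedged usage sketch, assuming the MoviePy 1.x fx interface and the 1.x signature freeze_region(clip, t=0, region=None, outside_region=None, mask=None); the input file name is hypothetical.

from moviepy.editor import VideoFileClip
from moviepy.video.fx.freeze_region import freeze_region

clip = VideoFileClip("input.mp4")  # hypothetical input file

# Freeze the rectangle (x1, y1, x2, y2) = (0, 0, 200, 150) at t=1s;
# everything outside that rectangle keeps playing.
frozen_corner = clip.fx(freeze_region, t=1, region=(0, 0, 200, 150))
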
github Zulko / moviepy / moviepy / video / compositing / on_color.py View on Github
    :param size: size of the final clip. By default it will be the
       size of the current clip.
    :param color: the background color of the final clip.
    :param pos: the position of the clip in the final clip.
    :param col_opacity: should the added zones be transparent?
    """
    
    if size is None:
        size = clip.size
    if pos is None:
        pos = 'center'
    colorclip = ColorClip(size, color=color)
    if col_opacity:
        colorclip = colorclip.with_mask().set_opacity(col_opacity)

    return CompositeVideoClip([colorclip, clip.set_position(pos)],
                              transparent=(col_opacity is not None))
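
The same functionality is available as the clip.on_color method in MoviePy 1.x. A small sketch, assuming that API and the size, color, pos and col_opacity keywords shown above; the input file name is hypothetical.

from moviepy.editor import VideoFileClip

clip = VideoFileClip("input.mp4")  # hypothetical input file

# Center the clip on a larger black canvas. With col_opacity set, the
# added border stays semi-transparent if the result is composited again.
padded = clip.on_color(size=(1280, 720), color=(0, 0, 0),
                       pos='center', col_opacity=0.6)
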
github Zulko / moviepy / moviepy / video / fx / make_loopable.py View on Github
def make_loopable(clip, cross):
    """
    Makes the clip fade in progressively at its own end, so that it
    can be looped indefinitely. ``cross`` is the duration in seconds
    of the fade-in.
    """
    d = clip.duration
    clip2 = clip.fx(transfx.crossfadein, cross).set_start(d - cross)
    return CompositeVideoClip([clip, clip2]).subclip(cross, d)
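
make_loopable is usually applied through clip.fx. A minimal sketch, assuming MoviePy 1.x and the make_loopable(clip, cross) signature shown above; the input file name is hypothetical.

from moviepy.editor import VideoFileClip
from moviepy.video.fx.make_loopable import make_loopable

clip = VideoFileClip("input.mp4")  # hypothetical input file

# Crossfade the last second over the beginning so the clip loops cleanly.
loopable = clip.fx(make_loopable, 1)
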
github Zulko / moviepy / moviepy / video / compositing / CompositeVideoClip.py View on Github
            self.duration = max(ends)
            self.end = max(ends)

        # compute audio
        audioclips = [v.audio for v in self.clips if v.audio is not None]
        if len(audioclips) > 0:
            self.audio = CompositeAudioClip(audioclips)

        # compute mask if necessary
        if transparent:
            maskclips = [(c.mask if (c.mask is not None) else
                          c.add_mask().mask).set_position(c.pos)
                          .set_end(c.end).set_start(c.start, change_end=False)
                          for c in self.clips]

            self.mask = CompositeVideoClip(
                maskclips, self.size, ismask=True, bg_color=0.0)

        def make_frame(t):
            """ The clips playing at time `t` are blitted over one
                another. """

            f = self.bg.get_frame(t)
            for c in self.playing_clips(t):
                f = c.blit_on(f, t)
            return f

        self.make_frame = make_frame
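
As make_frame shows, each composite frame starts from the background frame and blits every clip playing at time t in list order, so later clips cover earlier ones; the start time and position set on each clip decide when and where its layer appears. A short sketch under those assumptions (MoviePy 1.x API):

from moviepy.editor import ColorClip, CompositeVideoClip

base = ColorClip((640, 480), color=(0, 0, 0)).set_duration(6)
late = (ColorClip((320, 240), color=(255, 255, 255))
        .set_duration(3)
        .set_start(2)              # only blitted for t in [2, 5)
        .set_position((50, 50)))   # top-left corner of this layer

final = CompositeVideoClip([base, late])  # `late` is drawn over `base`
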
github Zulko / moviepy / moviepy / video / compositing / CompositeVideoClip.py View on Github
    # find rows_widths and cols_widths automatically if not provided
    if rows_widths is None:
        rows_widths = sizes_array[:,:,1].max(axis=1)
    if cols_widths is None:
        cols_widths = sizes_array[:,:,0].max(axis=0)
    
    xx = np.cumsum([0]+list(cols_widths)) 
    yy = np.cumsum([0]+list(rows_widths))
    
    for j,(x,cw) in list(enumerate(zip(xx[:-1],cols_widths))):
        for i,(y,rw) in list(enumerate(zip(yy[:-1],rows_widths))):
            clip = array[i,j]
            w,h = clip.size
            if (w < cw) or (h < rw):
                clip = (CompositeVideoClip([clip.set_position('center')],
                                          size = (cw,rw),
                                          bg_color = bg_color).
                                     set_duration(clip.duration))
                
            array[i,j] = clip.set_position((x,y))
                 
    return CompositeVideoClip(array.flatten(), size = (xx[-1],yy[-1]),
                              bg_color = bg_color)
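
The excerpt above appears to come from clips_array, the helper in the same module that lays clips out on a grid and wraps each undersized cell in its own CompositeVideoClip before compositing the whole grid. A hedged usage sketch, assuming MoviePy 1.x:

from moviepy.editor import ColorClip, clips_array

red = ColorClip((320, 240), color=(255, 0, 0)).set_duration(2)
green = ColorClip((320, 240), color=(0, 255, 0)).set_duration(2)
blue = ColorClip((320, 240), color=(0, 0, 255)).set_duration(2)
white = ColorClip((320, 240), color=(255, 255, 255)).set_duration(2)

# A 2x2 grid; the result is itself a 640x480 CompositeVideoClip.
grid = clips_array([[red, green],
                    [blue, white]])
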
github Zulko / moviepy / moviepy / video / compositing / concatenate.py View on Github
            return clips[i].get_frame(t - tt[i])

        def get_mask(c):
            mask = c.mask or ColorClip([1, 1], color=1, ismask=True)
            if mask.duration is None:
                mask.duration = c.duration
            return mask

        result = VideoClip(ismask = ismask, make_frame = make_frame)
        if any([c.mask is not None for c in clips]):
            masks = [get_mask(c) for c in clips]
            result.mask = concatenate_videoclips(masks, method="chain",
                                                 ismask=True)
            result.clips = clips
    elif method == "compose":
        result = CompositeVideoClip(
            [c.set_start(t).set_position('center') for (c, t) in zip(clips, tt)],
            size=(w, h), bg_color=bg_color, ismask=ismask)
    else:
        raise Exception("Moviepy Error: The 'method' argument of "
                        "concatenate_videoclips must be 'chain' or 'compose'")

    result.tt = tt

    result.start_times = tt[:-1]
    result.start, result.duration, result.end = 0, tt[-1], tt[-1]

    audio_t = [(c.audio,t) for c,t in zip(clips,tt) if c.audio is not None]
    if len(audio_t)>0:
        result.audio = CompositeAudioClip([a.set_start(t)
                                for a,t in audio_t])
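
With method="compose", concatenate_videoclips builds a CompositeVideoClip in which each clip is centered on a canvas large enough for the biggest clip and starts where the previous one ends. A small sketch, assuming MoviePy 1.x:

from moviepy.editor import ColorClip, concatenate_videoclips

small = ColorClip((320, 240), color=(255, 0, 0)).set_duration(2)
large = ColorClip((640, 480), color=(0, 0, 255)).set_duration(2)

# Differently sized clips: "compose" centers each on a 640x480 canvas.
movie = concatenate_videoclips([small, large], method="compose")
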
github achalddave / vid / vid / utils / moviepy_wrappers / composite_clip.py View on Github
    xx = np.cumsum([0] + list(cols_widths))
    yy = np.cumsum([0] + list(rows_widths))

    for j, (x, cw) in list(enumerate(zip(xx[:-1], cols_widths))):
        for i, (y, rw) in list(enumerate(zip(yy[:-1], rows_widths))):
            clip = array[i, j]
            if clip is None:
                continue
            w, h = clip.size
            if (w < cw) or (h < rw):
                clip = (CompositeVideoClip(
                    [clip.set_pos('center')], size=(cw, rw),
                    bg_color=bg_color).set_duration(clip.duration))
            array[i, j] = clip.set_pos((x, y))

    return CompositeVideoClip(
        [x for x in array.flatten() if x is not None],
        size=(xx[-1], yy[-1]),
        bg_color=bg_color)
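
Unlike the upstream clips_array excerpted earlier, this wrapper skips grid cells that are None, both when padding undersized clips and when flattening the array, so a layout can leave cells empty and simply show the background color there.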