How to use the moviepy.editor.TextClip function in moviepy

To help you get started, we’ve selected a few moviepy examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github beshrkayali / gifbook / gb.py View on Github external
# NOTE(review): fragment of a larger method (the enclosing `def` is not shown
# here) — `self`, `sub`, `resize`, `TextClip` and `CompositeVideoClip` come
# from the surrounding scope/imports.
# Cut the source clip to this subtitle's time window (moviepy accepts
# (minutes, seconds) tuples for subclip bounds) and scale it.
clip = (
                self._clip
                .subclip((sub.start.minutes, sub.start.seconds),
                         (sub.end.minutes, sub.end.seconds))
                .resize(resize)
            )

            # The video itself is the bottom layer of the composition.
            compositions = [clip]

            # Stack each subtitle line 20px below the previous one.
            # NOTE(review): the first line is already offset 20px below
            # self._subtitles_position_y because the increment happens before
            # the TextClip is built — presumably intentional; verify.
            subtitles_y_pos = self._subtitles_position_y
            for line in sub.text.split('\n'):
                subtitles_y_pos += 20

                # Render one line of subtitle text, position it, and make it
                # last exactly as long as the trimmed clip.
                text = (
                    TextClip(line,
                             fontsize=self.subtitles_font_size,
                             color=self.subtitles_color,
                             stroke_width=self.subtitles_stroke_width,
                             stroke_color=self.subtitles_stroke_color,
                             bg_color=self.subtitles_background_color,
                             font=self.subtitles_font_name)

                    .set_pos((self._subtitles_position_x,
                              subtitles_y_pos))

                    .set_duration(clip.duration))

                compositions.append(text)

            # Flatten video + all subtitle lines into a single clip.
            composition = CompositeVideoClip(compositions)
github SummitKwan / transparent_latent_gan / src / model / pggan / misc.py View on Github external
def setup_text_label(text, font='Calibri', fontsize=32, padding=6, glow_size=2.0, glow_coef=3.0, glow_exp=2.0, cache_size=100): # => (alpha, glow)
    """Render *text* to an alpha matte plus a glow layer, memoized in an LRU cache.

    Returns a ``(alpha, glow)`` tuple of 2-D float arrays. Results are stored in
    the module-level ``_text_label_cache`` keyed on every rendering parameter, so
    repeated calls with identical arguments are free.
    """
    cache_key = (text, font, fontsize, padding, glow_size, glow_coef, glow_exp)

    # Cache hit: re-insert to mark the entry most-recently-used, then return it.
    if cache_key in _text_label_cache:
        cached = _text_label_cache.pop(cache_key)
        _text_label_cache[cache_key] = cached
        return cached

    # Evict least-recently-used entries until there is room for one more.
    while len(_text_label_cache) >= cache_size:
        _text_label_cache.popitem(last=False)

    # Rasterize the text via moviepy's TextClip (requires ImageMagick) and take
    # its alpha mask at t=0, padded with transparent pixels on all sides.
    import moviepy.editor # pip install moviepy
    alpha = moviepy.editor.TextClip(text, font=font, fontsize=fontsize).mask.make_frame(0)
    alpha = np.pad(alpha, padding, mode='constant', constant_values=0.0)

    # Blur the matte, then apply a gain + soft-clip curve to shape the glow.
    glow = scipy.ndimage.gaussian_filter(alpha, glow_size)
    glow = 1.0 - np.maximum(1.0 - glow * glow_coef, 0.0) ** glow_exp

    # Insert as the newest cache entry and return.
    result = (alpha, glow)
    _text_label_cache[cache_key] = result
    return result
github uber-research / atari-model-zoo / atari_zoo / utils.py View on Github external
# NOTE(review): fragment of a larger function (the enclosing `def` is not
# shown) — grid_sz_x/y, clip_dict, label_padding, x/y_step, x/y_labels,
# composite_size and mpy come from the surrounding scope.
#load in all the movie clips
    # Place every cell clip at its (x, y) grid coordinate, offset past the
    # label gutter.
    for _x in range(grid_sz_x):
        for _y in range(grid_sz_y):
            pos =(label_padding + _x*x_step,label_padding + _y*y_step)
            clip_dict[(_x,_y)] = clip_dict[(_x,_y)].set_position(pos)
            #clip.write_gif(o_video_fn)

    clip_list = []
    #add background clip (solid white, full composite size, bottom layer)
    clip_list.append(mpy.ColorClip(size=composite_size, color=(255,255,255)))

    #now add x and y labels
    # NOTE(review): `!= None` should be `is not None` per PEP 8; and
    # `enumerate(y_labels)` would replace the manual l_idx counter.
    l_idx = 0
    if y_labels != None:
        for label in y_labels:
            # Row labels: left edge, vertically centered on their row.
            txtClip = mpy.TextClip(label,color='black', fontsize=label_fontsize).set_position((0,label_padding+y_step*l_idx+(y_step/2)))
            l_idx+=1
            clip_list.append(txtClip)

    l_idx = 0
    if x_labels != None:
        for label in x_labels:
            # Column labels: along the top gutter, aligned with their column.
            txtClip = mpy.TextClip(label,color='black', fontsize=label_fontsize).set_position((label_padding+x_step*l_idx,label_padding/2))
            l_idx+=1
            clip_list.append(txtClip)
    
    # Layer the cell clips above background and labels.
    for key in clip_dict:
        clip_list.append(clip_dict[key])
    
    cc = mpy.CompositeVideoClip(clip_list,composite_size)
    return cc
github uber-research / atari-model-zoo / atari_zoo / activation_movie.py View on Github external
# NOTE(review): fragment of a larger function (the enclosing `def` is not
# shown) — m, labels, clip_dict, scales, x_pos, y_pos, padding,
# label_fontsize, clip_list, composite_size and mpy come from the
# surrounding scope.
# Prepend a pseudo-layer for the raw observations so it is laid out first.
layers = m.layers.copy()
    layers.insert(0,{'name':'observations'})

    # Stack each layer's activation clip vertically, centered on x_pos, with
    # its text label drawn immediately above it.
    for layer_idx in range(len(labels)):
        layer_name = layers[layer_idx]['name']
    
        #get clip and resize it
        clip = clip_dict[layer_name]
        clip = clip.resize(scales[layer_idx])
    
        #calculate where to place it (horizontally centered on x_pos)
        _x_pos = x_pos - 0.5 * clip.size[0]
        _y_pos = y_pos
        clip = clip.set_position((_x_pos,_y_pos))
    
        # Label sits centered above the clip (its own height higher).
        txtClip = mpy.TextClip(labels[layer_idx],color='black', fontsize=label_fontsize)
        txtPos = (x_pos - 0.5 * txtClip.size[0],y_pos - txtClip.size[1])
        clip_list.append(txtClip.set_position(txtPos))
    
        #offset coordinates for the next layer: move below this clip + gap
        y_pos += clip.size[1]
        y_pos += padding
        clip_list.append(clip)
    
    # NOTE(review): duration is taken from the last layer's clip — assumes all
    # clips share the same duration; verify against the callers.
    duration = clip.duration

    # Hard-coded position for the frame-counter clip.
    clip_list.append(clip_dict['frames'].set_position((50,580)))
    #clip_list.append(clip_dict['observations'].set_position((0,50)))

    cc = mpy.CompositeVideoClip(clip_list,composite_size).subclip(0,duration)
    #cc.ipython_display()
    return cc
github musikisomorphie / swd / progressive_training / misc.py View on Github external
def setup_text_label(text, font='Calibri', fontsize=32, padding=6, glow_size=2.0, glow_coef=3.0, glow_exp=2.0, cache_size=100): # => (alpha, glow)
    """Render *text* into an ``(alpha, glow)`` pair of 2-D float arrays.

    ``alpha`` is the padded text mask rasterized by moviepy's TextClip
    (ImageMagick-backed); ``glow`` is a Gaussian-blurred, gain-shaped halo
    derived from it. Results are memoized in the module-level
    ``_text_label_cache`` (evicted LRU once it holds ``cache_size`` entries;
    the ``popitem(last=False)`` call assumes it is an OrderedDict).
    """
    # Lookup from cache.
    key = (text, font, fontsize, padding, glow_size, glow_coef, glow_exp)
    if key in _text_label_cache:
        value = _text_label_cache[key]
        # Delete + reinsert moves the entry to the most-recently-used end.
        del _text_label_cache[key] # LRU policy
        _text_label_cache[key] = value
        return value

    # Limit cache size: drop least-recently-used entries to make room.
    while len(_text_label_cache) >= cache_size:
        _text_label_cache.popitem(last=False)

    # Render text: take the TextClip's alpha mask at t=0, pad with zeros.
    import moviepy.editor # pip install moviepy
    alpha = moviepy.editor.TextClip(text, font=font, fontsize=fontsize).mask.make_frame(0)
    alpha = np.pad(alpha, padding, mode='constant', constant_values=0.0)
    # Blur, then apply gain (glow_coef) and a soft-clip curve (glow_exp).
    glow = scipy.ndimage.gaussian_filter(alpha, glow_size)
    glow = 1.0 - np.maximum(1.0 - glow * glow_coef, 0.0) ** glow_exp

    # Add to cache.
    value = (alpha, glow)
    _text_label_cache[key] = value
    return value
github goberoi / faceit / faceit.py View on Github external
def add_caption(caption, clip):
    """Overlay *caption* as a styled text banner on *clip*.

    The caption is rendered in white 80pt Amiri on a 60%-opaque black
    background with a 40px margin, and lasts for the full duration of
    *clip*. Returns the composited clip.
    """
    banner = TextClip(caption, font='Amiri-regular', color='white', fontsize=80)
    banner = banner.margin(40)
    banner = banner.set_duration(clip.duration)
    banner = banner.on_color(color=(0,0,0), col_opacity=0.6)
    return CompositeVideoClip([clip, banner])
                video = add_caption("Original", video)
github google / starthinker / starthinker / task / video / run.py View on Github external
# NOTE(review): fragment of a larger generator (the enclosing `def` is not
# shown, and extraction has mangled the indentation) — `effect`, `clip`,
# `mp` and `get_text_image` come from the surrounding scope.
# Apply optional fade-in/out and rotation effects if configured.
if effect.get('fade', {}).get('in'):
      clip = clip.crossfadein(effect['fade']['in'])

    if effect.get('fade', {}).get('out'):
      clip = clip.crossfadeout(effect['fade']['out'])

    if effect.get('position', {}).get('rotate'):
      clip = clip.rotate(effect['position']['rotate'])
 
    yield clip

  if 'text' in effect:

    # Requires working installation of ImageMagick
    # NOTE(review): the bare `except:` below catches everything (including
    # KeyboardInterrupt/SystemExit) and silently masks config errors such as a
    # missing 'message' key — `except Exception:` would be safer.
    try:
      clip = mp.TextClip(
        txt=effect['text']['message'], 
        color=effect['text'].get('color', '#666666'), 
        font=effect['text'].get('font', 'Courier'), 
        fontsize=effect['text'].get('size', 12), 
        align=effect['text'].get('align', 'center'), 
        kerning=effect['text'].get('kerning', 0)
      )

    # Alternate method using Pillow - no need to set position text is already positioned within image
    except:
      clip = (
        mp.ImageClip(
          get_text_image(effect)
        )
      )