How to use the moviepy.editor.AudioFileClip class in moviepy

To help you get started, we’ve selected a few moviepy.editor.AudioFileClip examples based on popular ways it is used in public projects.

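Before the project examples below, here is a minimal, self-contained sketch of the typical pattern, using placeholder file names: load the soundtrack with AudioFileClip, trim it to the video's length, and attach it with set_audio.

from moviepy.editor import AudioFileClip, VideoFileClip

# placeholder file names, for illustration only
audio = AudioFileClip("soundtrack.mp3")
video = VideoFileClip("raw_clip.mp4")

# trim the audio to the video's duration and attach it to the video
audio = audio.subclip(0, video.duration)
video = video.set_audio(audio)

video.write_videofile("clip_with_audio.mp4", audio_codec="aac")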

github mgaitan / miau / miau.py
# default to a video with the same filename as the remix
        output_file = '{}.mp4'.format(os.path.basename(remix).rsplit('.')[0])
    output_extension = os.path.splitext(output_file)[1][1:]
    if output_extension not in extensions_dict:
        raise ValueError(
            'Output format not supported: {}'.format(output_extension)
        )
    output_type = extensions_dict[output_extension]['type']

    # map input files to moviepy clips
    mvp_clips = OrderedDict()
    for filename in clips:
        try:
            clip = VideoFileClip(filename)
        except KeyError:
            # VideoFileClip raises KeyError for inputs without a video stream,
            # so fall back to loading the file as an audio-only clip
            clip = AudioFileClip(filename)
        mvp_clips[filename] = clip

    if output_type == 'video' and not all(isinstance(clip, VideoFileClip) for clip in mvp_clips.values()):
        logging.error("Output is expected to be a video, but not all input clips are videos")
        return
    elif output_type == 'audio':
        # cast clips to audio if needed
        mvp_clips = OrderedDict([(k, ensure_audio(v)) for k, v in mvp_clips.items()])

    with open(remix) as remix_fh:
        try:
            # read data from a json file (as generated by --dump option)
            # this skips the alignment step
            remix_data = json.load(remix_fh)
        except json.JSONDecodeError:
            remix_fh.seek(0)
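The ensure_audio helper called above belongs to miau itself and is not part of this excerpt. A minimal sketch of what such a cast could look like, assuming the only goal is to take a VideoFileClip's soundtrack when the output is audio-only (miau's real helper may differ):

def ensure_audio(clip):
    # hypothetical re-implementation for illustration: a VideoFileClip carries
    # its soundtrack in .audio, while an AudioFileClip is already an audio clip
    return clip.audio if isinstance(clip, VideoFileClip) else clip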
github pyannote / pyannote-video / scripts / pyannote-face.py
if labels is not None:
        with open(labels, 'r') as f:
            labels = {}
            for line in f:
                identifier, label = line.strip().split()
                identifier = int(identifier)
                labels[identifier] = label

    video = Video(filename, ffmpeg=ffmpeg)

    from moviepy.editor import VideoClip, AudioFileClip

    make_frame = get_make_frame(video, tracking, landmark=landmark,
                                labels=labels, height=height, shift=shift)
    video_clip = VideoClip(make_frame, duration=video.duration)
    audio_clip = AudioFileClip(filename)
    clip = video_clip.set_audio(audio_clip)

    if t_end is None:
        t_end = video.duration

    clip.subclip(t_start, t_end).write_videofile(output, fps=video.frame_rate)
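For context on the VideoClip call above: VideoClip accepts any function mapping a time t (in seconds) to an RGB frame, and an AudioFileClip can be attached to it exactly as with file-based clips. A small self-contained sketch, with an invented frame generator and a placeholder audio file:

import numpy as np
from moviepy.editor import VideoClip, AudioFileClip

def make_frame(t):
    # toy generator: a 64x64 frame whose brightness varies over time
    value = int(255 * (t % 1.0))
    return np.full((64, 64, 3), value, dtype=np.uint8)

video_clip = VideoClip(make_frame, duration=5)
audio_clip = AudioFileClip("narration.wav").subclip(0, 5)  # placeholder file
video_clip.set_audio(audio_clip).write_videofile("overlay_demo.mp4", fps=24)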
github rohjunha / language-grounded-driving / speech_evaluator.py
images = [np.array(i) for i in images]
    video_from_memory(images, out_video_path, framerate=TARGET_FPS, revert=False)
    logger.info('save temporary video')

    # cut out the audio again
    audio_frames = int(round(duration * SAMPLE_WIDTH * SAMPLE_RATE))
    audio_data = audio_data[:audio_frames]
    save_audio(out_audio_path, audio_data)
    logger.info('save audio')

    # merge the audio to the video
    cmd = ['ffmpeg', '-y', '-i', str(out_video_path), '-c:v', 'libx264', str(tmp_video_path)]
    run(cmd)

    video_clip = editor.VideoFileClip(str(tmp_video_path))
    audio_clip = editor.AudioFileClip(str(out_audio_path))
    video_clip = video_clip.set_audio(audio_clip)
    video_clip.write_videofile(str(out_video_path), fps=30)
    logger.info('write the final video file {}'.format(out_video_path))
    if out_video_path.exists() and tmp_video_path.exists():
        tmp_video_path.unlink()
    return True
github msieg / deep-music-visualizer / visualize.py
output = model(noise_vector, class_vector, truncation)

    output_cpu=output.cpu().data.numpy()

    #convert to image array and add to frames
    for out in output_cpu:    
        im=np.array(toimage(out))
        frames.append(im)
        
    #empty cuda cache
    torch.cuda.empty_cache()



# Save video
aud = mpy.AudioFileClip(song, fps=44100)

if args.duration:
    aud.duration = args.duration

clip = mpy.ImageSequenceClip(frames, fps=22050 / frame_length)
clip = clip.set_audio(aud)
clip.write_videofile(outname, audio_codec='aac')
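One note on the duration handling above: assigning aud.duration mutates the clip in place, while moviepy's set_duration returns a new clip and is the more common idiom. An equivalent, non-mutating sketch, assuming args.duration holds a length in seconds as in the excerpt:

# same inputs as the excerpt above (song, args, mpy)
aud = mpy.AudioFileClip(song, fps=44100)
if args.duration:
    aud = aud.set_duration(args.duration)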
github scherroman / mugen / mugen / video / MusicVideo.py
segment = segment.set_start(previous_segment.end)

            # Apply any crossfade for the current segment
            for effect in segment.effects:
                if isinstance(effect, v_effects.CrossFade):
                    segment = segment.set_start(previous_segment.end - effect.duration)
                    segment = segment.crossfadein(effect.duration)
                    if segment.audio:
                        segment = segment.set_audio(segment.audio.audio_fadein(effect.duration))

            composite_video_segments.append(segment)

        music_video = CompositeVideoClip(composite_video_segments)

        if self.audio_file:
            music_video.audio = AudioFileClip(self.audio_file)

        return music_video
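Assigning music_video.audio directly, as above, has the same effect as calling set_audio on the composite clip. A minimal sketch of rendering such a composite with a soundtrack, using placeholder names for the segment list and the audio path:

from moviepy.editor import AudioFileClip, CompositeVideoClip

# composite_video_segments and "soundtrack.mp3" are placeholders for illustration
music_video = CompositeVideoClip(composite_video_segments)
music_video = music_video.set_audio(AudioFileClip("soundtrack.mp3"))
music_video.write_videofile("music_video.mp4", audio_codec="aac")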