How to use the pydub.AudioSegment.empty function in pydub

To help you get started, we’ve selected a few pydub examples based on popular ways the library is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github gswyhq / hello-world / speech_synthesis_语音合成 / 日语五十音图的听写.py View on Github external
'わ行': ['わ', 'を'],
}

def run(key, yin_list):
    """Concatenate the audio clips for every syllable in *yin_list* and
    export the result as ``<key>.mp3`` under EXPORT_PATH.

    Relies on module-level state: wushiyintu (syllable order), words
    (per-syllable clips), silent (gap segment), and EXPORT_PATH.
    """
    combined = AudioSegment.empty()
    for syllable in yin_list:
        pos = wushiyintu.index(syllable)
        combined = combined + (words[pos] + silent)
    # Save each column of the kana table as its own audio file.
    combined.export(os.path.join(EXPORT_PATH, "{}.mp3".format(key)), format='mp3')

# Export one MP3 per row/column group of the kana chart.
for key, yin_list in fenhangduan_dict.items():
    run(key=key, yin_list=yin_list)

random.shuffle(words) # randomize the order of the fifty sounds
new = AudioSegment.empty()
for i in words:
    new += i + silent
# Save one shuffled audio file to use for dictation practice.
new.export(os.path.join(EXPORT_PATH, 'listening.mp3'), format='mp3')

# Author: ClarenceYK
# Link: http://www.jianshu.com/p/a9291fa603f6
# Source: Jianshu

def main():
    """No-op entry point; all work in this script happens at import time."""
    return None


# Standard script entry point (main itself is currently a no-op).
if __name__ == '__main__':
    main()
github gswyhq / hello-world / speech_synthesis_语音合成 / 日语五十音图的听写.py View on Github external
def run(key, yin_list):
    """Concatenate the clips for each syllable in yin_list and export the
    result as <key>.mp3 under EXPORT_PATH.

    Uses module-level globals: wushiyintu, words, silent, EXPORT_PATH.
    """
    new = AudioSegment.empty()
    for name in yin_list:
        index = wushiyintu.index(name)
        new += words[index]+silent
    # Save each column of the kana table as one audio file.
    new.export(os.path.join(EXPORT_PATH, "{}.mp3".format(key)), format='mp3')
github PatrickDuncan / cleansio / cleansio / censor / censor_file.py View on Github external
def __create_clean_segment(cls, censored_chunks):
        """Join the censored chunks into one clean AudioSegment.

        Each chunk may require its start muted by an amount dictated by the
        previous chunk's ``mute_next_start`` attribute: that span is replaced
        with silence and the chunk's own audio resumes after it.
        """
        clean_file = AudioSegment.empty()
        s_mute = 0  # amount to mute at the next chunk's start — presumably milliseconds (pydub slicing units); verify
        for wrapper in censored_chunks: # Join the chunks together
            # Mute the start of a chunk based on the previous chunk
            clean_file += \
                AudioSegment.silent(duration=s_mute) + wrapper.segment[s_mute:]
            s_mute = wrapper.mute_next_start
        return clean_file
github KeelyHill / Prime-Number-Audio-Book / prime_audio.py View on Github external
def processRangeForLines(range, lines, number_sounds):
    """Concatenate digit sound clips for each line of digits in *range*.

    :param range: iterable of indices into *lines* to process
                  (NOTE: shadows the builtin; kept for caller compatibility)
    :param lines: list of strings of digits (e.g. prime numbers)
    :param number_sounds: sequence/mapping of 10 AudioSegments, one per digit
    :returns: a single AudioSegment with all selected lines concatenated
    """
    from pydub import AudioSegment # required here due to sending off in pp
    from datetime import datetime

    def append_string_to_audio_segment(string, segment):
        """Helper: append the sound of each digit in *string* to *segment*."""
        for num in string:
            segment = segment + number_sounds[int(num)]
        return segment

    TOTAL_LINES = len(lines)
    # Report progress roughly every 20% of lines; floor to int and clamp to
    # at least 1 so the modulo below is well-defined (a float denominator
    # would almost never hit exactly 0, silencing progress output).
    PERCENT_DENOM = max(1, TOTAL_LINES // 5)

    audio = AudioSegment.empty() # init an output

    counter = 0
    for i in range:
        line = lines[i]

        counter += 1
        if counter % PERCENT_DENOM == 0:  # prints a % status every so often
            # print() does no %-formatting, so use a single literal "%".
            print(round((counter / TOTAL_LINES) * 100), "% concatenating.", datetime.now().strftime("%Y-%m-%d %H:%M:%S"))

        audio = append_string_to_audio_segment(line, audio)
    return audio
github MaxStrange / AudioSegment / audiosegment.py View on Github external
def empty():
    """
    Creates a zero-duration AudioSegment object.

    :returns: An empty AudioSegment object.
    """
    return AudioSegment(pydub.AudioSegment.empty(), "")
github antiboredom / audiogrep / audiogrep / audiogrep.py View on Github external
def compose(segments, out='out.mp3', padding=0, crossfade=0, layer=False):
    '''Stiches together a new audiotrack'''

    files = {}

    working_segments = []

    audio = AudioSegment.empty()

    if layer:
        total_time = max([s['end'] - s['start'] for s in segments]) * 1000
        audio = AudioSegment.silent(duration=total_time)

    for i, s in enumerate(segments):
        try:
            start = s['start'] * 1000
            end = s['end'] * 1000
            f = s['file'].replace('.transcription.txt', '')
            if f not in files:
                if f.endswith('.wav'):
                    files[f] = AudioSegment.from_wav(f)
                elif f.endswith('.mp3'):
                    files[f] = AudioSegment.from_mp3(f)
github team-re-verb / RE-VERB / server / speech_diarization / model / utils.py View on Github external
def get_full_audio(frames):
    '''
    Gets the concated audio from frames

    :param frames: the frames to concat
    :type frames: list

    :returns: the concated frames
    '''
    combined = AudioSegment.empty()
    for frame in frames:
        combined = combined + frame
    return combined
github PatrickDuncan / cleansio / cleansio / censor / censor_realtime_mac.py View on Github external
def __init__(self, args, explicits):
        """Set up realtime-censoring state.

        :param args: parsed CLI args; output_encoding and output_location are
            forwarded to the superclass
        :param explicits: explicit-word list forwarded to the superclass
        """
        # Fixed typo in the log message ("Initialzed" -> "Initialized").
        print('Initialized realtime censor object')
        super().__init__(explicits, args.output_encoding, args.output_location)
        self.__switch_audio_source()
        # Track processed chunks via an env var, initialized to an empty list
        # literal — presumably parsed as JSON elsewhere; verify against reader.
        create_env_var('CLEANSIO_CHUNKS_LIST', '[]')
        self.args = args
        self.directory = create_temp_dir()
        # Recorded chunk files share a timestamped filename prefix.
        self.chunk_prefix = self.directory + time_filename() + '-'
        self.clean_file = AudioSegment.empty()  # accumulates censored audio
        self.processing_queue = []
        self.processing_lock = threading.Lock()
        self.playback_queue = []
        self.playback_lock = threading.Lock()
        self.samplerate = 44100 # Hertz
        self.duration = 5 # seconds