How to use the `essentia.standard.AudioOnsetsMarker` function in Essentia

To help you get started, we’ve selected a few essentia examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

Example from github.com/urinieto/msaf — `featextract.py` (view on GitHub):
human annotations. It creates an audio file with the estimated
        beats if needed."""

    # Output file
    out_file = file_struct.features_file

    if os.path.isfile(out_file) and not overwrite:
        return  # Do nothing, file already exist and we are not overwriting it

    # Compute the features for the given audio file
    audio, features = compute_features_for_audio_file(file_struct.audio_file)

    # Save output as audio file
    if audio_beats:
        logging.info("Saving Beats as an audio file")
        marker = ES.AudioOnsetsMarker(onsets=features["beats"], type='beep',
                                      sampleRate=msaf.Anal.sample_rate)
        marked_audio = marker(audio)
        ES.MonoWriter(filename='beats.wav',
                      sampleRate=msaf.Anal.sample_rate)(marked_audio)

    # Read annotations if they exist in path/references_dir/file.jams
    if os.path.isfile(file_struct.ref_file):
        jam = jams2.load(file_struct.ref_file)

        # If beat annotations exist, compute also annotated beatsyn features
        if jam.beats != []:
            logging.info("Reading beat annotations from JAMS")
            annot = jam.beats[0]
            annot_beats = []
            for data in annot.data:
                annot_beats.append(data.time.value)
Example from github.com/scherroman/mugen — `mugen/audio/audio.py` (view on GitHub):
def preview_audio_beats(audio_file, beat_locations):
    """Render a preview WAV of *audio_file* with audible markers at *beat_locations*.

    Loads the audio, overlays an onset marker at every beat location, and
    writes the result to ``OUTPUT_PATH_BASE + "marked_audio_preview.wav"``.
    """
    print("Creating audio preview...")

    # Load the source audio and overlay a marker at each detected beat.
    source_audio = a_util.load_audio(audio_file)
    marked_audio = essentia.standard.AudioOnsetsMarker(onsets=beat_locations)(source_audio)

    # Write the marked preview as a 320 kbps mono file.
    preview_path = s.OUTPUT_PATH_BASE + "marked_audio_preview.wav"
    essentia.standard.MonoWriter(filename=preview_path, bitrate=320)(marked_audio)
Example from github.com/urinieto/SegmenterMIREX2014 — `features.py` (view on GitHub):
# Estimate Beats
    features = {}
    ticks, conf = compute_beats(audio)
    ticks = np.concatenate(([0], ticks, [duration]))  # Add first and last time
    ticks = essentia.array(np.unique(ticks))
    features["beats"] = ticks.tolist()

    # Compute Beat-sync features
    features["mfcc"], features["hpcp"], features["tonnetz"] = \
        compute_beatsync_features(ticks, audio)

    # Save output as audio file
    if audio_beats:
        logging.info("Saving Beats as an audio file")
        marker = ES.AudioOnsetsMarker(onsets=ticks, type='beep',
                                      sampleRate=SAMPLE_RATE)
        marked_audio = marker(audio)
        ES.MonoWriter(filename='beats.wav',
                      sampleRate=SAMPLE_RATE)(marked_audio)

    # Save features
    with open(features_file, "w") as f:
        json.dump(features, f)

    return list_to_array(features)