How to use mirdata - 10 common examples

To help you get started, we’ve selected a few mirdata examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github mir-dataset-loaders / mirdata / tests / test_rwc_jazz.py View on Github external
+ 'annotations/AIST.RWC-MDB-J-2001.CHORUS/RM-J004.CHORUS.TXT',
        'beats_path': 'tests/resources/mir_datasets/RWC-Jazz/'
            + 'annotations/AIST.RWC-MDB-J-2001.BEAT/RM-J004.BEAT.TXT',
        'piece_number': 'No. 4',
        'suffix': 'M01',
        'track_number': 'Tr. 04',
        'title': 'Crescent Serenade (Piano Solo)',
        'artist': 'Makoto Nakamura',
        'duration': 167,
        'variation': 'Instrumentation 1',
        'instruments': 'Pf',
    }

    expected_property_types = {
        'beats': utils.BeatData,
        'sections': utils.SectionData
    }

    run_track_tests(track, expected_attributes, expected_property_types)

    # test audio loading functions
    y, sr = track.audio
    assert sr == 44100
    assert y.shape == (44100 * 2,)
github mir-dataset-loaders / mirdata / tests / test_guitarset.py View on Github external
'audio_hex_path': 'tests/resources/mir_datasets/GuitarSet/'
            + 'audio_hex-pickup_original/03_BN3-119-G_solo_hex.wav',
        'audio_mic_path': 'tests/resources/mir_datasets/GuitarSet/'
            + 'audio_mono-mic/03_BN3-119-G_solo_mic.wav',
        'audio_mix_path': 'tests/resources/mir_datasets/GuitarSet/'
            + 'audio_mono-pickup_mix/03_BN3-119-G_solo_mix.wav',
        'jams_path': 'tests/resources/mir_datasets/GuitarSet/'
            + 'annotation/03_BN3-119-G_solo.jams',
        'player_id': '03',
        'tempo': 119,
        'mode': 'solo',
        'style': 'Bossa Nova',
    }

    expected_property_types = {
        'beats': utils.BeatData,
        'leadsheet_chords': utils.ChordData,
        'inferred_chords': utils.ChordData,
        'key_mode': utils.KeyData,
        'pitch_contours': dict,
        'notes': dict,
    }

    run_track_tests(track, expected_attributes, expected_property_types)

    assert type(track.pitch_contours['E']) is utils.F0Data
    assert type(track.notes['E']) is utils.NoteData
github mir-dataset-loaders / mirdata / tests / test_salami.py View on Github external
def test_load_sections():
    # Parse a section annotation fixture that exists on disk.
    annotation_file = (
        'tests/resources/mir_datasets/Salami/'
        + 'salami-data-public-hierarchy-corrections/annotations/2/parsed/textfile1_uppercase.txt'
    )
    parsed = salami.load_sections(annotation_file)

    # The loader must return a SectionData holding an ndarray of
    # intervals and a plain list of labels.
    assert type(parsed) == utils.SectionData
    assert type(parsed.intervals) is np.ndarray
    assert type(parsed.labels) is list

    # Check the parsed values against the known fixture contents.
    expected_starts = np.array([0.0, 0.464399092, 14.379863945, 263.205419501])
    expected_ends = np.array([0.464399092, 14.379863945, 263.205419501, 264.885215419])
    assert np.array_equal(parsed.intervals[:, 0], expected_starts)
    assert np.array_equal(parsed.intervals[:, 1], expected_ends)
    assert np.array_equal(
        parsed.labels, np.array(['Silence', 'A', 'B', 'Silence'])
    )
github mir-dataset-loaders / mirdata / tests / test_jams_utils.py View on Github external
]
    f0_data_6 = [(None, None)]
    f0_data_7 = [
        (
            utils.EventData(
                np.array([0.2, 0.3]),
                np.array([0.3, 0.4]),
                np.array(['event A', 'event B']),
            ),
            None,
        )
    ]

    jam_1 = jams_utils.jams_converter(f0_data=f0_data_1)
    jam_2 = jams_utils.jams_converter(f0_data=f0_data_2)
    jam_3 = jams_utils.jams_converter(f0_data=f0_data_3)
    jam_6 = jams_utils.jams_converter(f0_data=f0_data_6)

    time, duration, value, confidence = get_jam_data(jam_1, 'pitch_contour', 0)
    assert time == [0.016, 0.048]
    assert duration == [0.0, 0.0]
    assert value == [
        {'frequency': 0.0, 'index': 0, 'voiced': False},
        {'frequency': 260.9, 'index': 0, 'voiced': True},
    ]
    assert confidence == [0.0, 1.0]

    assert jam_2.annotations[0]['sandbox']['name'] == 'f0s_1'

    time, duration, value, confidence = get_jam_data(jam_3, 'pitch_contour', 0)
    assert time == [0.016, 0.048]
    assert duration == [0.0, 0.0]
github mir-dataset-loaders / mirdata / tests / test_jams_utils.py View on Github external
jam_1 = jams_utils.jams_converter(lyrics_data=[(None, None)], metadata=metadata_1)

    assert jam_1['file_metadata']['title'] == 'Le ciel est blue'
    assert jam_1['file_metadata']['artist'] == 'Meatloaf'
    assert jam_1['file_metadata']['duration'] == 1.5
    assert jam_1['sandbox']['favourite_color'] == 'rainbow'

    # test metadata value None
    metadata_2 = {
        'duration': 1.5,
        'artist': 'breakmaster cylinder',
        'title': None,
        'extra': None,
    }
    jam2 = jams_utils.jams_converter(metadata=metadata_2)
    assert jam2.validate()
    assert jam2['file_metadata']['duration'] == 1.5
    assert jam2['file_metadata']['artist'] == 'breakmaster cylinder'
    assert jam2['file_metadata']['title'] == ''
    assert 'extra' not in jam2['sandbox']
github mir-dataset-loaders / mirdata / tests / test_jams_utils.py View on Github external
def test_metadata():
    # Fully populated metadata: three recognized file_metadata fields
    # plus one unrecognized key that should end up in the sandbox.
    full_meta = {
        'duration': 1.5,
        'artist': 'Meatloaf',
        'title': 'Le ciel est blue',
        'favourite_color': 'rainbow',
    }

    jam_full = jams_utils.jams_converter(
        lyrics_data=[(None, None)], metadata=full_meta
    )

    assert jam_full['file_metadata']['title'] == 'Le ciel est blue'
    assert jam_full['file_metadata']['artist'] == 'Meatloaf'
    assert jam_full['file_metadata']['duration'] == 1.5
    assert jam_full['sandbox']['favourite_color'] == 'rainbow'

    # Metadata with None values: the converter should coerce a None
    # title to the empty string and drop None sandbox entries.
    sparse_meta = {
        'duration': 1.5,
        'artist': 'breakmaster cylinder',
        'title': None,
        'extra': None,
    }
    jam_sparse = jams_utils.jams_converter(metadata=sparse_meta)
    assert jam_sparse.validate()
    assert jam_sparse['file_metadata']['duration'] == 1.5
github mir-dataset-loaders / mirdata / tests / test_jams_utils.py View on Github external
assert duration == [0.227, 0.51]
    assert value == ['is', 'cool']
    assert confidence == [None, None]

    time, duration, value, confidence = get_jam_data(jam_6, 'lyrics', 0)
    assert time == []
    assert duration == []
    assert value == []
    assert confidence == []

    assert type(jam_1) == jams.JAMS

    with pytest.raises(TypeError):
        jams_utils.jams_converter(lyrics_data=lyrics_data_4)
    with pytest.raises(TypeError):
        jams_utils.jams_converter(lyrics_data=lyrics_data_5)
    with pytest.raises(TypeError):
        jams_utils.jams_converter(lyrics_data=lyrics_data_7)
github mir-dataset-loaders / mirdata / tests / test_jams_utils.py View on Github external
assert value == [
        {'frequency': 0.0, 'index': 0, 'voiced': False},
        {'frequency': 230.5, 'index': 0, 'voiced': True},
    ]
    assert confidence == [0.0, 1.0]

    time, duration, value, confidence = get_jam_data(jam_6, 'pitch_contour', 0)
    assert time == []
    assert duration == []
    assert value == []
    assert confidence == []

    assert type(jam_1) == jams.JAMS

    with pytest.raises(TypeError):
        jams_utils.jams_converter(f0_data=f0_data_4)
    with pytest.raises(TypeError):
        jams_utils.jams_converter(f0_data=f0_data_5)
    with pytest.raises(TypeError):
        jams_utils.jams_converter(f0_data=f0_data_7)
github mir-dataset-loaders / mirdata / tests / test_jams_utils.py View on Github external
],
    ]
    lyrics_data_6 = [(None, None)]
    lyrics_data_7 = [
        (
            utils.EventData(
                np.array([0.2, 0.3]),
                np.array([0.3, 0.4]),
                np.array(['event A', 'event B']),
            ),
            None,
        )
    ]

    jam_1 = jams_utils.jams_converter(lyrics_data=lyrics_data_1)
    jam_2 = jams_utils.jams_converter(lyrics_data=lyrics_data_2)
    jam_3 = jams_utils.jams_converter(lyrics_data=lyrics_data_3)
    jam_6 = jams_utils.jams_converter(lyrics_data=lyrics_data_6)

    time, duration, value, confidence = get_jam_data(jam_1, 'lyrics', 0)
    assert time == [0.027, 0.232]
    assert duration == [0.2, 0.51]
    assert value == ['The', 'Test']
    assert confidence == [None, None]

    assert jam_2.annotations[0]['sandbox']['name'] == 'lyrics_1'

    time, duration, value, confidence = get_jam_data(jam_3, 'lyrics', 0)
    assert time == [0.027, 0.232]
    assert duration == [0.2, 0.51]
    assert value == ['The', 'Test']
    assert confidence == [None, None]
github mir-dataset-loaders / mirdata / tests / test_groove_midi.py View on Github external
def test_download(httpserver):
    """Download the Groove MIDI fixture archive from a local test server
    and check the extracted layout.

    Uses the pytest-localserver ``httpserver`` fixture to serve a small
    zip archive, then points ``groove_midi.REMOTES`` at it so
    ``download`` pulls from the local server instead of the network.
    """
    data_home = 'tests/resources/mir_datasets/Groove-MIDI_download'
    # Start from a clean slate so leftovers from prior runs can't mask failures.
    if os.path.exists(data_home):
        shutil.rmtree(data_home)

    # Serve the fixture archive; close the file handle promptly rather
    # than leaking it (bare open().read() triggers ResourceWarning).
    with open('tests/resources/download/groove-v1-0.0.zip', 'rb') as fhandle:
        httpserver.serve_content(fhandle.read())

    # NOTE(review): this rebinds the module-level REMOTES for the whole
    # test session; assumes no other test depends on the original value.
    groove_midi.REMOTES = {
        'all': download_utils.RemoteFileMetadata(
            filename='groove-v1-0.0.zip',
            url=httpserver.url,
            checksum=('97a9a888d2a65cc87bb26e74df08b011'),
            destination_dir=None,
        )
    }
    groove_midi.download(data_home=data_home)

    # The archive contents should be flattened into data_home, with no
    # leftover 'groove' wrapper directory.
    assert os.path.exists(data_home)
    assert not os.path.exists(os.path.join(data_home, 'groove'))

    assert os.path.exists(os.path.join(data_home, "info.csv"))
    track = groove_midi.Track('drummer1/eval_session/1', data_home=data_home)
    assert os.path.exists(track.midi_path)
    assert os.path.exists(track.audio_path)