How to use the mirdata.utils module in mirdata

To help you get started, we’ve selected a few mirdata.utils examples, based on popular ways the module is used in public projects.

github mir-dataset-loaders/mirdata: tests/test_salami.py
# imports implied by the snippet's usage
import numpy as np

from mirdata import salami, utils


def test_load_sections():
    # load a file which exists
    sections_path = (
        'tests/resources/mir_datasets/Salami/'
        + 'salami-data-public-hierarchy-corrections/annotations/2/parsed/textfile1_uppercase.txt'
    )
    section_data = salami.load_sections(sections_path)

    # check types
    assert type(section_data) == utils.SectionData
    assert type(section_data.intervals) is np.ndarray
    assert type(section_data.labels) is list

    # check values
    assert np.array_equal(
        section_data.intervals[:, 0],
        np.array([0.0, 0.464399092, 14.379863945, 263.205419501]),
    )
    assert np.array_equal(
        section_data.intervals[:, 1],
        np.array([0.464399092, 14.379863945, 263.205419501, 264.885215419]),
    )
    assert np.array_equal(
        section_data.labels, np.array(['Silence', 'A', 'B', 'Silence'])
    )
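
The test above shows that load_sections returns a utils.SectionData whose intervals field is an (n, 2) array of start/end times in seconds and whose labels field is a list of section names. Here is a minimal sketch of building one by hand, assuming the positional constructor used in the rwc_classical example below:

import numpy as np

from mirdata import utils

# intervals: one row per section, columns are start and end time in seconds
intervals = np.array([[0.0, 10.5], [10.5, 42.0]])
labels = ['intro', 'verse']
section_data = utils.SectionData(intervals, labels)

print(section_data.intervals[:, 0])  # section start times
print(section_data.labels)           # ['intro', 'verse']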
github mir-dataset-loaders/mirdata: mirdata/rwc_classical.py
import csv
import os

import numpy as np

from mirdata import utils


def load_sections(sections_path):
    if not os.path.exists(sections_path):
        return None
    begs = []  # timestamps of section beginnings
    ends = []  # timestamps of section endings
    secs = []  # section labels

    with open(sections_path, 'r') as fhandle:
        reader = csv.reader(fhandle, delimiter='\t')
        for line in reader:
            # times are stored in 100ths of a second; convert to seconds
            begs.append(float(line[0]) / 100.0)
            ends.append(float(line[1]) / 100.0)
            secs.append(line[2])

    return utils.SectionData(np.array([begs, ends]).T, secs)
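
As a quick sanity check, the parser above can be exercised on a synthetic annotation: three tab-separated columns (begin, end, label), with times in 100ths of a second, per the function body. The file here is generated on the fly rather than taken from RWC:

import tempfile

# two sections, times in 100ths of a second (0-10.5 s and 10.5-42 s)
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as fhandle:
    fhandle.write('0\t1050\tintro\n1050\t4200\tverse\n')

section_data = load_sections(fhandle.name)
print(section_data.intervals)  # [[ 0.  10.5], [10.5 42. ]]
print(section_data.labels)     # ['intro', 'verse']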
github mir-dataset-loaders/mirdata: mirdata/gtzan_genre.py
from mirdata import download_utils
from mirdata import track
from mirdata import utils


DATASET_DIR = "GTZAN-Genre"

REMOTES = {
    'all': download_utils.RemoteFileMetadata(
        filename="genres.tar.gz",
        url="http://opihi.cs.uvic.ca/sound/genres.tar.gz",
        checksum="5b3d6dddb579ab49814ab86dba69e7c7",
        destination_dir="gtzan_genre",
    )
}

DATA = utils.LargeData("gtzan_genre_index.json")


class Track(track.Track):
    """gtzan_genre Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored. default=None
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        audio_path (str): path to the audio file
        genre (str): annotated genre
        track_id (str): track id

    """
github mir-dataset-loaders/mirdata: mirdata/msd.py
import os

from mirdata import utils


# DATASET_DIR is defined at module level (not shown here)
def _filename(track_id, data_home=None):
    if data_home is None:
        data_home = utils.get_default_dataset_path(DATASET_DIR)

    # format: 'data/J/D/V/TRJDVIB12903CF9F35.h5'
    return os.path.join(
        data_home, 'data', track_id[2], track_id[3], track_id[4], track_id + '.h5'
    )
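
Given that fan-out scheme, characters 2-4 of the track ID ('J', 'D', 'V' in the comment's example) pick the shard directories. A call with a made-up data_home illustrates the result:

print(_filename('TRJDVIB12903CF9F35', data_home='/data/msd'))
# -> /data/msd/data/J/D/V/TRJDVIB12903CF9F35.h5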
github mir-dataset-loaders/mirdata: mirdata/medley_solos_db.py
from mirdata import download_utils, utils


# AUDIO_REMOTE and ANNOTATION_REMOTE are module-level constants (not shown here)
def download(data_home=None):
    """Download Medley-solos-DB.

    Args:
        data_home (str): Local path where the dataset is stored.
            If `None`, looks for the data in the default directory, `~/mir_datasets`
    """
    if data_home is None:
        data_home = utils.get_default_dataset_path(DATASET_DIR)

    download_utils.downloader(
        data_home,
        tar_downloads=[AUDIO_REMOTE],
        file_downloads=[ANNOTATION_REMOTE],
        cleanup=True,
    )
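
A typical call, using the module path shown in the header above; the explicit data_home is optional and the directory here is only an example:

from mirdata import medley_solos_db

# fetch audio and annotations into a chosen directory
medley_solos_db.download(data_home='/data/Medley-solos-DB')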
github mir-dataset-loaders/mirdata: mirdata/tinysol.py
# constructor of the tinysol Track class; DATA is the module-level
# utils.LargeData index, as in the gtzan_genre example above
def __init__(self, track_id, data_home=None):
        if track_id not in DATA.index:
            raise ValueError("{} is not a valid track ID in TinySOL".format(track_id))

        self.track_id = track_id

        if data_home is None:
            data_home = utils.get_default_dataset_path(DATASET_DIR)

        self._data_home = data_home
        self._track_paths = DATA.index[track_id]

        metadata = DATA.metadata(data_home)
        if metadata is not None and track_id in metadata:
            self._track_metadata = metadata[track_id]
        else:
            self._track_metadata = {
                "Family": None,
                "Instrument (abbr.)": None,
                "Instrument (in full)": None,
                "Technique (abbr.)": None,
                "Technique (in full)": None,
                "Pitch": None,
                "Pitch ID": None,