How to use the mirdata.track.Track class in mirdata

To help you get started, we’ve selected a few mirdata examples based on popular ways it is used in public projects.
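
Every example below follows the same pattern: a dataset module defines a Track class that subclasses mirdata.track.Track and documents its attributes in the class docstring. A minimal usage sketch (the GTZAN-Genre track ID is illustrative, and the dataset is assumed to already live in the default ~/mir_datasets directory):

import mirdata.gtzan_genre as gtzan_genre

# Build a single track object by ID; pass data_home= if the dataset is
# stored somewhere other than the default location.
track = gtzan_genre.Track('blues.00000')

print(track)           # Track.__repr__ lists the documented attributes
print(track.genre)     # dataset-specific attribute, e.g. 'blues'
jam = track.to_jams()  # convert the track's annotations to a JAMS object
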


github mir-dataset-loaders / mirdata / tests / test_track.py
        def h(self):
            return "I'm a function!"

    expected1 = """Track(\n  a="asdf",\n  b=1.2345678,\n  """
    expected2 = """c={1: 'a', 'b': 2},\n  e=None,\n  """
    expected3 = """long="...{}",\n  """.format('b' * 50 + 'c' * 50)
    expected4 = """f: ThisObjectType,\n  g: I have an improper docstring,\n)"""

    test_track = TestTrack()
    actual = test_track.__repr__()
    assert actual == expected1 + expected2 + expected3 + expected4

    with pytest.raises(NotImplementedError):
        test_track.to_jams()

    class NoDocsTrack(track.Track):
        @property
        def no_doc(self):
            return "whee!"

    bad_track = NoDocsTrack()
    with pytest.raises(ValueError):
        bad_track.__repr__()
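
The test above pins down two contracts of the base class: to_jams() is abstract (the base implementation raises NotImplementedError), and __repr__ is built from attribute values and property docstrings, so a property without a docstring makes it fail. A toy sketch of that second behaviour (class and attribute names are made up):

from mirdata import track

class ToyTrack(track.Track):
    """Illustration only; not a real mirdata loader."""

    def __init__(self):
        self.name = 'toy'

    @property
    def undocumented(self):  # deliberately missing a docstring
        return 42

ToyTrack().__repr__()  # raises ValueError, like NoDocsTrack above
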
github mir-dataset-loaders / mirdata / mirdata / maestro.py
        raw_metadata = json.load(fhandle)

    metadata = {}
    for mdata in raw_metadata:
        track_id = mdata['midi_filename'].split('.')[0]
        metadata[track_id] = mdata

    metadata['data_home'] = data_home

    return metadata


DATA = utils.LargeData('maestro_index.json', _load_metadata)


class Track(track.Track):
    """MAESTRO Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored. default=None
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        audio_path (str): Path to the track's audio file
        canonical_composer (str): Composer of the piece, standardized on a
            single spelling for a given name.
        canonical_title (str): Title of the piece. Not guaranteed to be
            standardized to a single representation.
        duration (float): Duration in seconds, based on the MIDI file.
        midi_path (str): Path to the track's MIDI file
        split (str): Suggested train/validation/test split.
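
In the metadata loader above, track IDs are simply the MIDI file paths with their extension dropped. A tiny illustration of that convention (the file name is made up):

mdata = {'midi_filename': '2018/MIDI-Unprocessed_Chamber_Example.midi'}
track_id = mdata['midi_filename'].split('.')[0]
# track_id == '2018/MIDI-Unprocessed_Chamber_Example'
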
github mir-dataset-loaders / mirdata / mirdata / gtzan_genre.py
DATASET_DIR = "GTZAN-Genre"

REMOTES = {
    'all': download_utils.RemoteFileMetadata(
        filename="genres.tar.gz",
        url="http://opihi.cs.uvic.ca/sound/genres.tar.gz",
        checksum="5b3d6dddb579ab49814ab86dba69e7c7",
        destination_dir="gtzan_genre",
    )
}

DATA = utils.LargeData("gtzan_genre_index.json")


class Track(track.Track):
    """gtzan_genre Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored. default=None
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        audio_path (str): path to the audio file
        genre (str): annotated genre
        track_id (str): track id

    """

    def __init__(self, track_id, data_home=None):
        if track_id not in DATA.index:
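
The constructor above is cut off at its first line; in these loaders that check typically guards against unknown IDs before anything else happens. A rough sketch of the pattern (not the actual gtzan_genre source; the error message is illustrative):

    def __init__(self, track_id, data_home=None):
        # Reject IDs that are not present in the dataset index.
        if track_id not in DATA.index:
            raise ValueError(
                '{} is not a valid track ID in GTZAN-Genre'.format(track_id)
            )
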
github mir-dataset-loaders / mirdata / mirdata / rwc_classical.py
            'title': line[3],
            'composer': line[4],
            'artist': line[5],
            'duration': _duration_to_sec(line[6]),
            'category': line[7],
        }

    metadata_index['data_home'] = data_home

    return metadata_index


DATA = utils.LargeData('rwc_classical_index.json', _load_metadata)


class Track(track.Track):
    """rwc_classical Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored. default=None
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        artist (str): the track's artist
        audio_path (str): path of the audio file
        beats_path (str): path of the beat annotation file
        category (str): One of 'Symphony', 'Concerto', 'Orchestral',
            'Solo', 'Chamber', 'Vocal', or blank.
        composer (str): Composer of this Track.
        duration (float): Duration of the track in seconds
        piece_number (str): Piece number of this Track, [1-50]
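
The duration field above is produced by _duration_to_sec, which this snippet does not show. As a rough idea of what such a conversion involves, assuming RWC lists durations as 'MM:SS' strings (an assumption, not taken from the loader):

# Hypothetical stand-in for the unshown _duration_to_sec helper.
def duration_to_sec(duration_str):
    minutes, seconds = duration_str.split(':')
    return float(minutes) * 60 + float(seconds)

duration_to_sec('04:31')  # -> 271.0
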
github mir-dataset-loaders / mirdata / mirdata / orchset.py
            'only_winds': tf_dict[line[7]],
            'only_brass': tf_dict[line[8]],
            'composer': id_split[0],
            'work': '-'.join(id_split[1:-1]),
            'excerpt': id_split[-1][2:],
        }

    metadata_index['data_home'] = data_home

    return metadata_index


DATA = utils.LargeData('orchset_index.json', _load_metadata)


class Track(track.Track):
    """orchset Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored. default=None
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        alternating_melody (bool): True if the melody alternates between instruments
        audio_path_mono (str): path to the mono audio file
        audio_path_stereo (str): path to the stereo audio file
        composer (str): the work's composer
        contains_brass (bool): True if the track contains any brass instrument
        contains_strings (bool): True if the track contains any string instrument
        contains_winds (bool): True if the track contains any wind instrument
        excerpt (str): excerpt identifier within the work
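
Because Orchset's instrumentation metadata is exposed as plain boolean attributes, filtering the collection is a one-liner. A sketch, assuming this version of mirdata exposes a module-level load() that returns a {track_id: Track} dictionary (an assumption about the API, not shown above):

import mirdata.orchset as orchset

tracks = orchset.load()  # assumed to return {track_id: Track}

# Keep excerpts that feature strings but no brass.
strings_no_brass = [
    tid for tid, t in tracks.items()
    if t.contains_strings and not t.contains_brass
]
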
github mir-dataset-loaders / mirdata / mirdata / salami.py
            'artist': line[8],
            'annotator_1_time': line[10],
            'annotator_2_time': line[11],
            'class': line[14],
            'genre': line[15],
        }

    metadata_index['data_home'] = data_home

    return metadata_index


DATA = utils.LargeData('salami_index.json', _load_metadata)


class Track(track.Track):
    """salami Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored. default=None
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        annotator_1_id (str): number that identifies annotator 1
        annotator_1_time (str): time that annotator 1 took to complete the annotation
        annotator_2_id (str): number that identifies annotator 2
        annotator_2_time (str): time that annotator 2 took to complete the annotation
        artist (str): song artist
        audio_path (str): path to the audio file
        broad_genre (str): broad genre of the song
        duration (float): duration of song in seconds
github mir-dataset-loaders / mirdata / mirdata / medleydb_melody.py
    if not os.path.exists(metadata_path):
        logging.info('Metadata file {} not found.'.format(metadata_path))
        return None

    with open(metadata_path, 'r') as fhandle:
        metadata = json.load(fhandle)

    metadata['data_home'] = data_home
    return metadata


DATA = utils.LargeData('medleydb_melody_index.json', _load_metadata)


class Track(track.Track):
    """medleydb_melody Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored. default=None
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        artist (str): artist
        audio_path (str): path to the audio file
        genre (str): genre
        is_excerpt (bool): True if the track is an excerpt
        is_instrumental (bool): True if the track does not contain vocals
        melody1_path (str): path to the melody1 annotation file
        melody2_path (str): path to the melody2 annotation file
        melody3_path (str): path to the melody3 annotation file
github mir-dataset-loaders / mirdata / mirdata / ikala.py
        reader = csv.reader(fhandle, delimiter='\t')
        singer_map = {}
        for line in reader:
            if line[0] == 'singer':
                continue
            singer_map[line[1]] = line[0]

    singer_map['data_home'] = data_home

    return singer_map


DATA = utils.LargeData('ikala_index.json', _load_metadata)


class Track(track.Track):
    """ikala Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored. default=None
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        audio_path (str): path to the track's audio file
        f0_path (str): path to the track's f0 annotation file
        lyrics_path (str): path to the track's lyric annotation file
        section (str): section. Either 'verse' or 'chorus'
        singer_id (str): singer id
        song_id (str): song id of the track
        track_id (str): track id
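
Note the mapping direction in _load_metadata above: each row's second column becomes the key and the singer column becomes the value. A small worked example with made-up rows (the column layout is inferred from the header check, so treat it as an assumption):

# Made-up rows; column 0 is the singer, column 1 is presumably the song ID.
rows = [['singer', 'song_id'], ['1', '10161'], ['2', '10164']]

singer_map = {}
for line in rows:
    if line[0] == 'singer':
        continue  # skip the header row
    singer_map[line[1]] = line[0]

# singer_map == {'10161': '1', '10164': '2'}
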
github mir-dataset-loaders / mirdata / mirdata / dali.py
def _load_metadata(data_home):
    metadata_path = os.path.join(data_home, os.path.join('dali_metadata.json'))
    if not os.path.exists(metadata_path):
        logging.info('Metadata file {} not found.'.format(metadata_path))
        return None
    with open(metadata_path, 'r') as fhandle:
        metadata_index = json.load(fhandle)

    metadata_index['data_home'] = data_home
    return metadata_index


DATA = utils.LargeData('dali_index.json', _load_metadata)


class Track(track.Track):
    """DALI melody Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored.
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        album (str): the track's album
        annotation_path (str): path to the track's annotation file
        artist (str): the track's artist
        audio_path (str): path to the track's audio file
        audio_url (str): the track's YouTube ID
        dataset_version (int): dataset annotation version
        ground_truth (bool): True if the annotation is verified
        language (str): sung language
github mir-dataset-loaders / mirdata / mirdata / rwc_jazz.py
            'title': line[3],
            'artist': line[4],
            'duration': _duration_to_sec(line[5]),
            'variation': line[6],
            'instruments': line[7],
        }

    metadata_index['data_home'] = data_home

    return metadata_index


DATA = utils.LargeData('rwc_jazz_index.json', _load_metadata)


class Track(track.Track):
    """rwc_jazz Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored. default=None
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        artist (str): Artist name
        audio_path (str): path of the audio file
        beats_path (str): path of the beat annotation file
        duration (float): Duration of the track in seconds
        instruments (str): list of the instruments used on the track
        piece_number (str): Piece number of this Track, [1-50]
        sections_path (str): path of the section annotation file
        suffix (str): M01-M04
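
The dataset-specific attributes differ from loader to loader, but each Track class typically implements to_jams() so annotations can be exported in a common format. A closing sketch (the track ID and output path are illustrative, and the RWC Jazz data is assumed to be available locally):

import mirdata.rwc_jazz as rwc_jazz

track = rwc_jazz.Track('RM-J004')  # illustrative ID
jam = track.to_jams()              # JAMS object holding the track's annotations
jam.save('RM-J004.jams')           # jams.JAMS.save writes the file to disk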