How to use the nnmnkwii.datasets.vctk module in nnmnkwii

To help you get started, we've selected a few nnmnkwii examples based on popular ways it is used in public projects.

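Before the project examples, here is a minimal sketch of the typical workflow with nnmnkwii.datasets.vctk: list the available speakers, then collect transcription texts and wav paths with the two data sources used throughout the examples below. The corpus path is a placeholder assumption; point it at a local copy of VCTK.

from nnmnkwii.datasets import vctk

# Placeholder path to a local copy of the VCTK corpus (adjust to your setup)
DATA_ROOT = "/path/to/VCTK-Corpus"

# 108 speaker IDs such as "225", "226", ...
print(len(vctk.available_speakers))

# Collect transcription texts and per-utterance speaker labels
td = vctk.TranscriptionDataSource(DATA_ROOT, speakers=["225", "228"])
transcriptions = td.collect_files()  # list of transcription strings
speaker_labels = td.labels           # numeric label per utterance

# Collect the matching wav file paths
wav_paths = vctk.WavFileDataSource(DATA_ROOT, speakers=["225", "228"]).collect_files()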

github r9y9 / nnmnkwii / tests / test_real_datasets.py
def test_vctk_dummy():
    assert len(vctk.available_speakers) == 108
    data_sources = [vctk.TranscriptionDataSource,
                    vctk.WavFileDataSource]

    for data_source in data_sources:
        @raises(RuntimeError)
        def f(source):
            source("dummy")

        f(data_source)

github r9y9 / nnmnkwii / tests / test_real_datasets.py
    X = FileSourceDataset(data_source)
    assert X[0] == "Please call Stella."
    n_225 = len(X)

    data_source = MyTextDataSource(DATA_DIR, speakers=["p228"])
    X = FileSourceDataset(data_source)
    assert X[0] == "Please call Stella."
    n_228 = len(X)

    # multiple speakers
    data_source = MyTextDataSource(DATA_DIR, speakers=["225", "228"])
    X = FileSourceDataset(data_source)
    assert len(X) == n_225 + n_228

    # All speakers
    data_source = MyTextDataSource(DATA_DIR, speakers=vctk.available_speakers)
    X = FileSourceDataset(data_source)
    assert X[0] == "Please call Stella."
    assert len(X) == 44085

    # Speaker labels
    data_source = MyTextDataSource(DATA_DIR, speakers=["225", "228"])
    X = FileSourceDataset(data_source)
    labels = data_source.labels
    assert len(X) == len(labels)
    assert (labels[:n_225] == 0).all()
    assert (labels[n_225:] == 1).all()

    # max files
    max_files = 16
    data_source = MyTextDataSource(DATA_DIR, speakers=["225", "228"], max_files=max_files)
    X = FileSourceDataset(data_source)
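
The test excerpt above relies on a MyTextDataSource helper that is not shown. As a rough sketch (an assumption, not the test's actual definition), such a helper could subclass vctk.TranscriptionDataSource and return each collected transcription unchanged, so that FileSourceDataset can index the texts directly:

from nnmnkwii.datasets import FileSourceDataset, vctk

class MyTextDataSource(vctk.TranscriptionDataSource):
    def collect_features(self, text):
        # TranscriptionDataSource.collect_files() already yields the
        # transcription strings, so pass each one through unchanged.
        return text

data_source = MyTextDataSource("/path/to/VCTK-Corpus", speakers=["225"])
X = FileSourceDataset(data_source)
print(X[0])  # expected: "Please call Stella."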

github hash2430 / dv3_world / vctk.py
def build_from_path(in_dir, out_dir, num_workers=1, tqdm=lambda x: x):
    executor = ProcessPoolExecutor(max_workers=num_workers)
    futures = []

    speakers = vctk.available_speakers

    td = vctk.TranscriptionDataSource(in_dir, speakers=speakers)
    transcriptions = td.collect_files()
    speaker_ids = td.labels
    speaker_ids_unique = np.unique(speaker_ids)
    speaker_to_speaker_id = {}
    for i, j in zip(speakers, speaker_ids_unique):
        speaker_to_speaker_id[i] = j
    wav_paths = vctk.WavFileDataSource(
        in_dir, speakers=speakers).collect_files()

    _ignore_speaker = hparams.not_for_train_speaker.split(", ")
    ignore_speaker = [speaker_to_speaker_id[i] for i in _ignore_speaker]
    for index, (speaker_id, text, wav_path) in enumerate(
            zip(speaker_ids, transcriptions, wav_paths)):
        if speaker_id in ignore_speaker:

github r9y9 / deepvoice3_pytorch / vctk.py
def build_from_path(in_dir, out_dir, num_workers=1, tqdm=lambda x: x):
    executor = ProcessPoolExecutor(max_workers=num_workers)
    futures = []

    speakers = vctk.available_speakers

    td = vctk.TranscriptionDataSource(in_dir, speakers=speakers)
    transcriptions = td.collect_files()
    speaker_ids = td.labels
    wav_paths = vctk.WavFileDataSource(
        in_dir, speakers=speakers).collect_files()

    for index, (speaker_id, text, wav_path) in enumerate(
            zip(speaker_ids, transcriptions, wav_paths)):
        futures.append(executor.submit(
            partial(_process_utterance, out_dir, index + 1, speaker_id, wav_path, text)))
    return [future.result() for future in tqdm(futures)]

github Sharad24 / Neural-Voice-Cloning-with-Few-Samples / deepvoice3_pytorch / vctk.py
def build_from_path(in_dir, out_dir, num_workers=1, tqdm=lambda x: x):
    executor = ProcessPoolExecutor(max_workers=num_workers)
    futures = []

    speakers = vctk.available_speakers

    td = vctk.TranscriptionDataSource(in_dir, speakers=speakers)
    transcriptions = td.collect_files()
    speaker_ids = td.labels
    wav_paths = vctk.WavFileDataSource(
        in_dir, speakers=speakers).collect_files()

    for index, (speaker_id, text, wav_path) in enumerate(
            zip(speaker_ids, transcriptions, wav_paths)):
        futures.append(executor.submit(
            partial(_process_utterance, out_dir, index + 1, speaker_id, wav_path, text)))
    return [future.result() for future in tqdm(futures)]

github SforAiDl / Neural-Voice-Cloning-With-Few-Samples / dv3 / vctk_preprocess / prepare_vctk_labels.py
from subprocess import Popen, PIPE
from tqdm import tqdm


def do(cmd):
    print(cmd)
    p = Popen(cmd, shell=True)
    p.wait()


if __name__ == "__main__":
    args = docopt(__doc__)
    data_root = args["<data_root>"]
    out_dir = args["<out_dir>"]

    for idx in tqdm(range(len(vctk.available_speakers))):
        speaker = vctk.available_speakers[idx]

        wav_root = join(data_root, "wav48/p{}".format(speaker))
        txt_root = join(data_root, "txt/p{}".format(speaker))
        assert exists(wav_root)
        assert exists(txt_root)
        print(wav_root, txt_root)

        # Do alignments
        cmd = "python ./extract_feats.py -w {} -t {}".format(wav_root, txt_root)
        do(cmd)

        # Copy
        lab_dir = join(out_dir, "p{}".format(speaker))
        if not exists(lab_dir):
            os.makedirs(lab_dir)

github r9y9 / deepvoice3_pytorch / vctk_preprocess / prepare_htk_alignments_vctk.py
    parser.set_defaults(disfluency=False)
    parser.add_argument(
        '--log', default="INFO",
        help='the log level (DEBUG, INFO, WARNING, ERROR, or CRITICAL)')
    parser.add_argument('data_root', type=str, help='Data root')

    args = parser.parse_args()

    log_level = args.log.upper()
    logging.getLogger().setLevel(log_level)
    disfluencies = set(['uh', 'um'])

    data_root = args.data_root

    # Do for all speakers
    speakers = vctk.available_speakers

    # Collect all transcripts/wav files
    td = vctk.TranscriptionDataSource(data_root, speakers=speakers)
    transcriptions = td.collect_files()
    wav_paths = vctk.WavFileDataSource(
        data_root, speakers=speakers).collect_files()

    # Save dir
    save_dir = join(data_root, "lab")
    if not exists(save_dir):
        os.makedirs(save_dir)

    resources = gentle.Resources()

    for idx in tqdm(range(len(wav_paths))):
        transcript = transcriptions[idx]