How to use the nilearn.datasets.utils._get_dataset_dir function in nilearn

To help you get started, we’ve selected a few examples of nilearn.datasets.utils._get_dataset_dir, based on popular ways it is used in public projects.

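Before diving into the examples, here is a minimal sketch of calling the helper directly. _get_dataset_dir is a private utility (note the leading underscore), so its import path and signature can change between releases; the import below matches the nilearn versions these snippets were taken from, and 'my_dataset' is a placeholder name.

# Minimal sketch: resolve (and create, if needed) the cache directory
# for a named dataset. In the nilearn versions shown below the helper
# lives in nilearn.datasets.utils; newer releases may move it.
from nilearn.datasets.utils import _get_dataset_dir

# With data_dir=None the helper typically falls back to the
# NILEARN_SHARED_DATA / NILEARN_DATA environment variables and then
# to the default ~/nilearn_data.
data_dir = _get_dataset_dir('my_dataset', data_dir=None, verbose=1)
print(data_dir)  # e.g. /home/user/nilearn_data/my_dataset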

github nilearn / nilearn / nilearn / datasets / atlas.py
def _get_talairach_all_levels(data_dir=None, verbose=1):
    """Get the path to Talairach atlas and labels

    The atlas is downloaded and the files are created if necessary.

    The image contains all five levels of the atlas, each encoded on 8 bits
    (least significant octet contains the hemisphere, the next one the lobe,
    then gyrus, tissue, and ba).

    The labels json file contains
    [['level name', ['labels', 'for', 'this', 'level' ...]], ...],
    where the levels are in the order mentioned above.

    """
    data_dir = _get_dataset_dir(
        'talairach_atlas', data_dir=data_dir, verbose=verbose)
    img_file = os.path.join(data_dir, 'talairach.nii')
    labels_file = os.path.join(data_dir, 'talairach_labels.json')
    if os.path.isfile(img_file) and os.path.isfile(labels_file):
        return img_file, labels_file
    atlas_url = 'http://www.talairach.org/talairach.nii'
    temp_dir = mkdtemp()
    try:
        temp_file = _fetch_files(
            temp_dir, [('talairach.nii', atlas_url, {})], verbose=verbose)[0]
        atlas_img = nb.load(temp_file, mmap=False)
        atlas_img = check_niimg(atlas_img)
    finally:
        shutil.rmtree(temp_dir)
    labels = atlas_img.header.extensions[0].get_content()
    labels = labels.strip().decode('utf-8').split('\n')
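
The fetcher above illustrates a caching pattern worth reusing: resolve the dataset directory, short-circuit if the files are already on disk, and otherwise download into a temporary directory that is removed even when the download fails. A condensed sketch of the same pattern, with a placeholder dataset name, file name, and URL:

import os
import shutil
from tempfile import mkdtemp

from nilearn.datasets.utils import _fetch_files, _get_dataset_dir


def _get_my_atlas(data_dir=None, verbose=1):
    # Resolve (and create) the cache directory for this dataset.
    data_dir = _get_dataset_dir('my_atlas', data_dir=data_dir,
                                verbose=verbose)
    img_file = os.path.join(data_dir, 'atlas.nii')
    if os.path.isfile(img_file):
        return img_file  # already cached: nothing to download

    # Download into a throwaway directory so a failed or partial
    # download never pollutes the cache; the finally block always
    # cleans it up.
    temp_dir = mkdtemp()
    try:
        temp_file = _fetch_files(
            temp_dir, [('atlas.nii', 'https://example.org/atlas.nii', {})],
            verbose=verbose)[0]
        shutil.copy(temp_file, img_file)
    finally:
        shutil.rmtree(temp_dir)
    return img_file
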
github nilearn / nilearn / nilearn / datasets / func.py
        'RHV2d.nii.gz',
        'RHV2v.nii.gz',
        'RHV3A.nii.gz',
        'RHV3.nii.gz',
        'RHV4v.nii.gz',
        'RHVP.nii.gz'
    ]

    file_mask = [(os.path.join('mask', m), url, opts) for m in file_mask]

    file_names = func_figure + func_random + \
                 label_figure + label_random + \
                 file_mask

    dataset_name = 'miyawaki2008'
    data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
                                verbose=verbose)
    files = _fetch_files(data_dir, file_names, resume=resume, verbose=verbose)

    # Fetch the background image
    bg_img = _fetch_files(data_dir, [('bg.nii.gz', url, opts)], resume=resume,
                          verbose=verbose)[0]

    fdescr = _get_dataset_descr(dataset_name)

    # Return the data
    return Bunch(
        func=files[:32],
        label=files[32:64],
        mask=files[64],
        mask_roi=files[65:],
        background=bg_img,
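
_fetch_files expects a list of (target_filename, url, options) triplets and returns the local paths in the same order, which is why the snippet above can slice files positionally into func, label, and mask entries. A sketch of that contract, with a placeholder URL and file names:

import os

from nilearn.datasets.utils import _fetch_files, _get_dataset_dir

url = 'https://example.org/my_dataset.tgz'  # placeholder archive URL
opts = {'uncompress': True}  # unpack the archive after download

# Each entry is (path relative to the dataset dir, source url, options).
file_names = [(os.path.join('mask', m), url, opts)
              for m in ['RHV1.nii.gz', 'LHV1.nii.gz']]

data_dir = _get_dataset_dir('my_dataset', data_dir=None, verbose=1)
# Local paths come back in the same order as file_names, so positional
# slicing (files[:32], files[32:64], ...) is safe.
files = _fetch_files(data_dir, file_names, resume=True, verbose=1)
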
github neurospin / pypreprocess / pypreprocess / datasets.py
    glob it.

    Returns
    -------
    data: sklearn.datasets.base.Bunch
        Dictionary-like object, the interest attributes are:
        - 'func': string list. Paths to functional images
        - 'anat': string list. Path to anat image

    References
    ----------
    :download:
        http://www.fil.ion.ucl.ac.uk/spm/data/auditory/

    """
    data_dir = _get_dataset_dir(data_name, data_dir=data_dir,
                                verbose=verbose)
    subject_dir = os.path.join(data_dir, subject_id)

    def _glob_spm_auditory_data():
        """glob data from subject_dir.

        """

        if not os.path.exists(subject_dir):
            return None

        subject_data = {}
        for file_name in SPM_AUDITORY_DATA_FILES:
            file_path = os.path.join(subject_dir, file_name)
            if os.path.exists(file_path):
                subject_data[file_name] = file_path
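
This fetcher uses _get_dataset_dir only to locate the cache, then globs for the expected files and skips the download when everything is already present. A condensed sketch of that check; the file names are illustrative, and unlike the snippet above this version treats an incomplete cache as a miss:

import os

from nilearn.datasets.utils import _get_dataset_dir

EXPECTED_FILES = ['fM00223_004.img', 'sM00223_002.img']  # illustrative


def _glob_cached_data(data_name, subject_id, data_dir=None, verbose=1):
    data_dir = _get_dataset_dir(data_name, data_dir=data_dir,
                                verbose=verbose)
    subject_dir = os.path.join(data_dir, subject_id)
    if not os.path.exists(subject_dir):
        return None  # nothing cached yet; the caller should download

    subject_data = {}
    for file_name in EXPECTED_FILES:
        file_path = os.path.join(subject_dir, file_name)
        if not os.path.exists(file_path):
            return None  # incomplete cache: treat it as a miss
        subject_data[file_name] = file_path
    return subject_data
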
github nilearn / nilearn / nilearn / datasets / neurovault.py
def _fetch_neurovault_implementation(
    max_images=_DEFAULT_MAX_IMAGES, collection_terms=basic_collection_terms(),
    collection_filter=_empty_filter, image_terms=basic_image_terms(),
    image_filter=_empty_filter, collection_ids=None, image_ids=None,
    mode='download_new', data_dir=None, fetch_neurosynth_words=False,
        vectorize_words=True, verbose=3, **kwarg_image_filters):
    """Download data from neurovault.org and neurosynth.org."""
    image_terms = dict(image_terms, **kwarg_image_filters)
    neurovault_data_dir = _get_dataset_dir('neurovault', data_dir)
    if mode != 'offline' and not os.access(neurovault_data_dir, os.W_OK):
        warnings.warn("You don't have write access to neurovault dir: {0}; "
                      "fetch_neurovault is working offline.".format(
                          neurovault_data_dir))
        mode = 'offline'

    download_params = _read_download_params(
        neurovault_data_dir, download_mode=mode,
        collection_terms=collection_terms,
        collection_filter=collection_filter, image_terms=image_terms,
        image_filter=image_filter, wanted_collection_ids=collection_ids,
        wanted_image_ids=image_ids, max_images=max_images, verbose=verbose,
        fetch_neurosynth_words=fetch_neurosynth_words,
        vectorize_words=vectorize_words)
    download_params = _prepare_download_params(download_params)
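
Note the defensive check at the top: _get_dataset_dir creates the directory when it can, but if the resolved location is not writable (a read-only shared cache, for instance), later downloads would fail, so the code probes os.access(..., os.W_OK) up front and degrades to offline mode. The guard in isolation:

import os
import warnings

from nilearn.datasets.utils import _get_dataset_dir

mode = 'download_new'
neurovault_data_dir = _get_dataset_dir('neurovault', data_dir=None)
if mode != 'offline' and not os.access(neurovault_data_dir, os.W_OK):
    warnings.warn("No write access to {0}; working offline."
                  .format(neurovault_data_dir))
    mode = 'offline'
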
github nilearn / nilearn / nilearn / datasets / struct.py
def _fetch_surf_fsaverage5_sphere(data_dir=None):
    """Helper function to ship fsaverage5 spherical meshes.

    These meshes can be used for visualization purposes, but also to run
    cortical surface-based searchlight decoding.

    The data are downloaded from OSF.
    """

    fsaverage_dir = _get_dataset_dir('fsaverage', data_dir=data_dir)
    dataset_dir = _get_dataset_dir('fsaverage5_sphere', data_dir=fsaverage_dir)
    url = 'https://osf.io/b79fy/download'
    opts = {'uncompress': True}
    names = ['sphere_right', 'sphere_left']
    filenames = [('{}.gii'.format(name), url, opts)
                 for name in names]
    _fetch_files(dataset_dir, filenames)
    result = {
        name: os.path.join(dataset_dir, '{}.gii'.format(name))
        for name in names}

    result['description'] = str(_get_dataset_descr('fsaverage5_sphere'))
    return Bunch(**result)
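
Calling _get_dataset_dir twice, as above, nests one dataset directory inside another (fsaverage/fsaverage5_sphere here), because the second call simply treats the first call's result as its data_dir. The nesting on its own:

from nilearn.datasets.utils import _get_dataset_dir

# First call resolves (or creates) <data_dir>/fsaverage; the second
# nests <data_dir>/fsaverage/fsaverage5_sphere inside it.
fsaverage_dir = _get_dataset_dir('fsaverage', data_dir=None)
dataset_dir = _get_dataset_dir('fsaverage5_sphere', data_dir=fsaverage_dir)
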
github nistats / nistats / nistats / datasets.py
    glob it.

    Returns
    -------
    data: sklearn.datasets.base.Bunch
        Dictionary-like object, the interest attributes are:
        - 'func': string list. Paths to functional images
        - 'anat': string list. Path to anat image

    References
    ----------
    :download:
        http://www.fil.ion.ucl.ac.uk/spm/data/auditory/

    """
    data_dir = _get_dataset_dir(data_name, data_dir=data_dir,
                                verbose=verbose)
    subject_dir = os.path.join(data_dir, subject_id)
    if not os.path.exists(subject_dir):
        _download_spm_auditory_data(data_dir, subject_dir, subject_id)
    spm_auditory_data = _prepare_downloaded_spm_auditory_data(subject_dir)
    try:
        spm_auditory_data['events']
    except KeyError:
        events_filepath = _make_path_events_file_spm_auditory_data(
            spm_auditory_data)
        if not os.path.isfile(events_filepath):
            _make_events_file_spm_auditory_data(events_filepath)
        spm_auditory_data['events'] = events_filepath
    return spm_auditory_data
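
This variant adds lazy creation of a derived file: if the downloaded data has no events entry, a path is computed, the file is generated once, and the result is recorded for reuse. The skeleton of that pattern, with placeholder callables standing in for the _make_*_events_file_spm_auditory_data helpers above:

import os


def _ensure_derived_file(data, make_path, make_file):
    # make_path and make_file are placeholders for the helpers in the
    # snippet above; the shape of the pattern is what matters.
    if 'events' not in data:
        events_filepath = make_path(data)
        if not os.path.isfile(events_filepath):
            make_file(events_filepath)
        data['events'] = events_filepath
    return data
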
github nilearn / nilearn / nilearn / datasets / atlas.py
    base_url = ('https://raw.githubusercontent.com/ThomasYeoLab/CBIG/'
                    'v0.14.3-Update_Yeo2011_Schaefer2018_labelname/'
                    'stable_projects/brain_parcellation/'
                    'Schaefer2018_LocalGlobal/Parcellations/MNI/'
                    )

    files = []
    labels_file_template = 'Schaefer2018_{}Parcels_{}Networks_order.txt'
    img_file_template = ('Schaefer2018_{}Parcels_'
                         '{}Networks_order_FSLMNI152_{}mm.nii.gz')
    for f in [labels_file_template.format(n_rois, yeo_networks),
              img_file_template.format(n_rois, yeo_networks, resolution_mm)]:
        files.append((f, base_url + f, {}))

    dataset_name = 'schaefer_2018'
    data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
                                verbose=verbose)
    labels_file, atlas_file = _fetch_files(data_dir, files, resume=resume,
                                           verbose=verbose)

    labels = np.genfromtxt(labels_file, usecols=1, dtype="S", delimiter="\t")
    fdescr = _get_dataset_descr(dataset_name)

    return Bunch(maps=atlas_file,
                 labels=labels,
                 description=fdescr)
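
The Schaefer fetcher builds its download list from file-name templates parameterized by n_rois, yeo_networks, and resolution_mm, so one fetcher covers every variant of the atlas. The templating in isolation, with example parameter values:

base_url = ('https://raw.githubusercontent.com/ThomasYeoLab/CBIG/'
            'v0.14.3-Update_Yeo2011_Schaefer2018_labelname/'
            'stable_projects/brain_parcellation/'
            'Schaefer2018_LocalGlobal/Parcellations/MNI/')

n_rois, yeo_networks, resolution_mm = 400, 7, 2  # example parameters
labels_file = 'Schaefer2018_{}Parcels_{}Networks_order.txt'.format(
    n_rois, yeo_networks)
img_file = ('Schaefer2018_{}Parcels_{}Networks_order_FSLMNI152_{}mm'
            '.nii.gz').format(n_rois, yeo_networks, resolution_mm)

# (filename, url, options) triplets, ready for _fetch_files
files = [(f, base_url + f, {}) for f in (labels_file, img_file)]
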
github nilearn / nilearn / nilearn / datasets / func.py
    n_subjects = 25

    anat_anon = []
    anat_skull = []
    func = []
    session = []
    for i in sessions:
        if not (i in [1, 2, 3]):
            raise ValueError('NYU dataset session id must be in [1, 2, 3]')
        anat_anon += anat_anon_files[i - 1][:n_subjects]
        anat_skull += anat_skull_files[i - 1][:n_subjects]
        func += func_files[i - 1][:n_subjects]
        session += [i] * n_subjects

    dataset_name = 'nyu_rest'
    data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
                                verbose=verbose)
    anat_anon = _fetch_files(data_dir, anat_anon, resume=resume,
                             verbose=verbose)
    anat_skull = _fetch_files(data_dir, anat_skull, resume=resume,
                              verbose=verbose)
    func = _fetch_files(data_dir, func, resume=resume,
                        verbose=verbose)

    fdescr = _get_dataset_descr(dataset_name)

    return Bunch(anat_anon=anat_anon, anat_skull=anat_skull, func=func,
                 session=session, description=fdescr)
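
The NYU fetcher validates every requested session id before accumulating per-session file lists, so a bad id fails fast instead of triggering a partial download. The validation and accumulation step on its own, with example values:

sessions = [1, 2]  # example request
n_subjects = 5     # example cap
session = []
for i in sessions:
    if i not in (1, 2, 3):
        raise ValueError('NYU dataset session id must be in [1, 2, 3]')
    session += [i] * n_subjects
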
github nilearn / nilearn / nilearn / datasets / atlas.py
        pp. 562-573, Lecture Notes in Computer Science.

    :Other references:
        Learning and comparing functional connectomes across subjects.
        Gael Varoquaux, R.C. Craddock, NeuroImage, 2013.

    """
    url = 'https://team.inria.fr/parietal/files/2015/01/MSDL_rois.zip'
    opts = {'uncompress': True}

    dataset_name = "msdl_atlas"
    files = [(os.path.join('MSDL_rois', 'msdl_rois_labels.csv'), url, opts),
             (os.path.join('MSDL_rois', 'msdl_rois.nii'), url, opts)]

    data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
                                verbose=verbose)
    files = _fetch_files(data_dir, files, resume=resume, verbose=verbose)
    csv_data = np.recfromcsv(files[0])
    labels = [name.strip() for name in csv_data['name'].tolist()]
    labels = [label.decode("utf-8") for label in labels]
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', module='numpy',
                                category=FutureWarning)
        region_coords = csv_data[['x', 'y', 'z']].tolist()
    net_names = [net_name.strip() for net_name in csv_data['net_name'].tolist()]
    fdescr = _get_dataset_descr(dataset_name)

    return Bunch(maps=files[1], labels=labels, region_coords=region_coords,
                 networks=net_names, description=fdescr)
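
Finally, this fetcher post-processes the fetched CSV: label bytes are stripped and decoded, and coordinates are extracted column-wise. np.recfromcsv is deprecated in recent NumPy, so here is a sketch of equivalent parsing with np.genfromtxt, assuming the column names (name, x, y, z, net_name) that the snippet's field accesses imply:

import numpy as np

# names=True reads the header row and gives record-style access like
# the deprecated np.recfromcsv; encoding='utf-8' yields str, not bytes.
csv_data = np.genfromtxt('msdl_rois_labels.csv', delimiter=',',
                         names=True, dtype=None, encoding='utf-8')
labels = [str(name).strip() for name in csv_data['name']]
region_coords = list(zip(csv_data['x'], csv_data['y'], csv_data['z']))
net_names = [str(n).strip() for n in csv_data['net_name']]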