How to use nilearn - 10 common examples

To help you get started, we've selected a few nilearn examples based on popular ways the library is used in public projects.


github ME-ICA / tedana / tedana / io.py
    if n_echos is None:
        raise ValueError('Number of echos must be specified. '
                         'Confirm that TE times are provided with the `-e` argument.')

    if isinstance(data, list):
        if len(data) == 1:  # a z-concatenated file was provided
            data = data[0]
        elif len(data) == 2:  # inviable -- need more than 2 echos
            raise ValueError('Cannot run `tedana` with only two echos: '
                             '{}'.format(data))
        else:  # individual echo files were provided (surface or volumetric)
            fdata = np.stack([utils.load_image(f) for f in data], axis=1)
            ref_img = check_niimg(data[0])
            ref_img.header.extensions = []
            return np.atleast_3d(fdata), ref_img

    img = check_niimg(data)
    (nx, ny), nz = img.shape[:2], img.shape[2] // n_echos
    fdata = utils.load_image(img.get_data().reshape(nx, ny, nz, n_echos, -1, order='F'))
    # create reference image
    ref_img = img.__class__(np.zeros((nx, ny, nz, 1)), affine=img.affine,
                            header=img.header, extra=img.extra)
    ref_img.header.extensions = []
    ref_img.header.set_sform(ref_img.header.get_sform(), code=1)

    return fdata, ref_img
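The excerpt above leans on nilearn's image-loading utilities (check_niimg, utils.load_image) to validate inputs and build a reference image. A minimal sketch of the same idea using nilearn's public API; the file name below is a placeholder, not something from the original code.

from nilearn import image

# Placeholder path to a 4D echo file; substitute a real NIfTI file.
echo_file = 'sub-01_echo-1_bold.nii.gz'

# load_img validates the input and returns a nibabel image,
# much like check_niimg does in the excerpt above.
img = image.load_img(echo_file)
print(img.shape)

# A single-volume reference image that keeps the original affine and header.
ref_img = image.index_img(img, 0)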
github nilearn / nilearn / nilearn / input_data / nifti_masker.py
"""
        try:
            from nilearn import plotting
        except ImportError:
            with warnings.catch_warnings():
                mpl_unavail_msg = ('Matplotlib is not imported! '
                                'No reports will be generated.')
                warnings.filterwarnings('always', message=mpl_unavail_msg)
                warnings.warn(category=ImportWarning,
                            message=mpl_unavail_msg)
                return [None]

        img = self._reporting_data['images']
        mask = self._reporting_data['mask']
        if img is not None:
            dim = image.load_img(img).shape
            if len(dim) == 4:
                # compute middle image from 4D series for plotting
                img = image.index_img(img, dim[-1] // 2)
        else:  # images were not provided to fit
            img = mask

        # create display of retained input mask, image
        # for visual comparison
        init_display = plotting.plot_img(img,
                                         black_bg=False,
                                         cmap='CMRmap_r')
        init_display.add_contours(mask, levels=[.5], colors='g',
                                  linewidths=2.5)

        if 'transform' not in self._reporting_data:
            return [init_display]
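Underneath, this reporting helper just plots a background image and overlays the mask as a green contour. A self-contained sketch of that pattern using the MNI152 template and brain mask bundled with nilearn (the excerpt uses the masker's own images and mask instead):

from nilearn import datasets, plotting

# Background image and a binary mask shipped with nilearn.
template = datasets.load_mni152_template()
mask_img = datasets.load_mni152_brain_mask()

display = plotting.plot_img(template, black_bg=False, cmap='CMRmap_r')
display.add_contours(mask_img, levels=[.5], colors='g', linewidths=2.5)
plotting.show()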
github nilearn / nilearn / nilearn / input_data / nifti_masker.py
linewidths=2.5)

        if 'transform' not in self._reporting_data:
            return [init_display]

        else:  # if resampling was performed
            self._report_description = (self._report_description +
                                        self._overlay_text)

            # create display of resampled NiftiImage and mask
            # assuming that resampl_img has same dim as img
            resampl_img, resampl_mask = self._reporting_data['transform']
            if resampl_img is not None:
                if len(dim) == 4:
                    # compute middle image from 4D series for plotting
                    resampl_img = image.index_img(resampl_img, dim[-1] // 2)
            else:  # images were not provided to fit
                resampl_img = resampl_mask

            final_display = plotting.plot_img(resampl_img,
                                              black_bg=False,
                                              cmap='CMRmap_r')
            final_display.add_contours(resampl_mask, levels=[.5],
                                       colors='g', linewidths=2.5)

        return [init_display, final_display]
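In user code you rarely call these private reporting helpers directly; the same visual check is exposed through NiftiMasker.generate_report(). A sketch assuming a nilearn release (0.6 or later) that provides report generation, using the Haxby dataset as example input:

from nilearn import datasets
from nilearn.input_data import NiftiMasker

# One Haxby subject (downloaded on first use).
haxby = datasets.fetch_haxby()

masker = NiftiMasker(standardize=True)
masker.fit(haxby.func[0])

# generate_report() renders the mask/image overlays shown above
# as an HTML report that can be saved or embedded in a notebook.
report = masker.generate_report()
report.save_as_html('masker_report.html')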
github nistats / nistats / examples / 06_second_level_models_non_parametric_tests / plot_oasis.py
from nilearn.image import math_img
from nilearn.input_data import NiftiMasker
p_val = second_level_model.compute_contrast(second_level_contrast='age',
                                            output_type='p_value')
masker = NiftiMasker(mask_img=mask_img).fit(p_val)
n_voxel = np.size(masker.transform(p_val))
# Correcting the p-values for multiple testing and taking neg log
neg_log_pval = math_img("-np.log10(np.minimum(1, img * {}))"
                        .format(str(n_voxel)),
                        img=p_val)

###########################################################################
# Let us plot the second level contrast
from nilearn import plotting
cut_coords = [-4, 26]
display = plotting.plot_stat_map(
    neg_log_pval, colorbar=True, display_mode='z', cut_coords=cut_coords)
plotting.show()

##############################################################################
# Computing the (corrected) p-values with permutation test
from nistats.second_level_model import non_parametric_inference
neg_log_pvals_permuted_ols_unmasked = \
    non_parametric_inference(gray_matter_map_filenames,
                             design_matrix=design_matrix,
                             second_level_contrast='age',
                             model_intercept=True, n_perm=1000,
                             two_sided_test=False, mask=mask_img,
                             smoothing_fwhm=2.0, n_jobs=1)

###########################################################################
# Let us plot the second level contrast
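The neg-log transform in this example is a Bonferroni-style correction written as a math_img string expression. A toy, self-contained illustration of that mechanism, with a random image standing in for the real p-value map:

import numpy as np
import nibabel as nib
from nilearn.image import math_img

# Toy "p-value" image: a 10 x 10 x 10 volume of uniform random values.
p_img = nib.Nifti1Image(np.random.uniform(size=(10, 10, 10)), affine=np.eye(4))
n_voxels = int(np.prod(p_img.shape))

# Multiply by the number of tests, cap at 1, then take -log10,
# exactly as in the formula above.
neg_log_p = math_img("-np.log10(np.minimum(1, img * {}))".format(n_voxels),
                     img=p_img)
print(neg_log_p.get_fdata().max())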
github nistats / nistats / nistats / reporting / _visual_testing / _glm_reporter_visual_inspection_suite_.py
def report_flm_fiac():  # pragma: no cover
    data = nistats_datasets.fetch_fiac_first_level()
    fmri_img = [data['func1'], data['func2']]

    from nilearn.image import mean_img
    mean_img_ = mean_img(fmri_img[0])

    design_files = [data['design_matrix1'], data['design_matrix2']]
    design_matrices = [pd.DataFrame(np.load(df)['X']) for df in design_files]

    fmri_glm = FirstLevelModel(mask_img=data['mask'], minimize_memory=True)
    fmri_glm = fmri_glm.fit(fmri_img, design_matrices=design_matrices)

    n_columns = design_matrices[0].shape[1]

    contrasts = {
            'SStSSp_minus_DStDSp': _pad_vector([1, 0, 0, -1], n_columns),
            'DStDSp_minus_SStSSp': _pad_vector([-1, 0, 0, 1], n_columns),
            'DSt_minus_SSt': _pad_vector([-1, -1, 1, 1], n_columns),
            'DSp_minus_SSp': _pad_vector([-1, 1, -1, 1], n_columns),
            'DSt_minus_SSt_for_DSp': _pad_vector([0, -1, 0, 1], n_columns),
            'DSp_minus_SSp_for_DSt': _pad_vector([0, 0, -1, 1], n_columns),
github nistats / nistats / nistats / reporting / _visual_testing / _glm_reporter_visual_inspection_suite_.py View on Github external
def report_flm_adhd_dmn():  # pragma: no cover
    t_r = 2.
    slice_time_ref = 0.
    n_scans = 176
    pcc_coords = (0, -53, 26)
    adhd_dataset = nilearn.datasets.fetch_adhd(n_subjects=1)
    seed_masker = NiftiSpheresMasker([pcc_coords], radius=10, detrend=True,
                                     standardize=True, low_pass=0.1,
                                     high_pass=0.01, t_r=2.,
                                     memory='nilearn_cache',
                                     memory_level=1, verbose=0)
    seed_time_series = seed_masker.fit_transform(adhd_dataset.func[0])
    frametimes = np.linspace(0, (n_scans - 1) * t_r, n_scans)
    design_matrix = make_first_level_design_matrix(frametimes, hrf_model='spm',
                                                   add_regs=seed_time_series,
                                                   add_reg_names=["pcc_seed"])
    dmn_contrast = np.array([1] + [0] * (design_matrix.shape[1] - 1))
    contrasts = {'seed_based_glm': dmn_contrast}

    first_level_model = FirstLevelModel(t_r=t_r, slice_time_ref=slice_time_ref)
    first_level_model = first_level_model.fit(run_imgs=adhd_dataset.func[0],
                                              design_matrices=design_matrix)
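The model is fitted, but the excerpt ends before the seed-based contrast is computed. One plausible continuation, written as a sketch that assumes the first_level_model and contrasts objects defined above (the threshold is purely illustrative):

from nilearn import plotting

# Estimate the DMN contrast defined above and display it.
z_map = first_level_model.compute_contrast(contrasts['seed_based_glm'],
                                           output_type='z_score')
plotting.plot_stat_map(z_map, threshold=3.0,
                       title='Seed-based connectivity (PCC seed)')
plotting.show()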
github nilearn / nilearn / nilearn / _utils / testing.py
    def mock_chunk_read_(response, local_file, initial_size=0, chunk_size=8192,
                         report_hook=None, verbose=0):
        if not isinstance(response, _basestring):
            return _chunk_read_(response, local_file,
                                initial_size=initial_size,
                                chunk_size=chunk_size,
                                report_hook=report_hook, verbose=verbose)
        return response
    return mock_chunk_read_
github arthurmensch / cogspaces / scripts / unmask.py
def unmask(data_dir=None, unmasked_dir='unmasked',
           n_jobs=30):
    data_dir = get_data_dir(data_dir)
    unmasked_dir = join(data_dir, unmasked_dir)
    if not os.path.exists(unmasked_dir):
        os.makedirs(unmasked_dir)
    contrasts = fetch_all()
    mask = fetch_mask()
    masker = NiftiMasker(smoothing_fwhm=4, mask_img=mask,
                         verbose=0, memory_level=1, memory=None).fit()
    imgs = contrasts['z_map'].values
    n_samples = imgs.shape[0]
    batches = list(gen_batches(n_samples, 1))
    unmask_single(masker, imgs, create_structure=True)
    Parallel(n_jobs=n_jobs, verbose=10)(delayed(unmask_single)(masker,
                                                               imgs[batch])
                                        for batch in batches)
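The script above is essentially a NiftiMasker transform wrapped in joblib batches. Stripped to its core, the masker pattern looks like the following toy sketch; the random image and all-ones mask stand in for the data returned by fetch_all() and fetch_mask():

import numpy as np
import nibabel as nib
from nilearn.input_data import NiftiMasker

# Toy 4D image (10 volumes) and a mask covering the whole volume.
affine = np.eye(4)
imgs = nib.Nifti1Image(np.random.randn(5, 5, 5, 10), affine)
mask = nib.Nifti1Image(np.ones((5, 5, 5), dtype=np.int8), affine)

masker = NiftiMasker(mask_img=mask, smoothing_fwhm=4).fit()
X = masker.transform(imgs)          # (n_volumes, n_voxels) array
back = masker.inverse_transform(X)  # back to a 4D image
print(X.shape, back.shape)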
github nilearn / nilearn / plot_nifti_advanced.py
# Display helper
background = np.mean(haxby_func, axis=-1)[..., 27]


def display_mask(background, mask, title):
    plt.axis('off')
    plt.imshow(np.rot90(background), interpolation='nearest', cmap=plt.cm.gray)
    ma = np.ma.masked_equal(mask, False)
    plt.imshow(np.rot90(ma), interpolation='nearest',
              cmap=plt.cm.autumn, alpha=0.5)
    plt.title(title)

# Generate mask with default parameters
from nilearn.input_data import NiftiMasker
masker = NiftiMasker()
masker.fit(haxby_img)
default_mask = masker.mask_img_.get_data().astype(np.bool)
plt.figure(figsize=(3, 5))
display_mask(background, default_mask[..., 27], 'Default mask')
plt.tight_layout()

# Generate mask with opening
masker = NiftiMasker(mask_opening=0)
masker.fit(haxby_img)
opening_mask = masker.mask_img_.get_data().astype(np.bool)
plt.figure(figsize=(3, 5))
display_mask(background, opening_mask[..., 27], 'Mask without opening')
plt.tight_layout()

# Generate mask with upper cutoff
masker = NiftiMasker(mask_opening=True, mask_upper_cutoff=0.8)
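Note that mask_opening and mask_upper_cutoff belong to an older NiftiMasker API; in current nilearn releases the equivalent options are, as far as we can tell, forwarded to the mask-computation function through mask_args. A sketch of that spelling, using fetch_haxby() in place of the haxby_img defined earlier in the original example:

from nilearn import datasets
from nilearn.input_data import NiftiMasker

# One Haxby subject; in the excerpt `haxby_img` plays this role.
haxby = datasets.fetch_haxby()
func_img = haxby.func[0]

# Mask-computation options are forwarded to compute_epi_mask via mask_args.
masker = NiftiMasker(mask_strategy='epi',
                     mask_args=dict(opening=0, upper_cutoff=0.8))
masker.fit(func_img)

mask = masker.mask_img_.get_fdata().astype(bool)
print(mask.sum(), 'voxels retained')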
github arthurmensch / modl / examples / experimental / fmri / hcp_analysis.py
def get_init_objective(output_dir):
    mask, func_filenames = get_hcp_data(raw=True)

    masker = NiftiMasker(mask_img=mask, smoothing_fwhm=None,
                         standardize=False)
    masker.fit()

    rsn70 = fetch_atlas_smith_2009().rsn70
    components = masker.transform(rsn70)
    print(components.shape)
    enet_scale(components.T, inplace=True)
    print(np.sum(np.abs(components), axis=1))
    test_data = func_filenames[(-n_test_records * 2)::2]

    n_samples, n_voxels = np.load(test_data[-1], mmap_mode='r').shape
    X = np.empty((n_test_records * n_samples, n_voxels))

    for i, this_data in enumerate(test_data):
        X[i * n_samples:(i + 1) * n_samples] = np.load(this_data,
                                                       mmap_mode='r')
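This last excerpt masks the Smith 2009 atlas maps with a masker fitted on an HCP-specific mask from a local helper (get_hcp_data). A self-contained sketch of the same transform; since that mask is not available here, the mask is computed from the atlas background instead:

from nilearn import datasets
from nilearn.input_data import NiftiMasker

# Smith 2009 ICA atlas (70 resting-state network maps), as in the excerpt.
rsn70 = datasets.fetch_atlas_smith_2009().rsn70

# Compute a mask from the atlas background and extract one row per component.
masker = NiftiMasker(mask_strategy='background').fit(rsn70)
components = masker.transform(rsn70)
print(components.shape)  # (70, n_voxels_in_mask)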