How to use the nilearn.image module in nilearn

To help you get started, we’ve selected a few nilearn.image examples, based on popular ways it is used in public projects.

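Before diving into the project snippets below, here is a minimal, self-contained sketch of the most common nilearn.image calls; the file names are placeholders rather than data shipped with nilearn.

# Minimal sketch of common nilearn.image calls; paths are placeholders.
from nilearn import image

fmri_img = image.load_img('func.nii.gz')        # load a (possibly 4D) NIfTI image
smoothed = image.smooth_img(fmri_img, fwhm=6)   # Gaussian smoothing, 6 mm FWHM
mean_img = image.mean_img(smoothed)             # voxel-wise mean over time
first_vol = image.index_img(smoothed, 0)        # extract a single 3D volume
mean_img.to_filename('mean_func.nii.gz')        # write the result back to disk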

github nilearn / nilearn / nilearn / input_data / nifti_masker.py
"""
        try:
            from nilearn import plotting
        except ImportError:
            with warnings.catch_warnings():
                mpl_unavail_msg = ('Matplotlib is not imported! '
                                'No reports will be generated.')
                warnings.filterwarnings('always', message=mpl_unavail_msg)
                warnings.warn(category=ImportWarning,
                            message=mpl_unavail_msg)
                return [None]

        img = self._reporting_data['images']
        mask = self._reporting_data['mask']
        if img is not None:
            dim = image.load_img(img).shape
            if len(dim) == 4:
                # compute middle image from 4D series for plotting
                img = image.index_img(img, dim[-1] // 2)
        else:  # images were not provided to fit
            img = mask

        # create display of retained input mask, image
        # for visual comparison
        init_display = plotting.plot_img(img,
                                         black_bg=False,
                                         cmap='CMRmap_r')
        init_display.add_contours(mask, levels=[.5], colors='g',
                                  linewidths=2.5)

        if 'transform' not in self._reporting_data:
            return [init_display]
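
The same idea, picking the middle volume of a 4D series and overlaying the mask as a contour, can be sketched outside the masker's reporting machinery roughly as follows; 'func_4d.nii.gz' and 'mask.nii.gz' are placeholder paths.

# Standalone sketch with placeholder paths.
from nilearn import image, plotting

img = image.load_img('func_4d.nii.gz')
if len(img.shape) == 4:
    # take the middle volume of the 4D series for display
    img = image.index_img(img, img.shape[-1] // 2)

display = plotting.plot_img(img, black_bg=False, cmap='CMRmap_r')
display.add_contours('mask.nii.gz', levels=[.5], colors='g', linewidths=2.5)
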
github edickie / ciftify / ciftify / bin / ciftify_clean_img.py
    settings = UserSettings(arguments)
    settings.print_settings()

    # check the confounds input and define the confound signals for nilearn
    confound_signals = mangle_confounds(settings)

    # if input is cifti - we convert to fake nifti
    ## convert to nifti
    if settings.func.type == "cifti":
        input_nifti = os.path.join(tmpdir,'func_fnifti.nii.gz')
        ciftify.utils.run(['wb_command','-cifti-convert','-to-nifti',settings.func.path, input_nifti])
    else:
        input_nifti = settings.func.path

    # load image as nilearn image
    nib_image = nilearn.image.load_img(input_nifti)

    if settings.start_from_tr > 0:
        trimmed_nifti = image_drop_dummy_trs(nib_image, settings.start_from_tr)
    else:
        trimmed_nifti = nib_image

    # the nilearn cleaning step..
    clean_output = clean_image_with_nilearn(trimmed_nifti, confound_signals, settings)

    # optionally smooth with nilearn.image.smooth_img if the input is a nifti
    if settings.func.type == "nifti":
        if settings.smooth.fwhm > 0 :
            smoothed_vol = nilearn.image.smooth_img(clean_output, settings.smooth.fwhm)
            smoothed_vol.to_filename(settings.output_func)
        else:
            clean_output.to_filename(settings.output_func)
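
mangle_confounds and clean_image_with_nilearn are ciftify-specific helpers; for a plain NIfTI input, a roughly equivalent load / clean / smooth / save pipeline can be sketched directly with nilearn.image (file names, t_r and fwhm below are illustrative, not ciftify defaults).

# Illustrative sketch; file names, t_r and fwhm are placeholders.
import numpy as np
import nilearn.image

func_img = nilearn.image.load_img('func.nii.gz')
confounds = np.loadtxt('confounds.tsv', skiprows=1)   # time x regressors matrix

cleaned = nilearn.image.clean_img(func_img, detrend=True, standardize=False,
                                  confounds=confounds, t_r=2.0)
smoothed = nilearn.image.smooth_img(cleaned, fwhm=8)
smoothed.to_filename('func_clean_sm8.nii.gz')
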
github poldracklab / niworkflows / niworkflows / interfaces / utils.py
    if out_file is None:
        out_file = fname_presuffix(
            fixed_image, suffix="_reference", newpath=os.getcwd()
        )

    # Moving images may not be RAS/LPS (more generally, transverse-longitudinal-axial)
    reoriented_moving_img = nb.as_closest_canonical(nb.load(moving_image))
    new_zooms = reoriented_moving_img.header.get_zooms()[:3]

    # Avoid small differences in reported resolution to cause changes to
    # FOV. See https://github.com/nipreps/fmriprep/issues/512
    # A positive diagonal affine is RAS, hence the need to reorient above.
    new_affine = np.diag(np.round(new_zooms, 3))

    resampled = nli.resample_img(
        fixed_image, target_affine=new_affine, interpolation="nearest"
    )

    if fov_mask is not None:
        # If we have a mask, resample again dropping (empty) samples
        # out of the FoV.
        fixednii = nb.load(fixed_image)
        masknii = nb.load(fov_mask)

        if np.all(masknii.shape[:3] != fixednii.shape[:3]):
            raise RuntimeError("Fixed image and mask do not have the same dimensions.")

        if not np.allclose(masknii.affine, fixednii.affine, atol=1e-5):
            raise RuntimeError("Fixed image and mask have different affines")

        # Get mask into reference space
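
The nilearn.image call doing the work above is resample_img; stripped of the niworkflows plumbing, resampling a fixed image onto the moving image's (reoriented) voxel grid can be sketched roughly as below, with placeholder file names.

# Rough sketch; 'moving.nii.gz' and 'fixed.nii.gz' are placeholder paths.
import numpy as np
import nibabel as nb
from nilearn import image as nli

moving = nb.as_closest_canonical(nb.load('moving.nii.gz'))
new_zooms = moving.header.get_zooms()[:3]

# a positive-diagonal (RAS) 3x3 affine built from the rounded voxel sizes
new_affine = np.diag(np.round(new_zooms, 3))

resampled = nli.resample_img('fixed.nii.gz', target_affine=new_affine,
                             interpolation='nearest')
resampled.to_filename('fixed_reference.nii.gz')
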
github edickie / ciftify / ciftify / bin / ciftify_clean_img.py
def image_drop_dummy_trs(nib_image, start_from_tr):
    '''Drop the first `start_from_tr` dummy TRs from the image using nilearn.'''
    data_out = nib_image.get_data()[:,:,:, start_from_tr:]
    img_out = nilearn.image.new_img_like(nib_image, data_out, nib_image.affine, copy_header = True)
    return img_out
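
A possible alternative that avoids pulling out the raw data array: nilearn.image.index_img accepts anything usable as a numpy index along the fourth dimension, including slices, so the same trimming could be written as below.

# Alternative sketch using index_img with a slice instead of raw array indexing.
from nilearn import image

def drop_dummy_trs(nib_image, start_from_tr):
    '''Return the 4D image with the first `start_from_tr` volumes removed.'''
    return image.index_img(nib_image, slice(start_from_tr, None))
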
github nidata / nidata / nidata / _external / nilearn / examples / connectivity / plot_ica_resting_state.py
components_masked /= components_masked.std(axis=0)
# Threshold
components_masked[components_masked < .8] = 0

# Now invert the masking operation, going back to a full 3D
# representation
component_img = masker.inverse_transform(components_masked)

### Visualize the results #####################################################
# Show some interesting components
import matplotlib.pyplot as plt
from nilearn import image
from nilearn.plotting import plot_stat_map

# Use the mean as a background
mean_img = image.mean_img(func_filename)

plot_stat_map(image.index_img(component_img, 5), mean_img)

plot_stat_map(image.index_img(component_img, 12), mean_img)

plt.show()
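
To browse every component rather than hand-picking indices 5 and 12, nilearn.image.iter_img gives an iterator over the 3D volumes of a 4D image; a rough sketch, reusing component_img and mean_img from the snippet above:

# Rough sketch reusing component_img and mean_img from the snippet above.
from nilearn import image
from nilearn.plotting import plot_stat_map

for i, cur_img in enumerate(image.iter_img(component_img)):
    plot_stat_map(cur_img, bg_img=mean_img, title="Component %d" % i,
                  display_mode='z', cut_coords=1, colorbar=False)
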
github nilearn / nilearn / examples / 04_manipulating_images / plot_roi_extraction.py
# When using methods that are not robust to noise, it is useful to apply a
# spatial filtering kernel on the data. Such data smoothing is usually applied
# using a Gaussian function with 4mm to 12mm full-width at half-maximum (this
# is where the FWHM comes from). The function :func:`nilearn.image.smooth_img`
# accounts for potential anisotropy in the image affine (i.e., non-identical
# voxel sizes across the three dimensions). Like most nilearn functions,
# smooth_img can also take file names as input.

# Smooth the data using image processing module from nilearn
from nilearn import image

# Functional data
fmri_filename = haxby_dataset.func[0]
# Smooth the data: the first argument is the functional data (here a filename),
# the second the smoothing kernel FWHM in mm. The output is a Nifti image.
fmri_img = image.smooth_img(fmri_filename, fwhm=6)

# Visualize the mean of the smoothed EPI image using plotting function
# `plot_epi`
from nilearn.plotting import plot_epi

# First, compute the voxel-wise mean of the smoothed EPI image (first argument)
# using the image processing module `image`
mean_img = image.mean_img(fmri_img)
# Second, we visualize the mean image with coordinates positioned manually
plot_epi(mean_img, title='Smoothed mean EPI', cut_coords=cut_coords)

##############################################################################
# Given the smoothed functional data stored in the variable 'fmri_img', we then
# select two features of interest corresponding to the face and house
# experimental conditions. The method we will use is a simple Student's t-test.
# The section below gives a brief motivating example of why selecting features
# in high-dimensional data is useful.
github nidata / nidata / nidata / _external / nilearn / examples / manipulating_visualizing / plot_roi_extraction.py
### Visualization function ####################################################

import matplotlib.pyplot as plt
from nilearn.plotting import plot_epi, plot_stat_map, plot_roi
from nilearn.input_data import NiftiLabelsMasker

### Find voxels of interest ###################################################

# Smooth the data
from nilearn import image
fmri_filename = haxby_dataset.func[0]
fmri_img = image.smooth_img(fmri_filename, fwhm=6)

# Plot the mean image
mean_img = image.mean_img(fmri_img)
plot_epi(mean_img, title='Smoothed mean EPI', cut_coords=cut_coords)

# Run a T-test for face and houses
from scipy import stats
fmri_data = fmri_img.get_data()
_, p_values = stats.ttest_ind(fmri_data[..., haxby_labels == b'face'],
                              fmri_data[..., haxby_labels == b'house'],
                              axis=-1)

# Use a log scale for p-values
log_p_values = -np.log10(p_values)
log_p_values[np.isnan(log_p_values)] = 0.
log_p_values[log_p_values > 10.] = 10.
plot_stat_map(nibabel.Nifti1Image(log_p_values, fmri_img.get_affine()),
              mean_img, title="p-values", cut_coords=cut_coords)
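
The call above relies on the now-deprecated get_data()/get_affine() accessors; with current nibabel and nilearn the same statistical map can be built roughly as follows, reusing fmri_img, haxby_labels, mean_img and cut_coords defined earlier in the snippet.

# Rough modern equivalent of the p-value map construction above.
import numpy as np
from scipy import stats
from nilearn import image
from nilearn.plotting import plot_stat_map

fmri_data = fmri_img.get_fdata()
_, p_values = stats.ttest_ind(fmri_data[..., haxby_labels == b'face'],
                              fmri_data[..., haxby_labels == b'house'],
                              axis=-1)
log_p_values = -np.log10(p_values)
log_p_values[np.isnan(log_p_values)] = 0.
log_p_values[log_p_values > 10.] = 10.

# new_img_like builds a new image reusing the affine of the reference image
log_p_img = image.new_img_like(fmri_img, log_p_values)
plot_stat_map(log_p_img, mean_img, title="p-values", cut_coords=cut_coords)
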
### Build a mask ##############################################################
github nilearn / nilearn / nilearn / input_data / nifti_maps_masker.py
    imgs = _utils.check_niimg_4d(imgs)

    if resample_on_maps:
        if verbose > 0:
            print("Resampling images")
        imgs = cache(
            image.resample_img, memory, func_memory_level=2,
            memory_level=memory_level)(
                imgs, interpolation="continuous",
                target_shape=maps_img.shape[:3],
                target_affine=maps_img.get_affine())

    if smoothing_fwhm is not None:
        if verbose > 0:
            print("Smoothing images")
        imgs = cache(image.smooth_img, memory, func_memory_level=2,
                     memory_level=memory_level)(
            imgs, fwhm=smoothing_fwhm)

    if verbose > 0:
        print("Extracting maps signals")
    region_signals, labels_ = cache(
        region.img_to_signals_maps, memory, func_memory_level=2,
        memory_level=memory_level)(
            imgs, maps_img, mask_img=mask_img)

    if verbose > 0:
        print("Cleaning extracted signals")
    region_signals = cache(signal.clean, memory, func_memory_level=2,
                           memory_level=memory_level)(
        region_signals, detrend=detrend, standardize=standardize,
        t_r=t_r, low_pass=low_pass, high_pass=high_pass,
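
In user code, this whole pipeline is usually driven through the NiftiMapsMasker front end rather than by calling these internals directly; a rough sketch of the equivalent high-level usage, with placeholder paths and parameters:

# Rough high-level equivalent; paths, t_r and fwhm are placeholders.
from nilearn.input_data import NiftiMapsMasker

masker = NiftiMapsMasker(maps_img='maps.nii.gz', smoothing_fwhm=6,
                         standardize=True, detrend=True, t_r=2.0,
                         memory='nilearn_cache', verbose=1)
region_signals = masker.fit_transform('func.nii.gz')   # (n_timepoints, n_maps)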