How to use the nilearn._utils.check_niimg function in nilearn

To help you get started, we've selected a few nilearn examples based on popular ways check_niimg is used in public projects.
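
check_niimg is nilearn's internal input-validation helper: it accepts a filename, an in-memory nibabel image, or an iterable of either, and returns a validated nibabel image. As a minimal sketch before the real-world examples below (the file paths are hypothetical, and _utils is a private module whose API may change between releases):

from nilearn._utils import check_niimg

img = check_niimg('func.nii.gz')                    # hypothetical path
img_4d = check_niimg(img, atleast_4d=True)          # promote 3D input to 4D
img_3d = check_niimg('anat.nii.gz', ensure_ndim=3)  # error unless the image is 3D
print(img_4d.shape, img_4d.affine)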

From ME-ICA/tedana (tedana/io.py):

    if n_echos is None:
        raise ValueError('Number of echos must be specified. '
                         'Confirm that TE times are provided with the `-e` argument.')

    if isinstance(data, list):
        if len(data) == 1:  # a z-concatenated file was provided
            data = data[0]
        elif len(data) == 2:  # inviable -- need more than 2 echos
            raise ValueError('Cannot run `tedana` with only two echos: '
                             '{}'.format(data))
        else:  # individual echo files were provided (surface or volumetric)
            fdata = np.stack([utils.load_image(f) for f in data], axis=1)
            ref_img = check_niimg(data[0])
            ref_img.header.extensions = []
            return np.atleast_3d(fdata), ref_img

    img = check_niimg(data)
    (nx, ny), nz = img.shape[:2], img.shape[2] // n_echos
    fdata = utils.load_image(img.get_data().reshape(nx, ny, nz, n_echos, -1, order='F'))
    # create reference image
    ref_img = img.__class__(np.zeros((nx, ny, nz, 1)), affine=img.affine,
                            header=img.header, extra=img.extra)
    ref_img.header.extensions = []
    ref_img.header.set_sform(ref_img.header.get_sform(), code=1)

    return fdata, ref_img
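
The pattern above validates the input with check_niimg, then reuses the image's affine and header to build a blank single-volume reference image. A condensed sketch of the same idea (make_ref_img is a hypothetical helper, not part of tedana):

import numpy as np
from nilearn._utils import check_niimg

def make_ref_img(fname):
    # Validate the input; a nibabel image comes back whatever the input form
    img = check_niimg(fname)
    nx, ny, nz = img.shape[:3]
    # Blank single-volume image sharing the input's affine and header
    ref_img = img.__class__(np.zeros((nx, ny, nz, 1)), affine=img.affine,
                            header=img.header)
    ref_img.header.extensions = []  # drop header extensions, as the snippet does
    return ref_img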
From arthurmensch/cogspaces (cogspaces/plotting.py):

def plot_all(img, names=None, output_dir=None,
             colors=None,
             view_types=['stat_map'],
             threshold=True,
             n_jobs=1, verbose=10):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if colors is None:
        colors = repeat(None)

    if 'surf_stat_map_right' in view_types or 'surf_stat_map_left' in view_types:
        fetch_surf_fsaverage5()
    filename = img
    img = check_niimg(img, ensure_ndim=4)
    img.get_data()
    if names is None or isinstance(names, str):
        if names is None:
            dirname, filename = os.path.split(filename)
            names = filename.replace('.nii.gz', '')
        names = numbered_names(names)
    else:
        assert len(names) == img.shape[3]

    masker = get_masker()
    components = masker.transform(img)
    n_components = len(components)
    threshold = np.percentile(np.abs(components),
                              100. * (1 - 1. / n_components)) if threshold else 0

    imgs = Parallel(n_jobs=n_jobs, verbose=verbose)(
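
In plot_all above, check_niimg(img, ensure_ndim=4) guarantees a 4D image before the per-component plotting. A short sketch of the same guarantee, iterating over volumes with nilearn's public iter_img (the filename is hypothetical):

from nilearn._utils import check_niimg
from nilearn.image import iter_img

img = check_niimg('components.nii.gz', ensure_ndim=4)  # hypothetical file
for i, vol in enumerate(iter_img(img)):
    print(i, vol.shape)  # each vol is one 3D component map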
From nilearn/nilearn (nilearn/input_data/nifti_spheres_masker.py):

            This parameter is passed to signal.clean. Please see the related
            documentation for details.
            shape: (number of scans, number of confounds)

        Returns
        -------
        signals: 2D numpy.ndarray
            Signal for each region.
            shape: (number of scans, number of regions)

        """
        self._check_fitted()

        logger.log("loading images: %s" %
                   _utils._repr_niimgs(imgs)[:200], verbose=self.verbose)
        imgs = _utils.check_niimg(imgs)

        if self.smoothing_fwhm is not None:
            logger.log("smoothing images", verbose=self.verbose)
            imgs = self._cache(image.smooth_img)(
                imgs, fwhm=self.smoothing_fwhm)

        logger.log("extracting region signals", verbose=self.verbose)
        signals = self._cache(_signals_from_spheres)(
                self.seeds_, imgs, radius=self.radius, mask_img=self.mask_img)

        logger.log("cleaning extracted signals", verbose=self.verbose)
        signals = self._cache(signal.clean)(signals,
                                            detrend=self.detrend,
                                            standardize=self.standardize,
                                            t_r=self.t_r,
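
The transform above chains check_niimg with nilearn's smoothing and signal-cleaning helpers. A rough, uncached sketch of that flow (the filename, fwhm and t_r values are placeholders, and the sphere-extraction step is elided):

from nilearn import image, signal
from nilearn._utils import check_niimg

imgs = check_niimg('func.nii.gz')        # hypothetical file
imgs = image.smooth_img(imgs, fwhm=6.0)  # optional spatial smoothing
# ... extract one time series per seed sphere into `signals`,
# a (n_scans, n_spheres) array, then clean it:
# signals = signal.clean(signals, detrend=True, standardize=True, t_r=2.0)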
From nilearn/nilearn (nilearn/region_decomposition/region_extractor.py):

        random walker procedure, and each assigned label is then decomposed
        into a separate region.
    smooth_fwhm: scalar
        a value in millimetres which is used to smooth an image to locate seed
        points.
    mask_img: Nifti-like image/object, default is None, optional
        an option used to mask the input brain map image.

    Returns
    -------
    regions: 4D Nifti-like image
        contains the segmented regions, with each 3D region appended as a
        separate brain activation image.
    """
    regions_accumulated = []
    map_img = check_niimg(map_img)
    if len(map_img.shape) != 3:
        raise ValueError('A 3D Nifti image or path to a 3D image should '
                         'be submitted.')

    extract_methods = ['auto', 'local_regions']
    if extract_type not in extract_methods:
        message = ("'extract_type' should be given "
                   "either of these {0}").format(extract_methods)
        raise ValueError(message)

    map_data = map_img.get_data()
    affine = map_img.get_affine()
    # Mark the seeds using random walker
    if extract_type == 'local_regions':
        smooth_map_data = _smooth_array(map_data, affine, smooth_fwhm)
        seeds = peak_local_max(smooth_map_data, indices=False,
github arthurmensch / cogspaces / sandbox / maps_old.py View on Github external
def plot(output_dir, baseline_output_dir, plot_components=True,
         plot_classif=True, plot_components_3d=True, n_jobs=1):
    introspect_dir = join(output_dir, 'maps')
    baseline_introspect_dir = join(baseline_output_dir, 'maps')
    plot_dir = join(introspect_dir, 'plot')
    if not os.path.exists(plot_dir):
        os.makedirs(plot_dir)

    if plot_components:
        components = check_niimg(join(introspect_dir, 'components.nii.gz'))
        plot_all(components, plot_dir, 'components', n_jobs=n_jobs)
        # components = check_niimg(join(introspect_dir, 'snr.nii.gz'))
        # plot_all(components, plot_dir, 'snr')
    if plot_components_3d:
        components = check_niimg(join(introspect_dir, 'components.nii.gz'))
        plot_all_3d(components, plot_dir, 'components_3d', n_jobs=n_jobs)
    if plot_classif:
        names = load(join(introspect_dir, 'names.pkl'))
        imgs = join(introspect_dir, 'classif.nii.gz')
        baseline_imgs = join(baseline_introspect_dir, 'classif.nii.gz')
        plot_face_to_face(imgs, baseline_imgs, names, plot_dir, n_jobs=n_jobs)
From nilearn/nilearn (nilearn/regions/region_extractor.py):

    index_of_each_map: numpy array
        an array of list of indices where each index denotes the identity
        of each extracted region to their family of brain maps.

    See Also
    --------
    nilearn.regions.connected_label_regions : A function that can be used to
        extract regions from label-based atlas images.

    nilearn.regions.RegionExtractor : A class that can be used both to extract
        regions from continuous-valued atlas images and to extract time-series
        signals from those regions.
    """
    all_regions_imgs = []
    index_of_each_map = []
    maps_img = check_niimg(maps_img, atleast_4d=True)
    maps = _safe_get_data(maps_img).copy()
    affine = maps_img.affine
    min_region_size = min_region_size / np.abs(np.linalg.det(affine[:3, :3]))

    allowed_extract_types = ['connected_components', 'local_regions']
    if extract_type not in allowed_extract_types:
        message = ("'extract_type' should be given either of these {0} "
                   "You provided extract_type='{1}'").format(allowed_extract_types, extract_type)
        raise ValueError(message)

    if mask_img is not None:
        if not _check_same_fov(maps_img, mask_img):
            mask_img = resample_img(mask_img,
                                    target_affine=maps_img.affine,
                                    target_shape=maps_img.shape[:3],
                                    interpolation="nearest")
From nilearn/nilearn (nilearn/image/resampling.py):

    elif interpolation == 'linear':
        interpolation_order = 1
    elif interpolation == 'nearest':
        interpolation_order = 0
    else:
        message = ("interpolation must be either 'continuous', 'linear' "
                   "or 'nearest' but it was set to '{0}'").format(interpolation)
        raise ValueError(message)

    if isinstance(img, _basestring):
        # Avoid a useless copy
        input_img_is_string = True
    else:
        input_img_is_string = False

    img = _utils.check_niimg(img)

    # noop cases
    if target_affine is None and target_shape is None:
        if copy and not input_img_is_string:
            img = _utils.copy_img(img)
        return img
    if target_affine is not None:
        target_affine = np.asarray(target_affine)

    shape = img.shape
    affine = img.affine

    if (np.all(np.array(target_shape) == shape[:3]) and
            np.allclose(target_affine, affine)):
        if copy and not input_img_is_string:
            img = _utils.copy_img(img)
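
A typical call into the function above resamples an image onto a 3 mm isotropic grid (the filename is hypothetical):

import numpy as np
from nilearn.image import resample_img

resampled = resample_img('anat.nii.gz',  # hypothetical file
                         target_affine=np.diag((3., 3., 3.)),
                         interpolation='continuous')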
From nilearn/nilearn (nilearn/image/image.py):

        If no file matches the regular expression, a ValueError exception is
        raised.

    dtype: {dtype, "auto"}
        Data type toward which the data should be converted. If "auto", the
        data will be converted to int32 if dtype is discrete and float32 if it
        is continuous.

    Returns
    -------
    result: 3D/4D Niimg-like object
        Result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed
        that the returned object has an affine attribute and that
        nilearn.image.get_data returns its data.
    """
    return check_niimg(img, wildcards=wildcards, dtype=dtype)
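
Since load_img is a thin public wrapper around check_niimg, usage is direct (the paths are hypothetical):

from nilearn.image import load_img

img = load_img('anat.nii.gz', dtype='auto')   # hypothetical file
# Glob patterns are resolved when wildcards=True (the default);
# a ValueError is raised if nothing matches.
bold = load_img('sub-01/func/*_bold.nii.gz')  # hypothetical pattern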
From nilearn/nilearn (nilearn/image/image.py):

    Returns
    -------
    mean: nibabel.Nifti1Image
        mean image

    See Also
    --------
    nilearn.image.math_img : For more general operations on images

    """
    if (isinstance(imgs, _basestring) or
            not isinstance(imgs, collections.abc.Iterable)):
        imgs = [imgs, ]

    imgs_iter = iter(imgs)
    first_img = check_niimg(next(imgs_iter))

    # Compute the first mean to retrieve the reference
    # target_affine and target_shape if_needed
    n_imgs = 1
    running_mean, first_affine = _compute_mean(first_img,
                target_affine=target_affine,
                target_shape=target_shape)

    if target_affine is None or target_shape is None:
        target_affine = first_affine
        target_shape = running_mean.shape[:3]

    for this_mean in Parallel(n_jobs=n_jobs, verbose=verbose)(
            delayed(_compute_mean)(n, target_affine=target_affine,
                                   target_shape=target_shape)
            for n in imgs_iter):
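
This normalisation lets mean_img accept either a single image (or path) or an iterable of images; a usage sketch with hypothetical filenames:

from nilearn.image import mean_img

mean_a = mean_img('func_4d.nii.gz')                          # one 4D image
mean_b = mean_img(['run1.nii.gz', 'run2.nii.gz'], n_jobs=2)  # several runs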