How to use the nilearn.image.resample_img function in nilearn

To help you get started, we’ve selected a few nilearn.image.resample_img examples, based on popular ways the function is used in public open source projects.

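Before looking at the project snippets below, here is a minimal, self-contained sketch of the basic call. The file name and the 3 mm target grid are placeholders rather than values taken from any of the projects.

import numpy as np
from nilearn import image

img = image.load_img('functional.nii.gz')        # any 3D/4D Niimg-like input (placeholder path)
target_affine = np.diag((3, 3, 3))               # request 3 mm isotropic voxels
resampled = image.resample_img(img,
                               target_affine=target_affine,
                               interpolation='continuous')
print(resampled.shape, resampled.affine)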

github nilearn / nilearn / nilearn / input_data / nifti_masker.py View on GitHub
        # Excerpt from NiftiMasker.fit: resample the mask image to the requested grid
        self.mask_img_ = self._cache(image.resample_img)(
            self.mask_img_,
            target_affine=self.target_affine,
            target_shape=self.target_shape,
            copy=False, interpolation='nearest')
        if self.target_affine is not None:  # resample image to target affine
            self.affine_ = self.target_affine
        else:  # resample image to mask affine
            self.affine_ = self.mask_img_.affine
        # Load data in memory
        get_data(self.mask_img_)
        if self.verbose > 10:
            print("[%s.fit] Finished fit" % self.__class__.__name__)

        if (self.target_shape is not None) or (self.target_affine is not None):
            if self.reports:
                if imgs is not None:
                    resampl_imgs = self._cache(image.resample_img)(
                        imgs, target_affine=self.affine_,
                        copy=False, interpolation='nearest')
                else:  # imgs not provided to fit
                    resampl_imgs = None
                self._reporting_data['transform'] = [resampl_imgs, self.mask_img_]

        return self
github GalDude33 / Fetal-MRI-Segmentation / unet3d / utils / utils.py View on GitHub
import numpy as np
from nilearn.image import reorder_img, resample_img

def resize(image, new_shape, interpolation="continuous"):
    input_shape = np.asarray(image.shape, dtype=np.float16)
    ras_image = reorder_img(image, resample=interpolation)
    output_shape = np.asarray(new_shape)
    new_spacing = input_shape/output_shape
    new_affine = np.copy(ras_image.affine)
    new_affine[:3, :3] = ras_image.affine[:3, :3] * np.diag(new_spacing)
    return resample_img(ras_image, target_affine=new_affine, target_shape=output_shape, interpolation=interpolation)
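
A hedged usage sketch for the resize helper above; 't1.nii.gz' and the 128-cube output shape are placeholders, and nibabel is assumed to be installed:

import nibabel as nib

img = nib.load('t1.nii.gz')                       # placeholder input volume
resized = resize(img, new_shape=(128, 128, 128))  # resample onto a 128x128x128 grid
resized.to_filename('t1_128.nii.gz')
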
github dPys / PyNets / pynets / core / nodemaker.py View on GitHub
    """
    import numpy as np
    import nibabel as nib
    from pynets.core import nodemaker
    from nilearn.image import resample_img
    from nilearn import masking
    import os.path as op

    mask_img = nib.load(roi)
    mask_aff = mask_img.affine
    mask_data, _ = masking._load_mask_img(roi)
    mask_img.uncache()

    i = 0
    indices = []
    for parcel in parcel_list:
        parcel_vol = np.zeros(mask_data.shape, dtype=bool)
        parcel_data_reshaped = np.asarray(resample_img(parcel, target_affine=mask_aff,
                                                       target_shape=mask_data.shape).dataobj)
        parcel_vol[parcel_data_reshaped == 1] = 1

        # Count number of unique voxels where overlap of parcel and mask occurs
        overlap_count = len(np.unique(np.where((mask_data.astype('uint16') == 1) & (parcel_vol.astype('uint16') == 1))))

        # Count number of total unique voxels within the parcel
        total_count = len(np.unique(np.where((parcel_vol.astype('uint16') == 1))))

        # Calculate % overlap
        try:
            overlap = float(overlap_count / total_count)
        except:
            print("%s%s%s" % ('\nWarning: No overlap of parcel', labels[i], 'with roi mask!\n'))
            overlap = float(0)
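
The overlap bookkeeping above can be illustrated on synthetic arrays; this is a simplified toy sketch of the idea, not the PyNets code itself:

import numpy as np

mask = np.zeros((10, 10, 10), dtype=bool)
parcel = np.zeros((10, 10, 10), dtype=bool)
mask[2:6, 2:6, 2:6] = True                 # 64-voxel cube
parcel[4:8, 4:8, 4:8] = True               # 64-voxel cube, partially overlapping

overlap_voxels = np.count_nonzero(mask & parcel)    # 8 voxels in common
parcel_voxels = np.count_nonzero(parcel)
overlap = overlap_voxels / parcel_voxels if parcel_voxels else 0.0
print(overlap)                             # 8 / 64 = 0.125
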
github nilearn / nilearn / nilearn / input_data / base_masker.py View on GitHub
    if verbose > 0:
        print("[%s] Loading data from %s" % (
            class_name,
            _utils._repr_niimgs(imgs)[:200]))
    imgs = _utils.check_niimg(imgs, atleast_4d=True, ensure_ndim=4)

    sample_mask = parameters.get('sample_mask')
    if sample_mask is not None:
        imgs = image.index_img(imgs, sample_mask)

    target_shape = parameters.get('target_shape')
    target_affine = parameters.get('target_affine')
    if target_shape is not None or target_affine is not None:
        if verbose > 0:
            print("[%s] Resampling images" % class_name)
        imgs = cache(
            image.resample_img, memory, func_memory_level=2,
            memory_level=memory_level, ignore=['copy'])(
                imgs, interpolation="continuous",
                target_shape=target_shape,
                target_affine=target_affine,
                copy=copy)

    smoothing_fwhm = parameters.get('smoothing_fwhm')
    if smoothing_fwhm is not None:
        if verbose > 0:
            print("[%s] Smoothing images" % class_name)
        imgs = cache(
            image.smooth_img, memory, func_memory_level=2,
            memory_level=memory_level)(
                imgs, parameters['smoothing_fwhm'])

    if verbose > 0:
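
Outside the masker machinery, the same resample-then-smooth sequence can be written directly with the public API; the file name, 3 mm grid and 6 mm FWHM below are placeholder choices:

import numpy as np
from nilearn import image

imgs = image.load_img('bold.nii.gz')                        # placeholder 4D image
imgs = image.resample_img(imgs,
                          target_affine=np.diag((3, 3, 3)),
                          interpolation='continuous')
imgs = image.smooth_img(imgs, fwhm=6)                       # 6 mm Gaussian smoothing
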
github nilearn / nilearn / nilearn / _utils / niimg_conversions.py View on GitHub
    for i, niimg in enumerate(niimgs):
        try:
            niimg = check_niimg(
                niimg, ensure_ndim=ndim_minus_one, atleast_4d=atleast_4d)
            if i == 0:
                ndim_minus_one = len(niimg.shape)
                if ref_fov is None:
                    ref_fov = (niimg.get_affine(), niimg.shape[:3])
                    resample_to_first_img = True

            if not _check_fov(niimg, ref_fov[0], ref_fov[1]):
                if target_fov is not None:
                    from nilearn import image  # we avoid a circular import
                    if resample_to_first_img:
                        warnings.warn('Affine is different across subjects.'
                                      ' Realignement on first subject '
                                      'affine forced')
                    niimg = cache(
                        image.resample_img, memory, func_memory_level=2,
                        memory_level=memory_level)(
                            niimg, target_affine=ref_fov[0],
                            target_shape=ref_fov[1])
                else:
                    raise ValueError(
                        "Field of view of image #%d is different from "
                        "reference FOV.\n"
                        "Reference affine:\n%r\nImage affine:\n%r\n"
                        "Reference shape:\n%r\nImage shape:\n%r\n"
                        % (i, ref_fov[0], niimg.get_affine(), ref_fov[1],
                           niimg.shape))
            yield niimg
        except TypeError as exc:
            img_name = ''
            if isinstance(niimg, _basestring):
                img_name = " (%s) " % niimg
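
The field-of-view check and fallback resampling can be sketched for just two images; the file names are placeholders:

import numpy as np
from nilearn import image

ref = image.load_img('subject01.nii.gz')
img = image.load_img('subject02.nii.gz')

same_fov = (img.shape[:3] == ref.shape[:3]
            and np.allclose(img.affine, ref.affine))
if not same_fov:
    # Realign the second image onto the grid of the first one
    img = image.resample_img(img,
                             target_affine=ref.affine,
                             target_shape=ref.shape[:3])
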
github poldracklab / niworkflows / niworkflows / interfaces / images.py View on GitHub
scale_factor = target_zooms / zooms
                target_affine[:3, :3] = reoriented.affine[:3, :3].dot(
                    np.diag(scale_factor)
                )

            if resize:
                # The shift is applied after scaling.
                # Use a proportional shift to maintain relative position in dataset
                size_factor = target_span / (zooms * shape)
                # Use integer shifts to avoid unnecessary interpolation
                offset = (
                    reoriented.affine[:3, 3] * size_factor - reoriented.affine[:3, 3]
                )
                target_affine[:3, 3] = reoriented.affine[:3, 3] + offset.astype(int)

            data = nli.resample_img(reoriented, target_affine, target_shape).dataobj
            conform_xfm = np.linalg.inv(reoriented.affine).dot(target_affine)
            reoriented = reoriented.__class__(data, target_affine, reoriented.header)

        # Image may be reoriented, rescaled, and/or resized
        if reoriented is not orig_img:
            out_name = fname_presuffix(fname, suffix="_ras", newpath=runtime.cwd)
            reoriented.to_filename(out_name)
        else:
            out_name = fname

        transform = ornt_xfm.dot(conform_xfm)
        if not np.allclose(orig_img.affine.dot(transform), target_affine):
            raise ValueError("Original and target affines are not similar")

        mat_name = fname_presuffix(
            fname, suffix=".mat", newpath=runtime.cwd, use_ext=False
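
A hedged sketch of the affine-rescaling idea used above, reduced to conforming a single image to 1 mm voxels; the file name and target zooms are placeholders:

import numpy as np
import nibabel as nib
from nilearn import image

img = nib.load('t1w.nii.gz')
zooms = np.asarray(img.header.get_zooms()[:3])
target_zooms = np.array([1.0, 1.0, 1.0])

target_affine = img.affine.copy()
target_affine[:3, :3] = img.affine[:3, :3].dot(np.diag(target_zooms / zooms))

conformed = image.resample_img(img, target_affine=target_affine)
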
github neurodata / m2g / m2g / utils / reg_utils.py View on GitHub
Resamples the image such that images which have already been aligned
    in real coordinates also overlap in the image/voxel space.

    **Positional Arguments**
            base:
                - Image to be aligned
            ingested:
                - Name of image after alignment
            template:
                - Image that is the target of the alignment
    """
    # Loads images
    template_im = nib.load(template)
    base_im = nib.load(base)
    # Aligns images
    target_im = nl.resample_img(
        base_im,
        target_affine=template_im.get_affine(),
        target_shape=template_im.get_data().shape,
        interpolation="nearest",
    )
    # Saves new image
    nib.save(target_im, ingested)
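
What the docstring describes amounts to putting one image onto another image's voxel grid; a minimal hedged sketch with placeholder file names:

from nilearn import image

base = image.load_img('subject_dwi.nii.gz')
template = image.load_img('template.nii.gz')

aligned = image.resample_img(base,
                             target_affine=template.affine,
                             target_shape=template.shape[:3],
                             interpolation='nearest')
aligned.to_filename('subject_on_template_grid.nii.gz')

For this common case nilearn also offers image.resample_to_img(base, template), which reads the target grid straight from the reference image.
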
github nilearn / nilearn / nilearn / input_data / nifti_spheres_masker.py View on GitHub
def _iter_signals_from_spheres(seeds, niimg, radius, mask_img=None):
    seeds = list(seeds)
    niimg = check_niimg(niimg)
    affine = niimg.get_affine()

    # Compute world coordinates of all in-mask voxels.

    if mask_img is not None:
        mask_img = check_niimg_3d(mask_img)
        mask_img = image.resample_img(mask_img, target_affine=affine,
                                      target_shape=niimg.shape[:3],
                                      interpolation='nearest')
        mask, _ = masking._load_mask_img(mask_img)
        mask_coords = list(np.where(mask != 0))

        X = masking._apply_mask_fmri(niimg, mask_img)
    else:
        mask_coords = list(zip(*np.ndindex(niimg.shape[:3])))
        X = niimg.get_data().reshape([-1, niimg.shape[3]]).T
    mask_coords.append(np.ones(len(mask_coords[0]), dtype=np.int))
    mask_coords = np.asarray(mask_coords)
    mask_coords = np.dot(affine, mask_coords)[:3].T

    if (radius is not None and
            LooseVersion(sklearn.__version__) < LooseVersion('0.16')):
        # Fix for scikit learn versions below 0.16. See
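
The coordinate bookkeeping above (a row of ones appended to the voxel indices, then a dot product with the affine) maps voxel indices to world coordinates in millimetres; a toy sketch with a placeholder 3 mm affine:

import numpy as np

affine = np.diag((3.0, 3.0, 3.0, 1.0))              # placeholder 3 mm isotropic affine
ijk = np.array([[0, 0, 0],
                [10, 5, 2]])                        # voxel indices
homogeneous = np.column_stack([ijk, np.ones(len(ijk))])
world = homogeneous.dot(affine.T)[:, :3]            # coordinates in mm
print(world)                                        # rows: [0, 0, 0] and [30, 15, 6]
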
github nilearn / nilearn / nilearn / input_data / multi_nifti_masker.py View on GitHub
memory=self.memory,
                    verbose=max(0, self.verbose - 1),
                    **mask_args)
        else:
            if imgs is not None:
                warnings.warn('[%s.fit] Generation of a mask has been'
                              ' requested (imgs != None) while a mask has'
                              ' been provided at masker creation. Given mask'
                              ' will be used.' % self.__class__.__name__)
            self.mask_img_ = _utils.check_niimg_3d(self.mask_img)

        # If resampling is requested, resample the mask as well.
        # Resampling: allows the user to change the affine, the shape or both.
        if self.verbose > 0:
            print("[%s.transform] Resampling mask" % self.__class__.__name__)
        self.mask_img_ = self._cache(image.resample_img)(
            self.mask_img_,
            target_affine=self.target_affine,
            target_shape=self.target_shape,
            interpolation='nearest', copy=False)
        if self.target_affine is not None:
            self.affine_ = self.target_affine
        else:
            self.affine_ = self.mask_img_.affine
        # Load data in memory
        self.mask_img_.get_data()
        return self
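
From user code, the resampling shown above is triggered simply by passing target_affine and/or target_shape to the masker; a hedged sketch with placeholder file names (in recent nilearn releases the class lives in nilearn.maskers):

import numpy as np
from nilearn.input_data import MultiNiftiMasker

masker = MultiNiftiMasker(mask_img='mask.nii.gz',
                          target_affine=np.diag((3, 3, 3)),
                          smoothing_fwhm=6)
masker.fit()
# signals = masker.transform(['subj01_bold.nii.gz', 'subj02_bold.nii.gz'])
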
github Danielhiversen / NeuroImageRegistration / image_registration.py View on GitHub
n4bias.inputs.num_threads = NUM_THREADS_ANTS
    n4bias.inputs.input_image = input_file
    n4bias.inputs.output_image = n4_file
    n4bias.run()

    # normalization [0,100], same as template
    normalize_img = nib.load(n4_file)
    temp_data = normalize_img.get_data()
    temp_img = nib.Nifti1Image(temp_data/np.amax(temp_data)*100,
                               normalize_img.affine, normalize_img.header)
    temp_img.to_filename(norm_file)
    del temp_img

    # resample volume to 1 mm slices
    target_affine_3x3 = np.eye(3) * slice_size
    img_3d_affine = resample_img(norm_file, target_affine=target_affine_3x3)
    nib.save(img_3d_affine, resampled_file)

    if not do_bet:
        img.pre_processed_filepath = resampled_file
        return img

    if be_method == 0:
        img.init_transform = path + name + '_InitRegTo' + str(img.fixed_image) + '.h5'

        reg = ants.Registration()
        # reg.inputs.args = "--verbose 1"
        reg.inputs.collapse_output_transforms = True
        reg.inputs.fixed_image = resampled_file
        reg.inputs.moving_image = util.TEMPLATE_VOLUME
        reg.inputs.fixed_image_mask = img.label_inv_filepath
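
One detail worth noting in the snippet above: when target_affine is a 3x3 matrix, as with np.eye(3) * slice_size, resample_img treats it as specifying only the voxel grid and derives the output shape and offset itself. A minimal hedged sketch, with a placeholder path and a 1 mm target:

import numpy as np
from nilearn.image import resample_img

resampled = resample_img('t1_normalized.nii.gz',       # placeholder path
                         target_affine=np.eye(3) * 1.0)
resampled.to_filename('t1_resampled_1mm.nii.gz')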