How to use the check_brain_data function from nltools.utils

To help you get started, we’ve selected a few nltools examples based on popular ways the library is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github cosanlab / nltools / nltools / data / brain_data.py View on Github external
""" Mask Brain_Data instance

        Note target data will be resampled into the same space as the mask. If you would like the mask
        resampled into the Brain_Data space, then set resample_mask_to_brain=True.

        Args:
            mask: (Brain_Data or nifti object) mask to apply to Brain_Data object.
            resample_mask_to_brain: (bool) Will resample mask to brain space before applying mask (default=False).

        Returns:
            masked: (Brain_Data) masked Brain_Data object

        """

        masked = deepcopy(self)
        mask = check_brain_data(mask)
        if not check_brain_data_is_single(mask):
            raise ValueError('Mask must be a single image')

        n_vox = len(self) if check_brain_data_is_single(self) else self.shape()[1]
        if resample_mask_to_brain: 
            mask = resample_to_img(mask.to_nifti(), masked.to_nifti())
            mask = check_brain_data(mask, masked.mask)

        nifti_masker = NiftiMasker(mask_img=mask.to_nifti()).fit()

        if n_vox == len(mask):
            if check_brain_data_is_single(masked):
                masked.data = masked.data[mask.data.astype(bool)]
            else:
                masked.data = masked.data[:, mask.data.astype(bool)]
        else:
github cosanlab / nltools / nltools / data / brain_data.py View on Github external
def __init__(self, data, mask):
    """Split *data* into groups defined by the ROIs of *mask*.

    Args:
        data: (Brain_Data or nifti) data to be partitioned into groups.
        mask: (Brain_Data or nifti) ROI mask; a single labeled image with
            more than two integer levels is expanded into one binary mask
            per ROI.

    Raises:
        ValueError: if a single-image mask has two or fewer unique values
            (i.e. it defines fewer than two groups).
    """
    data = check_brain_data(data)
    mask = check_brain_data(mask)

    # ROI labels must be integers before counting/expanding them.
    mask.data = np.round(mask.data).astype(int)

    if len(mask.shape()) <= 1:
        # Single image: guard against a mask without enough ROI labels,
        # then expand each integer label into its own binary mask.
        if len(np.unique(mask.data)) <= 2:
            raise ValueError('mask does not have enough groups.')
        mask = expand_mask(mask)

    self.mask = mask
    self.split(data, mask)
github cosanlab / nltools / nltools / data / brain_data.py View on Github external
def __init__(self, data, mask):
    """Split *data* into groups defined by the ROIs of *mask*.

    Args:
        data: (Brain_Data or nifti) data to be partitioned into groups.
        mask: (Brain_Data or nifti) ROI mask; a single labeled image with
            more than two integer levels is expanded into one binary mask
            per ROI.

    Raises:
        ValueError: if a single-image mask has two or fewer unique values
            (i.e. it defines fewer than two groups).
    """
    data = check_brain_data(data)
    mask = check_brain_data(mask)

    # ROI labels must be integers before counting/expanding them.
    mask.data = np.round(mask.data).astype(int)

    if len(mask.shape()) <= 1:
        # Single image: guard against a mask without enough ROI labels,
        # then expand each integer label into its own binary mask.
        if len(np.unique(mask.data)) <= 2:
            raise ValueError('mask does not have enough groups.')
        mask = expand_mask(mask)

    self.mask = mask
    self.split(data, mask)
github cosanlab / nltools / nltools / data / brain_data.py View on Github external
def __init__(self, data, mask):
    """Split *data* into groups defined by the ROIs of *mask*.

    Args:
        data: (Brain_Data or nifti) data to be partitioned into groups.
        mask: (Brain_Data or nifti) ROI mask; a single labeled image with
            more than two integer levels is expanded into one binary mask
            per ROI.

    Raises:
        ValueError: if a single-image mask has two or fewer unique values
            (i.e. it defines fewer than two groups).
    """
    data = check_brain_data(data)
    mask = check_brain_data(mask)

    # ROI labels must be integers before counting/expanding them.
    mask.data = np.round(mask.data).astype(int)

    if len(mask.shape()) <= 1:
        # Single image: guard against a mask without enough ROI labels,
        # then expand each integer label into its own binary mask.
        if len(np.unique(mask.data)) <= 2:
            raise ValueError('mask does not have enough groups.')
        mask = expand_mask(mask)

    self.mask = mask
    self.split(data, mask)
github cosanlab / nltools / nltools / data / brain_data.py View on Github external
axis: (int) axis to align on

        Returns:
            out: (dict) a dictionary containing transformed object,
                transformation matrix, and the shared response matrix

        '''

        if method not in ['probabilistic_srm', 'deterministic_srm', 'procrustes']:
            raise ValueError("Method must be ['probabilistic_srm','deterministic_srm','procrustes']")
        
        source = self.copy()
        data1 = self.data.copy()

        if method == 'procrustes':
            target = check_brain_data(target)
            data2 = target.data.copy()
            
            # pad columns if different shapes
            sizes_1 = [x.shape[1] for x in [data1, data2]]
            C = max(sizes_1)
            y = data1[:, 0:C]
            missing = C - y.shape[1]
            add = np.zeros((y.shape[0], missing))
            data1 = np.append(y, add, axis=1)
        else:
            data2 = target.copy()

        if axis == 1:
            data1 = data1.T
            data2 = data2.T
github cosanlab / nltools / nltools / data / brain_data.py View on Github external
def similarity(self, image, method='correlation'):
        """ Calculate similarity of Brain_Data() instance with single
            Brain_Data or Nibabel image

            Args:
                image: (Brain_Data, nifti)  image to evaluate similarity
                method: (str) Type of similarity
                        ['correlation','dot_product','cosine']
            Returns:
                pexp: (list) Outputs a vector of pattern expression values

        """

        image = check_brain_data(image)

        # Check to make sure masks are the same for each dataset and if not
        # create a union mask
        # This might be handy code for a new Brain_Data method
        if np.sum(self.nifti_masker.mask_img.get_data() == 1) != np.sum(image.nifti_masker.mask_img.get_data() == 1):
            new_mask = intersect_masks([self.nifti_masker.mask_img,
                                        image.nifti_masker.mask_img],
                                       threshold=1, connected=False)
            new_nifti_masker = NiftiMasker(mask_img=new_mask)
            data2 = new_nifti_masker.fit_transform(self.to_nifti())
            image2 = new_nifti_masker.fit_transform(image.to_nifti())
        else:
            data2 = self.data
            image2 = image.data

        def vector2array(data):
github cosanlab / nltools / nltools / data / brain_data.py View on Github external
method: (str) alignment method to use
                ['probabilistic_srm','deterministic_srm','procrustes']
            n_features: (int) number of features to align to common space.
                If None then will select number of voxels
            axis: (int) axis to align on

        Returns:
            out: (dict) a dictionary containing transformed object,
                transformation matrix, and the shared response matrix

        '''

        source = self.copy()
        common = target.copy()

        target = check_brain_data(target)

        if method not in ['probabilistic_srm', 'deterministic_srm', 'procrustes']:
            raise ValueError("Method must be ['probabilistic_srm','deterministic_srm','procrustes']")

        data1 = source.data.T
        data2 = target.data.T

        if axis == 1:
            data1 = data1.T
            data2 = data2.T

        out = dict()
        if method in ['deterministic_srm', 'probabilistic_srm']:
            if n_features is None:
                n_features = data1.shape[0]
            if method == 'deterministic_srm':
github cosanlab / nltools / nltools / data / brain_data.py View on Github external
def append(self, data, **kwargs):
        """ Append data to Brain_Data instance

        Args:
            data: (Brain_Data) Brain_Data instance to append
            kwargs: optional inputs to Design_Matrix append

        Returns:
            out: (Brain_Data) new appended Brain_Data instance
        """

        data = check_brain_data(data)

        if self.isempty():
            out = deepcopy(data)
        else:
            error_string = ("Data to append has different number of voxels "
                            "then Brain_Data instance.")
            if len(self.shape()) == 1 & len(data.shape()) == 1:
                if self.shape()[0] != data.shape()[0]:
                    raise ValueError(error_string)
            elif len(self.shape()) == 1 & len(data.shape()) > 1:
                if self.shape()[0] != data.shape()[1]:
                    raise ValueError(error_string)
            elif len(self.shape()) > 1 & len(data.shape()) == 1:
                if self.shape()[1] != data.shape()[0]:
                    raise ValueError(error_string)
            elif self.shape()[1] != data.shape()[1]:
github cosanlab / nltools / nltools / data / brain_data.py View on Github external
Args:
            mask: (nifti) nibabel mask can be binary or numbered for
                  different rois
            metric: type of extraction method ['mean', 'median', 'pca'], (default=mean)
                    NOTE: Only mean currently works!
            n_components: if metric='pca', number of components to return (takes any input into sklearn.Decomposition.PCA)

        Returns:
            out: mean within each ROI across images

        """

        metrics = ['mean','median','pca']

        mask = check_brain_data(mask)
        ma = mask.copy()

        if metric not in metrics:
            raise NotImplementedError

        if len(np.unique(ma.data)) == 2:
            masked = self.apply_mask(ma)
            if check_brain_data_is_single(masked):
                if metric == 'mean':
                    out = masked.mean()
                elif metric == 'median':
                    out = masked.median()
                else:
                    raise ValueError('Not possible to run PCA on a single image')
            else:
                if metric == 'mean':
github cosanlab / nltools / nltools / data / brain_data.py View on Github external
def extract_roi(self, mask, method='mean'):
    """ Extract activity from mask

    Args:
        mask: (nifti) nibabel mask can be binary or numbered for
              different rois
        method: type of extraction method (default=mean)
                NOTE: Only mean currently works!

    Returns:
        out: mean within each ROI across images

    Raises:
        ValueError: if method is not 'mean', or if the mask contains a
            single unique value (selects no ROIs).
    """
    mask = check_brain_data(mask)
    ma = mask.copy()

    if method != 'mean':
        raise ValueError('Only mean is currently implemented.')

    n_levels = len(np.unique(ma.data))
    if n_levels == 2:
        # Binary mask: average across the selected voxels for each image.
        out = np.mean(self.data[:, np.where(ma.data)].squeeze(), axis=1)
    elif n_levels > 2:
        # Labeled mask: make sure each ROI id is an integer, expand into
        # one binary mask per ROI, then average within each ROI.
        ma.data = np.round(ma.data).astype(int)
        all_mask = expand_mask(ma)
        out = []
        for i in range(all_mask.shape()[0]):
            out.append(np.mean(self.data[:, np.where(all_mask[i].data)].squeeze(), axis=1))
        out = np.array(out)
    else:
        # Fix: a constant mask (one unique value) previously fell through
        # both branches and 'return out' raised UnboundLocalError. Raise a
        # clear, catchable error instead.
        raise ValueError('Mask must contain at least two distinct values.')
    return out