How to use the nltools.data.Brain_Data class in nltools

To help you get started, we've selected a few nltools examples based on popular ways Brain_Data is used in public projects.
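If you are new to the library, here is a minimal sketch of constructing a Brain_Data instance. The file name is hypothetical; Brain_Data also accepts nibabel images and lists of either.

# Minimal sketch: load a 4D NIfTI file into a Brain_Data instance.
# 'my_beta_images.nii.gz' is a hypothetical file path.
from nltools.data import Brain_Data
import pandas as pd

dat = Brain_Data('my_beta_images.nii.gz')
dat.Y = pd.DataFrame([0, 1] * (len(dat) // 2))  # attach outcome labels
print(dat.shape())  # (n_images, n_voxels)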


From cosanlab/nltools: tests/test_data.py
    # Test Regress
    dat.X = pd.DataFrame({'Intercept': np.ones(len(dat.Y)),
                          'X1': np.array(dat.Y).flatten()}, index=None)
    out = dat.regress()
    assert out['beta'].shape() == (2,shape_2d[1])

    # Test indexing
    assert out['t'][1].shape()[0] == shape_2d[1]

    # Test threshold
    i=1
    tt = threshold(out['t'][i], out['p'][i], .05)
    assert isinstance(tt,Brain_Data)

    # Test write
    dat.write(str(tmpdir.join('test_write.nii')))
    assert Brain_Data(str(tmpdir.join('test_write.nii')))

    # Test append
    assert dat.append(dat).shape()[0]==shape_2d[0]*2

    # Test distance
    distance = dat.distance(method='euclidean')
    assert distance.shape==(shape_2d[0],shape_2d[0])

    # Test predict
    stats = dat.predict(algorithm='svm',
                        cv_dict={'type': 'kfolds', 'n_folds': 2, 'n': len(dat.Y)},
                        plot=False, **{'kernel': 'linear'})

    # SVM with Platt scaling (probability=True); this outputs the
    # probability of each class. No cross-validation here (cv_dict=None).
    stats = dat.predict(algorithm='svm', cv_dict=None, plot=False,
                        **{'kernel': 'linear', 'probability': True})

    assert isinstance(stats['weight_map'],Brain_Data)
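The test above references dat and shape_2d without showing their setup. A plausible reconstruction uses nltools' Simulator; the actual test fixture may differ.

# Hedged reconstruction of the fixtures used above; the real test file
# may set these up differently.
import pandas as pd
from nltools.simulator import Simulator
from nltools.data import Brain_Data

sim = Simulator()
sim.create_data([0, 1], sigma=1, reps=5, output_dir=None)  # simulated images
dat = Brain_Data(data=sim.data, Y=pd.DataFrame(sim.y))
shape_2d = dat.shape()  # (n_images, n_voxels)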
From cosanlab/nltools: tests/test_analysis.py
def test_roc(tmpdir, sim):
    r = 10
    sigma = .1
    y = [0, 1]
    n_reps = 10
    output_dir = str(tmpdir)
    sim.create_data(y, sigma, reps=n_reps, output_dir=None)
    dat = Brain_Data(data=sim.data, Y=pd.DataFrame(sim.y))

    algorithm = 'svm'
    # cv = {'type': 'kfolds', 'n_folds': 5, 'subject_id': sim.rep_id}
    extra = {'kernel': 'linear'}

    output = dat.predict(algorithm='svm',**extra)
    # predict = analysis.Predict(sim.data, sim.y, algorithm=algorithm,
    #                            output_dir=output_dir,
    #                            cv_dict=cv,
    #                            **extra)

    # predict.predict()  # save_plot isn't working for SVM analysis;
    # analysis.Predict is slated for deprecation, so not a big deal.
    # predict.predict(save_plot=False)

    # Single-Interval
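The snippet is truncated here; presumably it continues with nltools' Roc class. Below is a sketch of a single-interval ROC analysis under that assumption; the 'yfit_all' key in the predict() output dict is assumed.

# Hedged sketch of the single-interval ROC step, assuming `output` is
# the stats dict returned by dat.predict() above.
import numpy as np
from nltools.analysis import Roc

roc = Roc(input_values=output['yfit_all'],
          binary_outcome=np.array(sim.y) == 1)
roc.calculate()
roc.summary()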
From cosanlab/nltools: tests/test_data.py
    assert isinstance(stats['weight_map'], Brain_Data)

    # Ridge classification; no cross-validation here (cv_dict=None).
    stats = dat.predict(algorithm='ridgeClassifier', cv_dict=None, plot=False)
    assert isinstance(stats['weight_map'],Brain_Data)

    # Test Similarity
    r = dat.similarity(stats['weight_map'])
    assert len(r)==shape_2d[0]
    r2 = dat.similarity(stats['weight_map'].to_nifti())
    assert len(r2)==shape_2d[0]
    
    # Test apply_mask - might move part of this to test mask suite
    s1 = create_sphere([41, 64, 55], radius=10)
    assert isinstance(s1,nb.Nifti1Image)
    s2 = Brain_Data(s1)
    masked_dat = dat.apply_mask(s1)
    assert masked_dat.shape()[1]==np.sum(s2.data!=0)

    # Test extract_roi
    mask = create_sphere([41, 64, 55], radius=10)
    assert len(dat.extract_roi(mask))==shape_2d[0]
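extract_roi returns one summary value per image, which is handy for building region-by-condition data frames; a small sketch:

# Hedged sketch: average activity within a spherical ROI for each image.
from nltools.mask import create_sphere

roi = create_sphere([41, 64, 55], radius=10)
roi_means = dat.extract_roi(roi)  # 1-D array, one mean value per image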
From cosanlab/nltools: examples/02_Analysis/plot_univariate_regression.py
#########################################################################
# Run Linear Contrast
# -------------------
# 
# Obviously, univariate regression isn't a great idea when there are only
# three observations per subject. Because we predict a monotonic increase in
# pain across pain intensities, we can also calculate a linear contrast
# c=(-1, 0, 1). This is straightforward with matrix multiplication on the
# centered pain intensity values.

all_sub = []
for sub in subject_id.unique():
    sdat = data[metadata['SubjectID']==sub]
    sdat.X = pd.DataFrame(data={'Pain':sdat.X['PainLevel']})
    all_sub.append(sdat * np.array(sdat.X['Pain'] - 2))  # center PainLevel (1, 2, 3) -> (-1, 0, 1)
all_sub = Brain_Data(all_sub)

#########################################################################
# We can again run a one-sample t-test at every voxel using an FDR threshold
# of q < 0.001.

t_stats = all_sub.ttest(threshold_dict={'fdr':.001})
t_stats['thr_t'].plot()
From cosanlab/nltools: examples/02_Analysis/plot_univariate_regression.py
#########################################################################
# Run Univariate Regression
# -------------------------
# 
# We can loop over subjects and predict the intensity of each voxel from a 
# simple model of pain intensity and an intercept.  This is just for illustration
# purposes as there are only 3 observations per subject.  We initialize an empty
# Brain_Data() instance and loop over all subjects, running a univariate regression
# separately for each participant.  We aggregate the beta estimates for pain intensity
# across subjects.

from nltools.data import Brain_Data
import numpy as np
import pandas as pd

all_sub = Brain_Data()
for s in subject_id.unique():
    sdat = data[np.where(metadata['SubjectID']==s)[0]]
    sdat.X = pd.DataFrame(data={'Intercept':np.ones(sdat.shape()[0]),'Pain':sdat.X['PainLevel']})
    stats = sdat.regress()
    all_sub = all_sub.append(stats['beta'][1])  # index 1 = 'Pain' beta map

#########################################################################
# We can now run a one-sample t-test at every voxel to test whether it is 
# significantly different from zero across participants.  We will threshold
# the results using FDR correction, q < 0.001.

t_stats = all_sub.ttest(threshold_dict={'fdr':.001})
t_stats['thr_t'].plot()

From cosanlab/nltools: nltools/mask.py
    if not isinstance(mask, Brain_Data):
        if isinstance(mask, nib.Nifti1Image):
            mask = Brain_Data(mask, mask=custom_mask)
        else:
            raise ValueError('Make sure mask is a nibabel or Brain_Data '
                             'instance.')

    if len(mask.shape()) > 1:
        if len(mask) > 1:
            out = mask.empty()

            # Create list of masks and find any overlaps
            m_list = []
            for x in range(len(mask)):
                m_list.append(mask[x].to_nifti())
            intersect = intersect_masks(m_list, threshold=1, connected=False)
            intersect = Brain_Data(nib.Nifti1Image(
                            np.abs(intersect.get_data()-1),
                            intersect.get_affine()), mask=custom_mask)

            merge = []
            if auto_label:
                # Combine all masks into sequential order
                # ignoring any areas of overlap
                for i in range(len(m_list)):
                    merge.append(np.multiply(
                                Brain_Data(m_list[i], mask=custom_mask).data,
                                intersect.data)*(i+1))
                out.data = np.sum(np.array(merge).T, 1).astype(int)
            else:
                # Collapse masks using value as label
                for i in range(len(m_list)):
                    merge.append(np.multiply(
                                Brain_Data(m_list[i], mask=custom_mask).data,
                                intersect.data))
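This snippet appears to be the body of nltools.mask.collapse_mask; a hedged usage sketch under that assumption:

# Hedged usage sketch, assuming this is nltools.mask.collapse_mask:
# combine several sphere masks into a single labeled mask image.
from nltools.data import Brain_Data
from nltools.mask import create_sphere, collapse_mask

spheres = Brain_Data([create_sphere([41, 64, 55], radius=10),
                      create_sphere([20, 30, 40], radius=10)])
labeled = collapse_mask(spheres, auto_label=True)  # voxels labeled 1, 2, ...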
From cosanlab/nltools: nltools/stats.py
    if not isinstance(thresh, list):
        raise ValueError('Make sure thresh is a list of p-values')

    affine = t_map.to_nifti().get_affine()
    pos_out = np.zeros(t_map.to_nifti().shape)
    neg_out = deepcopy(pos_out)
    for thr in thresh:
        t = threshold(t_map, p_map, thr=thr)
        t_pos = deepcopy(t)
        t_pos.data = np.zeros(len(t_pos.data))
        t_neg = deepcopy(t_pos)
        t_pos.data[t.data > 0] = 1
        t_neg.data[t.data < 0] = 1
        pos_out = pos_out+t_pos.to_nifti().get_data()
        neg_out = neg_out+t_neg.to_nifti().get_data()
    pos_out = pos_out + neg_out*-1
    return Brain_Data(nib.Nifti1Image(pos_out, affine))
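This looks like the body of nltools.stats.multi_threshold, which overlays several significance levels on a single map. A usage sketch under that assumption, where dat is any Brain_Data instance with multiple images:

# Hedged sketch, assuming this is nltools.stats.multi_threshold.
from nltools.stats import multi_threshold

out = dat.ttest()  # returns a dict with 't' and 'p' Brain_Data maps
overlay = multi_threshold(out['t'], out['p'], thresh=[.001, .01, .05])
overlay.plot()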
From cosanlab/nltools: examples/01_DataOperations/plot_mask.py
from sklearn.metrics import pairwise_distances
from nltools.data import Adjacency
from nltools.mask import roi_to_brain
import pandas as pd
import numpy as np

sub_list = data.X['SubjectID'].unique()

# perform matrix multiplication to compute linear contrast for each subject
lin_contrast = []
for sub in sub_list:
    lin_contrast.append(data[data.X['SubjectID'] == sub] * np.array([1, -1,  0])) 

# concatenate list of Brain_Data instances into a single instance
lin_contrast = Brain_Data(lin_contrast) 

# Compute correlation distance between each ROI
dist = Adjacency(pairwise_distances(lin_contrast.extract_roi(mask), metric='correlation'), matrix_type='distance')

# Threshold the distance Adjacency matrix, binarize, and plot as a heatmap
dist.threshold(upper=.4, binarize=True).plot()

# Convert the thresholded Adjacency matrix to a NetworkX graph
g = dist.threshold(upper=.4, binarize=True).to_graph()

# Compute degree centrality and convert back into Brain_Data instance.
degree_centrality = roi_to_brain(pd.Series(dict(g.degree())), mask_x)

degree_centrality.plot()
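The snippet above assumes that a parcellation (mask) and its expanded per-ROI binary masks (mask_x) were defined earlier in the example. A hedged sketch of that setup; the file name is hypothetical:

# Hedged setup for the snippet above; 'parcellation.nii.gz' is a
# hypothetical integer-labeled parcellation image.
from nltools.data import Brain_Data
from nltools.mask import expand_mask

mask = Brain_Data('parcellation.nii.gz')  # single image, integer ROI labels
mask_x = expand_mask(mask)                # one binary image per ROI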
From cosanlab/nltools: nltools/utils.py
def check_brain_data(data):
    '''Check if data is a Brain_Data Instance.'''
    from nltools.data import Brain_Data

    if not isinstance(data, Brain_Data):
        if isinstance(data, nib.Nifti1Image):
            data = Brain_Data(data)
        else:
            raise ValueError("Make sure data is a Brain_Data instance.")
    return data
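A quick illustration of the coercion behavior, using create_sphere (which returns a Nifti1Image) as input:

# check_brain_data wraps a Nifti1Image into Brain_Data, passes Brain_Data
# through unchanged, and raises ValueError for anything else.
from nltools.mask import create_sphere
from nltools.utils import check_brain_data

sphere = create_sphere([41, 64, 55], radius=10)  # a nibabel Nifti1Image
dat = check_brain_data(sphere)  # now a Brain_Data instance
dat = check_brain_data(dat)     # already Brain_Data; returned as-is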
From cosanlab/nltools: nltools/mask.py
        activation[mask_r] = 1
        translation_affine = np.array([[1, 0, 0, p[0]-m[0]],
                                       [0, 1, 0, p[1]-m[1]],
                                       [0, 0, 1, p[2]-m[2]],
                                       [0, 0, 0, 1]])

        return nib.Nifti1Image(activation, affine=translation_affine)

    if any(isinstance(i, list) for i in coordinates):
        if isinstance(radius, list):
            if len(radius) != len(coordinates):
                raise ValueError('Make sure length of radius list matches'
                                 'length of coordinate list.')
        elif isinstance(radius, int):
            radius = [radius]*len(coordinates)
        out = Brain_Data(nib.Nifti1Image(np.zeros_like(mask.get_data()),
                                         affine=mask.affine), mask=mask)
        for r, c in zip(radius, coordinates):
            out = out + Brain_Data(sphere(r, c, mask), mask=mask)
    else:
        out = Brain_Data(sphere(radius, coordinates, mask), mask=mask)
    out = out.to_nifti()
    out.get_data()[out.get_data() > 0.5] = 1
    out.get_data()[out.get_data() < 0.5] = 0
    return out
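Following the branches above, create_sphere can be called with a single coordinate or with matched lists of coordinates and radii; a short usage sketch:

# Usage sketch for create_sphere, mirroring the branches above.
from nltools.mask import create_sphere

single = create_sphere([41, 64, 55], radius=10)      # one sphere
multi = create_sphere([[41, 64, 55], [20, 30, 40]],  # two spheres with
                      radius=[10, 5])                # matched radii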