How to use the dipy.reconst.dti.TensorModel class in dipy

To help you get started, we’ve selected a few dipy examples based on popular ways TensorModel is used in public projects.
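
Before looking at the project snippets, here is a minimal, hedged sketch of the typical TensorModel workflow. The file names are placeholders and the I/O helpers (load_nifti, read_bvals_bvecs, gradient_table) are dipy's standard utilities; adapt them to your own data.

import dipy.reconst.dti as dti
from dipy.core.gradients import gradient_table
from dipy.io.gradients import read_bvals_bvecs
from dipy.io.image import load_nifti

# Load the diffusion-weighted volume and its gradient scheme (placeholder paths).
data, affine = load_nifti('dwi.nii.gz')
bvals, bvecs = read_bvals_bvecs('dwi.bval', 'dwi.bvec')
gtab = gradient_table(bvals, bvecs)

# Fit the diffusion tensor and pull out the usual scalar maps.
tenmodel = dti.TensorModel(gtab)
tenfit = tenmodel.fit(data)
fa = tenfit.fa  # fractional anisotropy
md = tenfit.md  # mean diffusivity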


github nipy / dipy / 1.0.0 / _downloads / 034a6637eb62a224f34de94d97ecf916 / tracking_stopping_criterion.py View on Github
- threshold: float

**Stopping States**

- 'ENDPOINT': stops at a position where metric_map < threshold; the streamline
reached the target stopping area.
- 'OUTSIDEIMAGE': stops at a position outside of metric_map; the streamline
reached an area outside the image where no direction data is available.
- 'TRACKPOINT': stops at a position because no direction is available; the
streamline is stopping where metric_map >= threshold, but there is no valid
direction to follow.
- 'INVALIDPOINT': N/A.
"""


tensor_model = TensorModel(gtab)
tenfit = tensor_model.fit(data, mask=labels > 0)
FA = fractional_anisotropy(tenfit.evals)

threshold_criterion = ThresholdStoppingCriterion(FA, .2)

fig = plt.figure()
mask_fa = FA.copy()
mask_fa[mask_fa < 0.2] = 0
plt.xticks([])
plt.yticks([])
plt.imshow(mask_fa[:, :, data.shape[2] // 2].T, cmap='gray', origin='lower',
           interpolation='nearest')
fig.tight_layout()
fig.savefig('threshold_fa.png')

"""
github nipy / dipy / doc / examples / reconst_mcsd.py View on Github
above:
"""

hmrf = TissueClassifierHMRF()
initial_segmentation, final_segmentation, PVE = hmrf.classify(ap, nclass, beta)
print(PVE.shape)

"""
Now that we have the segmentation, we would like to classify the tissues into
``wm``, ``gm`` and ``csf``. We do so using the Fractional Anisotropy (FA) and
Mean Diffusivity (MD) metrics obtained from the Diffusion Tensor Imaging (DTI)
model fit as follows:
"""

# Construct the DTI model
tenmodel = dti.TensorModel(gtab)

# Fit the denoised data with the DTI model
tenfit = tenmodel.fit(denoised_arr)

# obtain the FA and MD metrics
FA = tenfit.fa
MD = tenfit.md

"""
Now that we have the FA and the MD obtained from DTI, we use them to
distinguish between ``wm``, ``gm`` and ``csf``. As we can see from the shape of
the PVE, the last dimension refers to the classification; we will index it as
0 -> ``csf``, 1 -> ``gm`` and 2 -> ``wm``, as per their FA values and the
confidence of prediction obtained from ``TissueClassifierHMRF``.
"""
github yeatmanlab / pyAFQ / AFQ / dti.py View on Github
def _fit(gtab, data, mask=None):
    dtimodel = dti.TensorModel(gtab)
    return dtimodel.fit(data, mask=mask)
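A hedged usage sketch for the wrapper above (the variable names here are assumptions, not part of pyAFQ's API): the returned object is an ordinary dipy TensorFit, so the standard scalar maps are directly available.

tenfit = _fit(gtab, data, mask=brain_mask)  # gtab, data and brain_mask assumed
fa_map = tenfit.fa  # fractional anisotropy
md_map = tenfit.md  # mean diffusivity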
github nipy / dipy / dipy / reconst / csdeconv.py View on Github
ratio : float
        The ratio of the smallest to the largest eigenvalue of the response.

    Notes
    -----
    See csdeconv.auto_response() or csdeconv.recursive_response() if you don't
    have a computed mask for the response function estimation.

    References
    ----------
    .. [1] Tournier, J.D., et al. NeuroImage 2004. Direct estimation of the
    fiber orientation density function from diffusion-weighted MRI
    data using spherical deconvolution
    """

    ten = TensorModel(gtab)
    indices = np.where(mask > 0)

    if indices[0].size == 0:
        msg = "No voxel in mask with value > 0 were found."
        warnings.warn(msg, UserWarning)
        return (np.nan, np.nan), np.nan

    tenfit = ten.fit(data[indices])
    lambdas = tenfit.evals[:, :2]
    S0s = data[indices][:, np.nonzero(gtab.b0s_mask)[0]]

    return _get_response(S0s, lambdas)
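For context, a hedged example of how a single-fiber response is usually estimated and then consumed in released dipy versions; the helper name and signature have changed across releases (e.g. auto_response vs. auto_response_ssst), so treat this as a sketch rather than the exact API.

from dipy.reconst.csdeconv import (auto_response,
                                   ConstrainedSphericalDeconvModel)

# Estimate the response from a high-FA region, then feed it to a CSD model.
response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
csd_model = ConstrainedSphericalDeconvModel(gtab, response)
csd_fit = csd_model.fit(data, mask=mask)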
github yeatmanlab / pyAFQ / AFQ / api.py View on Github
def _dti_fit(self, row):
        dti_params_file = self._dti(row)
        dti_params = nib.load(dti_params_file).get_fdata()
        tm = dpy_dti.TensorModel(row['gtab'])
        tf = dpy_dti.TensorFit(tm, dti_params)
        return tf
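A hedged sketch of the round trip this method relies on (gtab, data, affine and the file name are placeholders, not pyAFQ internals): TensorModel.fit produces a 12-parameter map (eigenvalues plus eigenvectors) that can be written to disk and later rewrapped in a TensorFit without refitting.

import nibabel as nib
import dipy.reconst.dti as dti

tm = dti.TensorModel(gtab)
params = tm.fit(data).model_params  # shape (..., 12)
nib.save(nib.Nifti1Image(params, affine), 'dti_params.nii.gz')

# Later: reload the saved parameters and rebuild the fit object directly.
tf = dti.TensorFit(tm, nib.load('dti_params.nii.gz').get_fdata())
fa = tf.fa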
github nidata / nidata / nidata / multimodal / hcp / example2.py View on Github
# only fetch anatomical files.
files = dataset.fetch(n_subjects=1, data_types=['anat'])
for fil in files:
    print(fil)

import matplotlib
import matplotlib.pyplot as plt

import nibabel as nib

import dipy.reconst.dti as dti
import dipy.core.gradients as grad
gtab = grad.gradient_table(files[0], files[1])
data = nib.load(files[2]).get_data()
model = dti.TensorModel(gtab)

fit = model.fit(data)
plt.matshow(fit.fa[..., fit.fa.shape[-1]//2], cmap=matplotlib.cm.bone)
plt.show()
github AthenaEPI / dmipy / dmipy / tissue_response / white_matter_response.py View on Github
if data.ndim == 4:
        # calculate brain mask on 4D data (x, y, z, DWI)
        b0_mask, mask = median_otsu(
            input_volume=data,
            vol_idx=np.where(acquisition_scheme.b0_mask)[0],
            median_radius=4, numpass=4)  # based on dipy default
        # needs to be eroded 3 times.
        mask_eroded = binary_erosion(mask, iterations=3)
        data_to_fit = data[mask_eroded]
    else:
        # can't calculate a brain mask on non-4D data;
        # assume the data was already prepared.
        data_to_fit = data.reshape([-1, data_shape[-1]])

    gtab = gtab_dmipy2dipy(acquisition_scheme)
    tenmod = dti.TensorModel(gtab)
    tenfit = tenmod.fit(data_to_fit)
    fa = tenfit.fa

    # selected based on FA
    selected_indices = np.argsort(fa)[-N_candidate_voxels:]
    selected_data = data_to_fit[selected_indices]
    S0_wm, TR2_wm_model = estimate_TR2_anisotropic_tissue_response_model(
        acquisition_scheme, selected_data)
    return S0_wm, TR2_wm_model, selected_indices
github nipy / dipy / scratch / profile_dti.py View on Github
To use:

import profile_dti as p
import dipy.reconst.dti as dti
lprun -f dti.restore_fit_tensor -f p.tm.fit_method p.func()

"""

import dipy.core.gradients as grad
import dipy.data as dpd
import dipy.reconst.dti as dti

img, gtab = dpd.read_stanford_hardi()
dd = img.get_data()

tm = dti.TensorModel(gtab)
tf = tm.fit(dd)

def func():
    tf.odf(dpd.default_sphere)

if __name__=="__main__":
    func()
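
A note on the usage line in the docstring above: lprun is the IPython magic shipped with the line_profiler package, so a hedged, spelled-out session would look like this.

# In an IPython session with line_profiler installed:
#   %load_ext line_profiler
#   import profile_dti as p
#   import dipy.reconst.dti as dti
#   %lprun -f dti.restore_fit_tensor -f p.tm.fit_method p.func()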