How to use tedana - 10 common examples

To help you get started, we’ve selected a few tedana examples based on popular ways it is used in public projects.
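
The snippets below come straight from the tedana source tree. If you just want to run the package end to end, the usual entry point is the `tedana` command line tool or the `tedana_workflow` function. A minimal sketch, assuming three echo files and the keyword names used by recent tedana releases (check `tedana --help` for the exact options in your installed version):

from tedana.workflows import tedana_workflow

# Hypothetical file names and echo times -- substitute your own acquisition.
echo_files = ['sub-01_echo-1_bold.nii.gz',
              'sub-01_echo-2_bold.nii.gz',
              'sub-01_echo-3_bold.nii.gz']
echo_times = [14.5, 38.5, 62.5]  # TEs in milliseconds

# Runs the full multi-echo denoising workflow and writes results to out_dir.
tedana_workflow(data=echo_files, tes=echo_times, out_dir='tedana_out')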

github ME-ICA / tedana / tedana/selection/tedpca.py (View on GitHub)
    eigenvalue_elbow = getelbow(comptable['normalized variance explained'],
                                return_val=True)

    diff_varex_norm = np.abs(np.diff(comptable['normalized variance explained']))
    lower_diff_varex_norm = diff_varex_norm[(len(diff_varex_norm) // 2):]
    varex_norm_thr = np.mean([lower_diff_varex_norm.max(),
                              diff_varex_norm.min()])
    varex_norm_min = comptable['normalized variance explained'][
        (len(diff_varex_norm) // 2) +
        np.arange(len(lower_diff_varex_norm))[lower_diff_varex_norm >= varex_norm_thr][0] + 1]
    varex_norm_cum = np.cumsum(comptable['normalized variance explained'])

    fmin, fmid, fmax = getfbounds(n_echos)
    if int(kdaw) == -1:
        lim_idx = utils.andb([comptable['kappa'] < fmid,
                              comptable['kappa'] > fmin]) == 2
        kappa_lim = comptable.loc[lim_idx, 'kappa'].values
        kappa_thr = kappa_lim[getelbow(kappa_lim)]

        lim_idx = utils.andb([comptable['rho'] < fmid, comptable['rho'] > fmin]) == 2
        rho_lim = comptable.loc[lim_idx, 'rho'].values
        rho_thr = rho_lim[getelbow(rho_lim)]
        stabilize = True
        LGR.info('kdaw set to -1. Switching TEDPCA algorithm to '
                 'kundu-stabilize')
    elif int(rdaw) == -1:
        lim_idx = utils.andb([comptable['rho'] < fmid, comptable['rho'] > fmin]) == 2
        rho_lim = comptable.loc[lim_idx, 'rho'].values
        rho_thr = rho_lim[getelbow(rho_lim)]
    else:
        kappa_thr = np.average(
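
This excerpt (and the near-identical ones from `eigendecomp.py` further down) turns the normalized variance explained of each PCA component into an elbow-based cutoff; the excerpt stops mid-statement, and the full `else` branch appears in a later snippet. A self-contained sketch of the thresholding logic on a synthetic, made-up variance-explained curve, using plain NumPy arrays in place of the real component table:

import numpy as np

# Made-up, monotonically decreasing "normalized variance explained" values.
varex_norm = np.array([0.30, 0.20, 0.12, 0.08, 0.06, 0.05, 0.045, 0.04,
                       0.035, 0.03, 0.012, 0.01, 0.008, 0.006, 0.004])

# Same arithmetic as the snippet above, minus the pandas component table.
diff_varex_norm = np.abs(np.diff(varex_norm))
lower_diff_varex_norm = diff_varex_norm[len(diff_varex_norm) // 2:]
varex_norm_thr = np.mean([lower_diff_varex_norm.max(),
                          diff_varex_norm.min()])

# Index of the first component in the lower half of the curve whose drop in
# variance explained meets the threshold; its value becomes varex_norm_min.
cutoff = (len(diff_varex_norm) // 2 +
          np.arange(len(lower_diff_varex_norm))[
              lower_diff_varex_norm >= varex_norm_thr][0] + 1)
varex_norm_min = varex_norm[cutoff]
varex_norm_cum = np.cumsum(varex_norm)

print(cutoff, varex_norm_min)  # 10 0.012 for the values above
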
github ME-ICA / tedana / tedana/io.py (View on GitHub)
        Output data where `S` is samples, `E` is echos, and `T` is time
    ref_img : :obj:`str` or :obj:`numpy.ndarray`
        Filepath to reference image for saving output files or NIFTI-like array
    """
    if n_echos is None:
        raise ValueError('Number of echos must be specified. '
                         'Confirm that TE times are provided with the `-e` argument.')

    if isinstance(data, list):
        if len(data) == 1:  # a z-concatenated file was provided
            data = data[0]
        elif len(data) == 2:  # inviable -- need more than 2 echos
            raise ValueError('Cannot run `tedana` with only two echos: '
                             '{}'.format(data))
        else:  # individual echo files were provided (surface or volumetric)
            fdata = np.stack([utils.load_image(f) for f in data], axis=1)
            ref_img = check_niimg(data[0])
            ref_img.header.extensions = []
            return np.atleast_3d(fdata), ref_img

    img = check_niimg(data)
    (nx, ny), nz = img.shape[:2], img.shape[2] // n_echos
    fdata = utils.load_image(img.get_data().reshape(nx, ny, nz, n_echos, -1, order='F'))
    # create reference image
    ref_img = img.__class__(np.zeros((nx, ny, nz, 1)), affine=img.affine,
                            header=img.header, extra=img.extra)
    ref_img.header.extensions = []
    ref_img.header.set_sform(ref_img.header.get_sform(), code=1)

    return fdata, ref_img
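
The body above is tedana's multi-echo loader: given either a list of per-echo files or a single z-concatenated image, it returns a samples x echos x time array plus a reference image that is reused when writing outputs. A usage sketch, assuming the function is `tedana.io.load_data` (the name and signature are cropped out of the excerpt, so treat them as an assumption):

from tedana import io

# Hypothetical per-echo files; a single z-concatenated NIfTI also works,
# as long as n_echos is given so the z-axis can be split back out.
echo_files = ['echo1.nii.gz', 'echo2.nii.gz', 'echo3.nii.gz']
data, ref_img = io.load_data(echo_files, n_echos=len(echo_files))

print(data.shape)  # (samples, echos, time)
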
github ME-ICA / tedana / tedana/decomposition/eigendecomp.py (View on GitHub)
    """
    eigenvalue_elbow = getelbow(comptable['normalized variance explained'],
                                return_val=True)

    diff_varex_norm = np.abs(np.diff(comptable['normalized variance explained']))
    lower_diff_varex_norm = diff_varex_norm[(len(diff_varex_norm) // 2):]
    varex_norm_thr = np.mean([lower_diff_varex_norm.max(),
                              diff_varex_norm.min()])
    varex_norm_min = comptable['normalized variance explained'][
        (len(diff_varex_norm) // 2) +
        np.arange(len(lower_diff_varex_norm))[lower_diff_varex_norm >= varex_norm_thr][0] + 1]
    varex_norm_cum = np.cumsum(comptable['normalized variance explained'])

    fmin, fmid, fmax = utils.getfbounds(n_echos)
    if int(kdaw) == -1:
        lim_idx = utils.andb([comptable['kappa'] < fmid,
                              comptable['kappa'] > fmin]) == 2
        kappa_lim = comptable.loc[lim_idx, 'kappa'].values
        kappa_thr = kappa_lim[getelbow(kappa_lim)]

        lim_idx = utils.andb([comptable['rho'] < fmid, comptable['rho'] > fmin]) == 2
        rho_lim = comptable.loc[lim_idx, 'rho'].values
        rho_thr = rho_lim[getelbow(rho_lim)]
        stabilize = True
        LGR.info('kdaw set to -1. Switching TEDPCA method to '
                 'kundu-stabilize')
    elif int(rdaw) == -1:
        lim_idx = utils.andb([comptable['rho'] < fmid, comptable['rho'] > fmin]) == 2
        rho_lim = comptable.loc[lim_idx, 'rho'].values
        rho_thr = rho_lim[getelbow(rho_lim)]
    else:
        kappa_thr = np.average(
github ME-ICA / tedana / tedana/selection/tedpca.py (View on GitHub)
        or -1 (a special value). Default is 1.
    stabilize : :obj:`bool`, optional
        Whether to stabilize convergence by reducing dimensionality, for low
        quality data. Default is False.

    Returns
    -------
    comptable : :obj:`pandas.DataFrame`
        Component table with components classified as 'accepted', 'rejected',
        or 'ignored'.
    """
    LGR.info('Performing PCA component selection with Kundu decision tree')
    comptable['classification'] = 'accepted'
    comptable['rationale'] = ''

    eigenvalue_elbow = getelbow(comptable['normalized variance explained'],
                                return_val=True)

    diff_varex_norm = np.abs(np.diff(comptable['normalized variance explained']))
    lower_diff_varex_norm = diff_varex_norm[(len(diff_varex_norm) // 2):]
    varex_norm_thr = np.mean([lower_diff_varex_norm.max(),
                              diff_varex_norm.min()])
    varex_norm_min = comptable['normalized variance explained'][
        (len(diff_varex_norm) // 2) +
        np.arange(len(lower_diff_varex_norm))[lower_diff_varex_norm >= varex_norm_thr][0] + 1]
    varex_norm_cum = np.cumsum(comptable['normalized variance explained'])

    fmin, fmid, fmax = getfbounds(n_echos)
    if int(kdaw) == -1:
        lim_idx = utils.andb([comptable['kappa'] < fmid,
                              comptable['kappa'] > fmin]) == 2
        kappa_lim = comptable.loc[lim_idx, 'kappa'].values
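
Per the docstring, this selection step returns the component table with a 'classification' of 'accepted', 'rejected', or 'ignored' plus a free-text 'rationale' for each component. A short sketch of how such a table can be inspected afterwards; the DataFrame here is fabricated for illustration rather than loaded from a real tedana run:

import pandas as pd

# Fabricated component table with the columns described in the docstring.
comptable = pd.DataFrame({
    'kappa': [120.7, 45.2, 18.3],
    'rho': [10.4, 12.1, 60.8],
    'classification': ['accepted', 'accepted', 'rejected'],
    'rationale': ['', '', 'example rationale;'],
})

print(comptable['classification'].value_counts())
accepted = comptable.index[comptable['classification'] == 'accepted'].tolist()
print('accepted components:', accepted)
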
github ME-ICA / tedana / tedana/decomposition/eigendecomp.py (View on GitHub)
        Kappa dimensionality augmentation weight. Must be a non-negative float,
        or -1 (a special value).
    rdaw : :obj:`float`
        Rho dimensionality augmentation weight. Must be a non-negative float,
        or -1 (a special value).
    stabilize : :obj:`bool`, optional
        Whether to stabilize convergence by reducing dimensionality, for low
        quality data. Default is False.

    Returns
    -------
    comptable : :obj:`pandas.DataFrame`
        Component table with components classified as 'accepted', 'rejected',
        or 'ignored'.
    """
    eigenvalue_elbow = getelbow(comptable['normalized variance explained'],
                                return_val=True)

    diff_varex_norm = np.abs(np.diff(comptable['normalized variance explained']))
    lower_diff_varex_norm = diff_varex_norm[(len(diff_varex_norm) // 2):]
    varex_norm_thr = np.mean([lower_diff_varex_norm.max(),
                              diff_varex_norm.min()])
    varex_norm_min = comptable['normalized variance explained'][
        (len(diff_varex_norm) // 2) +
        np.arange(len(lower_diff_varex_norm))[lower_diff_varex_norm >= varex_norm_thr][0] + 1]
    varex_norm_cum = np.cumsum(comptable['normalized variance explained'])

    fmin, fmid, fmax = utils.getfbounds(n_echos)
    if int(kdaw) == -1:
        lim_idx = utils.andb([comptable['kappa'] < fmid,
                              comptable['kappa'] > fmin]) == 2
        kappa_lim = comptable.loc[lim_idx, 'kappa'].values
github ME-ICA / tedana / tedana/selection/tedpca.py (View on GitHub)
    lower_diff_varex_norm = diff_varex_norm[(len(diff_varex_norm) // 2):]
    varex_norm_thr = np.mean([lower_diff_varex_norm.max(),
                              diff_varex_norm.min()])
    varex_norm_min = comptable['normalized variance explained'][
        (len(diff_varex_norm) // 2) +
        np.arange(len(lower_diff_varex_norm))[lower_diff_varex_norm >= varex_norm_thr][0] + 1]
    varex_norm_cum = np.cumsum(comptable['normalized variance explained'])

    fmin, fmid, fmax = getfbounds(n_echos)
    if int(kdaw) == -1:
        lim_idx = utils.andb([comptable['kappa'] < fmid,
                              comptable['kappa'] > fmin]) == 2
        kappa_lim = comptable.loc[lim_idx, 'kappa'].values
        kappa_thr = kappa_lim[getelbow(kappa_lim)]

        lim_idx = utils.andb([comptable['rho'] < fmid, comptable['rho'] > fmin]) == 2
        rho_lim = comptable.loc[lim_idx, 'rho'].values
        rho_thr = rho_lim[getelbow(rho_lim)]
        stabilize = True
        LGR.info('kdaw set to -1. Switching TEDPCA algorithm to '
                 'kundu-stabilize')
    elif int(rdaw) == -1:
        lim_idx = utils.andb([comptable['rho'] < fmid, comptable['rho'] > fmin]) == 2
        rho_lim = comptable.loc[lim_idx, 'rho'].values
        rho_thr = rho_lim[getelbow(rho_lim)]
    else:
        kappa_thr = np.average(
            sorted([fmin, (getelbow(comptable['kappa'], return_val=True) / 2), fmid]),
            weights=[kdaw, 1, 1])
        rho_thr = np.average(
            sorted([fmin, (getelbow_cons(comptable['rho'], return_val=True) / 2), fmid]),
            weights=[rdaw, 1, 1])
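
The `else` branch is where the kdaw / rdaw weights actually enter: each threshold is a weighted average over the relevant F bound, half the elbow value, and fmid, with the user-supplied weight attached to the smallest of the sorted candidates. A small worked example with made-up numbers (real values come from `getfbounds(n_echos)` and `getelbow`):

import numpy as np

# Made-up stand-ins: fmin/fmid would come from getfbounds(n_echos) and the
# elbow from getelbow(comptable['kappa'], return_val=True).
fmin, fmid = 6.0, 30.0
kappa_elbow_half = 22.0
kdaw = 10.0

candidates = sorted([fmin, kappa_elbow_half, fmid])   # [6.0, 22.0, 30.0]
kappa_thr = np.average(candidates, weights=[kdaw, 1, 1])
print(kappa_thr)  # (10*6 + 22 + 30) / 12 = 9.33..., pulled toward fmin
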
github ME-ICA / tedana / tedana/decomposition/eigendecomp.py (View on GitHub)
    lower_diff_varex_norm = diff_varex_norm[(len(diff_varex_norm) // 2):]
    varex_norm_thr = np.mean([lower_diff_varex_norm.max(),
                              diff_varex_norm.min()])
    varex_norm_min = comptable['normalized variance explained'][
        (len(diff_varex_norm) // 2) +
        np.arange(len(lower_diff_varex_norm))[lower_diff_varex_norm >= varex_norm_thr][0] + 1]
    varex_norm_cum = np.cumsum(comptable['normalized variance explained'])

    fmin, fmid, fmax = utils.getfbounds(n_echos)
    if int(kdaw) == -1:
        lim_idx = utils.andb([comptable['kappa'] < fmid,
                              comptable['kappa'] > fmin]) == 2
        kappa_lim = comptable.loc[lim_idx, 'kappa'].values
        kappa_thr = kappa_lim[getelbow(kappa_lim)]

        lim_idx = utils.andb([comptable['rho'] < fmid, comptable['rho'] > fmin]) == 2
        rho_lim = comptable.loc[lim_idx, 'rho'].values
        rho_thr = rho_lim[getelbow(rho_lim)]
        stabilize = True
        LGR.info('kdaw set to -1. Switching TEDPCA method to '
                 'kundu-stabilize')
    elif int(rdaw) == -1:
        lim_idx = utils.andb([comptable['rho'] < fmid, comptable['rho'] > fmin]) == 2
        rho_lim = comptable.loc[lim_idx, 'rho'].values
        rho_thr = rho_lim[getelbow(rho_lim)]
    else:
        kappa_thr = np.average(
            sorted([fmin, (getelbow(comptable['kappa'], return_val=True) / 2), fmid]),
            weights=[kdaw, 1, 1])
        rho_thr = np.average(
            sorted([fmin, (getelbow_cons(comptable['rho'], return_val=True) / 2), fmid]),
            weights=[rdaw, 1, 1])
github ME-ICA / tedana / tedana/io.py (View on GitHub)
    if isinstance(data, list):
        if len(data) == 1:  # a z-concatenated file was provided
            data = data[0]
        elif len(data) == 2:  # inviable -- need more than 2 echos
            raise ValueError('Cannot run `tedana` with only two echos: '
                             '{}'.format(data))
        else:  # individual echo files were provided (surface or volumetric)
            fdata = np.stack([utils.load_image(f) for f in data], axis=1)
            ref_img = check_niimg(data[0])
            ref_img.header.extensions = []
            return np.atleast_3d(fdata), ref_img

    img = check_niimg(data)
    (nx, ny), nz = img.shape[:2], img.shape[2] // n_echos
    fdata = utils.load_image(img.get_data().reshape(nx, ny, nz, n_echos, -1, order='F'))
    # create reference image
    ref_img = img.__class__(np.zeros((nx, ny, nz, 1)), affine=img.affine,
                            header=img.header, extra=img.extra)
    ref_img.header.extensions = []
    ref_img.header.set_sform(ref_img.header.get_sform(), code=1)

    return fdata, ref_img
github ME-ICA / tedana / tedana/io.py (View on GitHub)
        Output data where `S` is samples, `E` is echos, and `T` is time
    ref_img : :obj:`str` or :obj:`numpy.ndarray`
        Filepath to reference image for saving output files or NIFTI-like array
    """
    if n_echos is None:
        raise ValueError('Number of echos must be specified. '
                         'Confirm that TE times are provided with the `-e` argument.')

    if isinstance(data, list):
        if len(data) == 1:  # a z-concatenated file was provided
            data = data[0]
        elif len(data) == 2:  # inviable -- need more than 2 echos
            raise ValueError('Cannot run `tedana` with only two echos: '
                             '{}'.format(data))
        else:  # individual echo files were provided (surface or volumetric)
            fdata = np.stack([utils.load_image(f) for f in data], axis=1)
            ref_img = check_niimg(data[0])
            ref_img.header.extensions = []
            return np.atleast_3d(fdata), ref_img

    img = check_niimg(data)
    (nx, ny), nz = img.shape[:2], img.shape[2] // n_echos
    fdata = utils.load_image(img.get_data().reshape(nx, ny, nz, n_echos, -1, order='F'))

    # create reference image
    ref_img = img.__class__(np.zeros((nx, ny, nz)), affine=img.affine,
                            header=img.header, extra=img.extra)
    ref_img.header.extensions = []
    ref_img.header.set_sform(ref_img.header.get_sform(), code=1)

    return fdata, ref_img
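
The io.py excerpts finish by building a reference image (an empty volume that keeps the input's affine and header, with a cleaned-up sform) so that later outputs land back on the original voxel grid. A generic nibabel sketch of how such a reference is typically used when saving a derived map; this is plain nibabel, not tedana's own writer:

import nibabel as nib
import numpy as np

ref_img = nib.load('echo1.nii.gz')                      # hypothetical input
nx, ny, nz = ref_img.shape[:3]

derived_map = np.zeros((nx, ny, nz), dtype=np.float32)  # stand-in result
out_img = nib.Nifti1Image(derived_map, affine=ref_img.affine,
                          header=ref_img.header)
nib.save(out_img, 'derived_map.nii.gz')
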
github ME-ICA / tedana / tedana/decomposition/eigendecomp.py (View on GitHub)
        Component table with components classified as 'accepted', 'rejected',
        or 'ignored'.
    """
    eigenvalue_elbow = getelbow(comptable['normalized variance explained'],
                                return_val=True)

    diff_varex_norm = np.abs(np.diff(comptable['normalized variance explained']))
    lower_diff_varex_norm = diff_varex_norm[(len(diff_varex_norm) // 2):]
    varex_norm_thr = np.mean([lower_diff_varex_norm.max(),
                              diff_varex_norm.min()])
    varex_norm_min = comptable['normalized variance explained'][
        (len(diff_varex_norm) // 2) +
        np.arange(len(lower_diff_varex_norm))[lower_diff_varex_norm >= varex_norm_thr][0] + 1]
    varex_norm_cum = np.cumsum(comptable['normalized variance explained'])

    fmin, fmid, fmax = utils.getfbounds(n_echos)
    if int(kdaw) == -1:
        lim_idx = utils.andb([comptable['kappa'] < fmid,
                              comptable['kappa'] > fmin]) == 2
        kappa_lim = comptable.loc[lim_idx, 'kappa'].values
        kappa_thr = kappa_lim[getelbow(kappa_lim)]

        lim_idx = utils.andb([comptable['rho'] < fmid, comptable['rho'] > fmin]) == 2
        rho_lim = comptable.loc[lim_idx, 'rho'].values
        rho_thr = rho_lim[getelbow(rho_lim)]
        stabilize = True
        LGR.info('kdaw set to -1. Switching TEDPCA method to '
                 'kundu-stabilize')
    elif int(rdaw) == -1:
        lim_idx = utils.andb([comptable['rho'] < fmid, comptable['rho'] > fmin]) == 2
        rho_lim = comptable.loc[lim_idx, 'rho'].values
        rho_thr = rho_lim[getelbow(rho_lim)]