How to use the tedana.utils module in tedana

To help you get started, we’ve selected a few tedana examples based on popular ways tedana.utils is used in public projects.
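
The snippets below lean on a handful of tedana.utils helpers, notably andb, dice, unmask, and filewrite. As a quick orientation, here is a minimal sketch of unmask, which places values defined only for in-mask voxels back into a full-size array; the mask and values are made up purely for illustration.

import numpy as np
from tedana import utils

# A boolean mask over five "voxels" and one value per in-mask voxel
# (illustrative data only).
mask = np.array([True, False, True, True, False])
masked_vals = np.array([1., 2., 3.])

# unmask() restores the full-size array, filling zeros outside the mask.
full = utils.unmask(masked_vals, mask)
print(full)  # [1. 0. 2. 3. 0.]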

ME-ICA/tedana: tedana/selection/tedpca.py (view on GitHub)
    # Elbow (value, not index) of the normalized variance explained curve
    eigenvalue_elbow = getelbow(comptable['normalized variance explained'],
                                return_val=True)

    # Threshold on the step-to-step change in normalized variance explained,
    # built from the second half of the difference curve
    diff_varex_norm = np.abs(np.diff(comptable['normalized variance explained']))
    lower_diff_varex_norm = diff_varex_norm[(len(diff_varex_norm) // 2):]
    varex_norm_thr = np.mean([lower_diff_varex_norm.max(),
                              diff_varex_norm.min()])
    varex_norm_min = comptable['normalized variance explained'][
        (len(diff_varex_norm) // 2) +
        np.arange(len(lower_diff_varex_norm))[lower_diff_varex_norm >= varex_norm_thr][0] + 1]
    varex_norm_cum = np.cumsum(comptable['normalized variance explained'])

    # F-statistic bounds for this number of echoes
    fmin, fmid, fmax = getfbounds(n_echos)
    if int(kdaw) == -1:
        # Limit kappa to values between fmin and fmid, then threshold at the
        # elbow of that restricted range
        lim_idx = utils.andb([comptable['kappa'] < fmid,
                              comptable['kappa'] > fmin]) == 2
        kappa_lim = comptable.loc[lim_idx, 'kappa'].values
        kappa_thr = kappa_lim[getelbow(kappa_lim)]

        # Same procedure for rho
        lim_idx = utils.andb([comptable['rho'] < fmid, comptable['rho'] > fmin]) == 2
        rho_lim = comptable.loc[lim_idx, 'rho'].values
        rho_thr = rho_lim[getelbow(rho_lim)]
        stabilize = True
        LGR.info('kdaw set to -1. Switching TEDPCA algorithm to '
                 'kundu-stabilize')
    elif int(rdaw) == -1:
        lim_idx = utils.andb([comptable['rho'] < fmid, comptable['rho'] > fmin]) == 2
        rho_lim = comptable.loc[lim_idx, 'rho'].values
        rho_thr = rho_lim[getelbow(rho_lim)]
    else:
        kappa_thr = np.average(  # (excerpt truncated)
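
In the branch above, utils.andb is used as a "both conditions hold" filter: it sums a list of boolean arrays element-wise, so comparing the result to 2 keeps only the entries where both comparisons are true. A stand-alone sketch of that pattern, with made-up kappa values and F bounds:

import numpy as np
from tedana import utils

kappa = np.array([5., 25., 60., 120.])   # illustrative component kappa values
fmin, fmid = 10., 100.                   # illustrative F-statistic bounds

# andb() sums the boolean arrays, so == 2 means "both conditions are true".
in_range = utils.andb([kappa < fmid, kappa > fmin]) == 2
print(kappa[in_range])  # keeps 25. and 60.
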
ME-ICA/tedana: tedana/selection/select_comps.py (view on GitHub)
    # Number of significant voxels in the cluster-extent thresholded
    # F-statistic maps for the R2 and S0 models
    comptable['countsigFR2'] = F_R2_clmaps.sum(axis=0)
    comptable['countsigFS0'] = F_S0_clmaps.sum(axis=0)

    """
    Generate Dice values for R2 and S0 models
    - dice_FR2: Dice value of cluster-extent thresholded maps of R2-model betas
      and F-statistics.
    - dice_FS0: Dice value of cluster-extent thresholded maps of S0-model betas
      and F-statistics.
    """
    comptable['dice_FR2'] = np.zeros(all_comps.shape[0])
    comptable['dice_FS0'] = np.zeros(all_comps.shape[0])
    for i_comp in acc:
        comptable.loc[i_comp, 'dice_FR2'] = utils.dice(Br_R2_clmaps[:, i_comp],
                                                       F_R2_clmaps[:, i_comp])
        comptable.loc[i_comp, 'dice_FS0'] = utils.dice(Br_S0_clmaps[:, i_comp],
                                                       F_S0_clmaps[:, i_comp])

    comptable.loc[np.isnan(comptable['dice_FR2']), 'dice_FR2'] = 0
    comptable.loc[np.isnan(comptable['dice_FS0']), 'dice_FS0'] = 0

    """
    Generate three metrics of component noise:
    - countnoise: Number of "noise" voxels (voxels highly weighted for
      component, but not from clusters)
    - signal-noise_t: T-statistic for two-sample t-test of F-statistics from
      "signal" voxels (voxels in clusters) against "noise" voxels (voxels not
      in clusters) for R2 model.
    - signal-noise_p: P-value from t-test.
    """
    comptable['countnoise'] = 0
    comptable['signal-noise_t'] = 0
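
utils.dice above scores the spatial overlap between each component's cluster-extent thresholded beta map and the corresponding thresholded F-statistic map. The Dice coefficient of two binary maps is 2|A ∩ B| / (|A| + |B|), so identical maps score 1 and non-overlapping maps score 0. A toy example with made-up maps:

import numpy as np
from tedana import utils

beta_clmap = np.array([1, 1, 0, 0, 1])    # made-up thresholded beta map
fstat_clmap = np.array([1, 0, 0, 1, 1])   # made-up thresholded F-statistic map

# Two voxels overlap out of 3 + 3 nonzero voxels: 2 * 2 / (3 + 3) ≈ 0.67
print(utils.dice(beta_clmap, fstat_clmap))
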
ME-ICA/tedana: tedana/utils/io.py (view on GitHub)
    # get variance explained by retained components
    betas = model.get_lstsq_coeffs(dmdata.T, mmix, mask=None)
    varexpl = (1 - ((dmdata.T - betas.dot(mmix.T))**2.).sum() /
               (dmdata**2.).sum()) * 100
    LGR.info('Variance explained by ICA decomposition: '
             '{:.02f}%'.format(varexpl))

    # create component and de-noised time series and save to files
    hikts = betas[:, acc].dot(mmix.T[acc, :])
    midkts = betas[:, midk].dot(mmix.T[midk, :])
    lowkts = betas[:, rej].dot(mmix.T[rej, :])
    dnts = data[mask] - lowkts - midkts

    if len(acc) != 0:
        fout = utils.filewrite(utils.unmask(hikts, mask),
                               'hik_ts_{0}'.format(suffix), ref_img)
        LGR.info('Writing high-Kappa time series: {}'.format(op.abspath(fout)))

    if len(midk) != 0:
        fout = utils.filewrite(utils.unmask(midkts, mask),
                               'midk_ts_{0}'.format(suffix), ref_img)
        LGR.info('Writing mid-Kappa time series: {}'.format(op.abspath(fout)))

    if len(rej) != 0:
        fout = utils.filewrite(utils.unmask(lowkts, mask),
                               'lowk_ts_{0}'.format(suffix), ref_img)
        LGR.info('Writing low-Kappa time series: {}'.format(op.abspath(fout)))

    fout = utils.filewrite(utils.unmask(dnts, mask),
                           'dn_ts_{0}'.format(suffix), ref_img)
    LGR.info('Writing denoised time series: {}'.format(op.abspath(fout)))
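
The reconstruction step above is plain matrix multiplication: the spatial component weights (betas, voxels by components) times the transposed mixing matrix (components by time) give a voxels-by-time series for any subset of components. A shape-only sketch with random data and made-up dimensions:

import numpy as np

n_voxels, n_vols, n_comps = 1000, 200, 30      # illustrative sizes
betas = np.random.rand(n_voxels, n_comps)      # spatial weights per component
mmix = np.random.rand(n_vols, n_comps)         # ICA mixing matrix (time x components)
acc = [0, 3, 7]                                # made-up "accepted" component indices

# Accepted components' weights times their time courses -> voxels x time
hikts = betas[:, acc].dot(mmix.T[acc, :])
print(hikts.shape)  # (1000, 200)
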
ME-ICA/tedana: tedana/stats.py (view on GitHub)
    # coerce X to >=2d
    X = np.atleast_2d(X)

    if len(X) == 1:
        X = X.T

    if add_const:  # add intercept, if specified
        X = np.column_stack([X, np.ones((len(X), 1))])

    betas = np.linalg.lstsq(X, mdata, rcond=None)[0].T
    if add_const:  # drop beta for intercept, if specified
        betas = betas[:, :-1]

    if mask is not None:
        betas = utils.unmask(betas, mask)

    return betas
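
The function body above is an ordinary least-squares fit of every column of the data against a design matrix, with an optional intercept that is appended before the fit and dropped from the returned betas. A self-contained NumPy version of the same fit, with made-up shapes:

import numpy as np

n_obs, n_regressors, n_series = 100, 3, 500   # illustrative sizes
X = np.random.rand(n_obs, n_regressors)       # design matrix
mdata = np.random.rand(n_obs, n_series)       # data: observations x series (e.g. voxels)

# Append an intercept column, solve the least-squares problem, then drop the
# intercept's beta so only the regressors of interest remain.
X_const = np.column_stack([X, np.ones((len(X), 1))])
betas = np.linalg.lstsq(X_const, mdata, rcond=None)[0].T
betas = betas[:, :-1]
print(betas.shape)  # (500, 3)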