How to use the nimare.io.convert_sleuth_to_dataset function in NiMARE

To help you get started, we've selected a few NiMARE examples based on popular ways convert_sleuth_to_dataset is used in public projects.

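convert_sleuth_to_dataset reads a Sleuth-format coordinate text file (the format used by GingerALE) and returns a NiMARE Dataset object. A minimal usage sketch, assuming a Sleuth file at a placeholder path:

from nimare.io import convert_sleuth_to_dataset

# 'my_foci.txt' is a placeholder path to a Sleuth-format coordinate file.
dset = convert_sleuth_to_dataset('my_foci.txt', target='ale_2mm')
print(len(dset.ids), 'studies;', dset.coordinates.shape[0], 'foci')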

github neurostuff / NiMARE / nimare / workflows / ale.py
        LGR.info('Performing meta-analysis...')
        results = ale.fit(dset)
        corr = FWECorrector(method='permutation', n_iters=n_iters,
                            voxel_thresh=v_thr, n_cores=n_cores)
        cres = corr.transform(results)

        boilerplate = boilerplate.format(
            n_exps=len(dset.ids),
            n_subs=n_subs,
            n_foci=dset.coordinates.shape[0],
            unc=v_thr,
            n_iters=n_iters,
            fwhm_str=fwhm_str)
    else:
        dset1 = convert_sleuth_to_dataset(sleuth_file, target='ale_2mm')
        dset2 = convert_sleuth_to_dataset(sleuth_file2, target='ale_2mm')
        n_subs1 = dset1.coordinates.drop_duplicates('id')['n'].astype(float).astype(int).sum()
        n_subs2 = dset2.coordinates.drop_duplicates('id')['n'].astype(float).astype(int).sum()

        boilerplate = """
Activation likelihood estimation (ALE; Turkeltaub, Eden, Jones, & Zeffiro,
2002; Eickhoff, Bzdok, Laird, Kurth, & Fox, 2012; Turkeltaub et al., 2012)
meta-analyses were performed using NiMARE for each of two datasets.
The first input dataset included {n_foci1} foci from {n_subs1} participants
across {n_exps1} studies/experiments. The second input dataset included
{n_foci2} foci from {n_subs2} participants across {n_exps2} studies/experiments.

Foci were convolved with Gaussian kernels {fwhm_str},
implemented on the MNI 152 template (Fonov et al., 2009; Fonov et al., 2011)
at 2x2x2mm resolution.

-> If the cluster-level FWE-corrected results were used, include the following:
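In the snippet above, each dataset's participant total is recovered from the Dataset's coordinates table: every focus row carries its study's sample size in the 'n' column, so deduplicating on 'id' leaves one row per study before summing. A toy illustration of that one-liner (column names follow the snippet; the values are made up):

import pandas as pd

# One row per focus; 'n' holds the per-study sample size (as strings here,
# hence the float-then-int cast used in the workflow).
coords = pd.DataFrame({'id': ['study1-', 'study1-', 'study2-'],
                       'n': ['12', '12', '20']})
n_subs = coords.drop_duplicates('id')['n'].astype(float).astype(int).sum()
print(n_subs)  # 32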
github neurostuff / NiMARE / nimare / workflows / cluster.py
        sigma = 0.0
        for x in X:
            p = len(x) / n
            for y in Y:
                q = len(y) / n
                r = len(set(x) & set(y)) / n
                if r > 0.0:
                    sigma += r * (log(r / p, 2) + log(r / q, 2))
        return abs(sigma)
    # template_file = get_template(space='mni152_1mm', mask=None)
    if dataset_file.endswith('.json'):
        db = dataset_file  # how do I read in a generic dataset_file file? do I need options for source type?
        ids = db.ids
        dset = db.get_dataset(ids, target='mni152_2mm')
    elif dataset_file.endswith('.txt'):
        db = convert_sleuth_to_dataset(dataset_file)
        dset = db.get_dataset(target='mni152_2mm')
    else:
        raise ValueError("You've provided a dataset file that metacluster "
                         "can't read. :(")
    # imgs = dset.images
    if coord:
        if kernel == 'ALEKernel':
            kern = ALEKernel(dset.coordinates, 'template_img')
        elif kernel == 'MKDAKernel':
            kern = MKDAKernel(dset.coordinates, 'template_img')
        elif kernel == 'KDAKernel':
            kern = KDAKernel(dset.coordinates, 'template_img')
        elif kernel == 'Peaks2MapsKernel':
            kern = Peaks2MapsKernel(dset.coordinates, 'template_img')
        else:
            raise ValueError('Unsupported kernel: {}'.format(kernel))
        imgs = kern.transform(dset.ids)
    imgs_arr = []
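The loop at the top of the metacluster snippet is the body of a variation-of-information calculation between two clusterings: X and Y partition the same n items, each given as a collection of index sets, and the result is 0 when the partitions agree exactly. A self-contained version for experimentation (the function name and test partitions are mine, not NiMARE's):

from math import log

def variation_of_information(X, Y, n):
    # X and Y are partitions of the same n items; each is a list of sets.
    sigma = 0.0
    for x in X:
        p = len(x) / n
        for y in Y:
            q = len(y) / n
            r = len(set(x) & set(y)) / n
            if r > 0.0:
                sigma += r * (log(r / p, 2) + log(r / q, 2))
    return abs(sigma)

print(variation_of_information([{0, 1}, {2, 3}], [{0, 1}, {2, 3}], 4))  # 0.0
print(variation_of_information([{0, 1}, {2, 3}], [{0, 2}, {1, 3}], 4))  # 2.0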
github neurostuff / NiMARE / nimare / workflows / scale.py
def scale_workflow(dataset_file, baseline, output_dir=None, prefix=None,
                   n_iters=2500, v_thr=0.001):
    """
    Perform SCALE meta-analysis from Sleuth text file or NiMARE json file.

    Warnings
    --------
    This method is not yet implemented.
    """
    if dataset_file.endswith('.json'):
        dset = Dataset(dataset_file, target='mni152_2mm')
    elif dataset_file.endswith('.txt'):
        dset = convert_sleuth_to_dataset(dataset_file, target='mni152_2mm')

    boilerplate = """
A specific coactivation likelihood estimation (SCALE; Langner et al., 2014)
meta-analysis was performed using NiMARE. The input dataset included {n}
studies/experiments.

Voxel-specific null distributions were generated using base rates from {bl}
with {n_iters} iterations. Results were thresholded at p < {thr}.

References
----------
- Langner, R., Rottschy, C., Laird, A. R., Fox, P. T., & Eickhoff, S. B. (2014).
Meta-analytic connectivity modeling revisited: controlling for activation base
rates. NeuroImage, 99, 559-570.
    """
    boilerplate = boilerplate.format(
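Both this workflow and the metacluster snippet dispatch on the file extension to decide between a NiMARE JSON dataset and a Sleuth text file. A small helper capturing that shared pattern (the function name is mine; the constructors and import paths are taken from the snippets above):

from nimare.dataset import Dataset
from nimare.io import convert_sleuth_to_dataset

def load_any_dataset(dataset_file, target='mni152_2mm'):
    # NiMARE JSON files load directly; Sleuth text files are converted first.
    if dataset_file.endswith('.json'):
        return Dataset(dataset_file, target=target)
    elif dataset_file.endswith('.txt'):
        return convert_sleuth_to_dataset(dataset_file, target=target)
    raise ValueError('Unrecognized dataset file: {}'.format(dataset_file))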
github neurostuff / NiMARE / nimare / workflows / peaks2maps.py
def peaks2maps_workflow(sleuth_file, output_dir=None, prefix=None, n_iters=10000):
    """
    """
    LGR.info('Loading coordinates...')
    dset = convert_sleuth_to_dataset(sleuth_file)

    LGR.info('Reconstructing unthresholded maps...')
    k = Peaks2MapsKernel(resample_to_mask=False)
    imgs = k.transform(dset, masked=False)

    mask_img = resample_to_img(dset.mask, imgs[0], interpolation='nearest')
    z_data = apply_mask(imgs, mask_img)

    LGR.info('Estimating the null distribution...')
    res = rfx_glm(z_data, null='empirical', n_iters=n_iters)
    res = MetaResult('rfx_glm', maps=res, mask=mask_img)

    if output_dir is None:
        output_dir = os.path.dirname(os.path.abspath(sleuth_file))
    else:
        pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)
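A minimal invocation of the workflow above (the Sleuth file path is a placeholder; the import path follows the file location in the header):

from nimare.workflows.peaks2maps import peaks2maps_workflow

# Results are written next to the input file unless output_dir is given.
peaks2maps_workflow('my_foci.txt', output_dir='p2m_results', n_iters=10000)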
github neurostuff / NiMARE / nimare / workflows / ale.py
def ale_sleuth_workflow(sleuth_file, sleuth_file2=None, output_dir=None,
                        prefix=None, n_iters=10000, v_thr=0.001,
                        fwhm=None, n_cores=-1):
    """
    Perform ALE meta-analysis from Sleuth text file.
    """
    LGR.info('Loading coordinates...')

    if fwhm:
        fwhm_str = 'of {0} mm'.format(fwhm)
    else:
        fwhm_str = 'determined by sample size'

    if not sleuth_file2:
        dset = convert_sleuth_to_dataset(sleuth_file, target='ale_2mm')
        n_subs = dset.coordinates.drop_duplicates('id')['n'].astype(float).astype(int).sum()

        boilerplate = """
An activation likelihood estimation (ALE; Turkeltaub, Eden, Jones, & Zeffiro,
2002; Eickhoff, Bzdok, Laird, Kurth, & Fox, 2012; Turkeltaub et al., 2012)
meta-analysis was performed using NiMARE. The input dataset included {n_foci}
foci from {n_subs} participants across {n_exps} studies/experiments.

Modeled activation maps were generated for each study/experiment by convolving
each focus with a Gaussian kernel {fwhm_str}.
For voxels with overlapping kernels, the maximum value was retained.
The modeled activation maps were rendered in MNI 152 space (Fonov et al., 2009;
Fonov et al., 2011) at 2x2x2mm resolution. A map of ALE values was then
computed for the sample as the union of modeled activation values across
studies/experiments. Voxelwise statistical significance was determined based on
an analytically derived null distribution using the method described in
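The excerpt above shows the start of ale_sleuth_workflow, which drives both the single-dataset branch and the two-dataset branch quoted earlier. A minimal call matching that signature (file paths are placeholders):

from nimare.workflows.ale import ale_sleuth_workflow

# Passing sleuth_file2 switches to the two-dataset branch; omit it to run a
# single ALE meta-analysis.
ale_sleuth_workflow('group1_foci.txt', sleuth_file2='group2_foci.txt',
                    output_dir='ale_results', n_iters=10000, v_thr=0.001)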