How to use the clinica.pipelines.engine.Pipeline class in clinica

To help you get started, we’ve selected a few examples showing how clinica.pipelines.engine.Pipeline is subclassed in public projects (here, the Clinica code base itself).

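All of the excerpts below follow the same pattern: a pipeline is defined by subclassing cpe.Pipeline and filling in a small set of hook methods. The sketch below summarizes that pattern. It is a minimal, hypothetical example: only check_custom_dependencies and get_input_fields actually appear in the excerpts on this page; the other method names (get_output_fields, build_input_node, build_output_node, build_core_nodes) are assumptions about the engine's remaining hooks, and MyToyPipeline is not part of Clinica.

# coding: utf8

import clinica.pipelines.engine as cpe


class MyToyPipeline(cpe.Pipeline):
    """MyToyPipeline - Hypothetical example, not part of Clinica."""

    def check_custom_dependencies(self):
        """Check dependencies that cannot be listed in the `info.json` file."""
        pass

    def get_input_fields(self):
        """Return the names of the fields consumed by the input node (assumed example)."""
        return ['t1w']

    def get_output_fields(self):
        """Return the names of the fields produced by the output node (assumed example)."""
        return []

    def build_input_node(self):
        """Build and connect the input node of the pipeline (assumed hook)."""
        pass

    def build_output_node(self):
        """Build and connect the output node of the pipeline (assumed hook)."""
        pass

    def build_core_nodes(self):
        """Build and connect the core processing nodes of the pipeline (assumed hook)."""
        pass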

github aramis-lab / clinica / clinica / pipelines / t1_volume_existing_dartel / t1_volume_existing_dartel_pipeline.py View on Github external
# coding: utf8

import clinica.pipelines.engine as cpe

__author__ = "Jorge Samper-Gonzalez"
__copyright__ = "Copyright 2016-2019 The Aramis Lab Team"
__credits__ = ["Jorge Samper-Gonzalez"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "Jorge Samper-Gonzalez"
__email__ = "jorge.samper-gonzalez@inria.fr"
__status__ = "Development"


class T1VolumeExistingDartel(cpe.Pipeline):
    """T1VolumeExistingDartel - Reuse existing Dartel template.

    Args:
        input_dir: A BIDS directory.
        output_dir: An empty output directory where CAPS structured data will be written.
        subjects_sessions_list: The Subjects-Sessions list file (in .tsv format).

    Returns:
        A clinica pipeline object containing the T1VolumeExistingDartel pipeline.
    """
    def __init__(self,
                 bids_directory=None,
                 caps_directory=None,
                 tsv_file=None,
                 base_dir=None,
                 name=None,
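
Judging from the constructor keywords visible above (bids_directory, caps_directory, tsv_file, base_dir, name), such a pipeline would typically be instantiated and run along the following lines. This is a hedged sketch: the paths are placeholders, and run() is assumed to be inherited from the underlying nipype workflow machinery rather than confirmed by this excerpt.

from clinica.pipelines.t1_volume_existing_dartel.t1_volume_existing_dartel_pipeline import (
    T1VolumeExistingDartel,
)

# Placeholder paths -- replace with your own BIDS/CAPS locations.
pipeline = T1VolumeExistingDartel(
    bids_directory='/path/to/bids',
    caps_directory='/path/to/caps',
    tsv_file='/path/to/subjects_sessions.tsv',
)
pipeline.run()  # assumed to be provided by the nipype Workflow base class
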
github aramis-lab / clinica / clinica / pipelines / statistics_surface / statistics_surface_pipeline.py View on Github external
# coding: utf8

import clinica.pipelines.engine as cpe

__author__ = "Junhao Wen"
__copyright__ = "Copyright 2016-2018, The Aramis Lab Team"
__credits__ = ["Junhao Wen"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "Junhao Wen"
__email__ = "Junhao.Wen@inria.fr"
__status__ = "Development"


class StatisticsSurface(cpe.Pipeline):
    """
        Based on the Matlab toolbox [SurfStat](http://www.math.mcgill.ca/keith/surfstat/), which performs statistical
        analyses of univariate and multivariate surface and volumetric data using the generalized linear model (GLM),
        this pipeline performs analyses including group comparison and correlation with surface-based features.
        Currently, this pipeline fits the normalised cortical thickness on FsAverage produced by the `t1-freesurfer` pipeline.
        New features will be added in the future.


    Args:
        caps_directory: str, the output folder of recon-all, which contains the result files ?h.thickness.fwhm**.mgh.
        tsv_file: str, path to the TSV file containing the information for the GLM.
        design_matrix: str, the linear model fitted by the GLM, for example '1+group'.
        contrast: str, the contrast for the GLM. If the chosen factor is a categorical variable (e.g. contrast = 'Label'),
            clinica_surfstat creates two contrasts: contrastpos = Label.AD - Label.CN and contrastneg = Label.CN - Label.AD.
            If the chosen factor is continuous (e.g. contrast = 'Age'), a single contrast is created. Note that the chosen
            name must exactly match a column name of your subjects_visits_tsv.
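
The design_matrix/contrast description above maps to settings like the ones below. How these values are handed to the pipeline (constructor keywords versus a separate parameters dictionary) is not visible in this excerpt, so the snippet only illustrates the values themselves.

# Illustrative GLM settings, mirroring the docstring above.
glm_settings = {
    'design_matrix': '1+group',   # linear model fitted by the GLM (docstring example)
    'contrast': 'Label',          # categorical factor -> contrastpos / contrastneg as above
}
# For a continuous factor such as 'Age', a single contrast would be created,
# and the name must exactly match a column of the subjects/visits TSV.
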
github aramis-lab / clinica / clinica / pipelines / t1_volume_create_dartel / t1_volume_create_dartel_pipeline.py View on Github external
# coding: utf8

import clinica.pipelines.engine as cpe

__author__ = "Jorge Samper-Gonzalez"
__copyright__ = "Copyright 2016-2019 The Aramis Lab Team"
__credits__ = ["Jorge Samper-Gonzalez"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "Jorge Samper-Gonzalez"
__email__ = "jorge.samper-gonzalez@inria.fr"
__status__ = "Development"


class T1VolumeCreateDartel(cpe.Pipeline):
    """T1VolumeCreateDartel - Create new Dartel template.

    Args:
        input_dir: A BIDS directory.
        output_dir: An empty output directory where CAPS structured data will be written.
        subjects_sessions_list: The Subjects-Sessions list file (in .tsv format).

    Returns:
        A clinica pipeline object containing the T1VolumeCreateDartel pipeline.
    """
    def __init__(self,
                 bids_directory=None,
                 caps_directory=None,
                 tsv_file=None,
                 base_dir=None,
                 name=None,
github aramis-lab / clinica / clinica / pipelines / machine_learning_spatial_svm / spatial_svm_pipeline.py View on Github external
# WARNING: Don't put any import statement here except if it's absolutely
# necessary. Put it *inside* the different methods.
# Otherwise it will slow down the dynamic loading of the pipelines list by the
# command line tool.
import clinica.pipelines.engine as cpe

__author__ = "Simona Bottani"
__copyright__ = "Copyright 2016-2019 The Aramis Lab Team"
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "Simona Bottani"
__email__ = "simona.bottani@icm-institute.org"
__status__ = "Development"


class SpatialSVM(cpe.Pipeline):
    """SpatialSVM - Prepare input data for SVM with spatial and anatomical regularization.

    Todos:
        - [ ] Final version of CAPS.
        - [ ] Remove --voxel_size flag and detect automatically this parameter.

    Args:
        input_dir: A BIDS directory.
        output_dir: An empty output directory where CAPS structured data will be written.
        subjects_sessions_list: The Subjects-Sessions list file (in .tsv format).

    Returns:
        A clinica pipeline object containing the SpatialSVM pipeline.

    Raises:
    """
github aramis-lab / clinica / clinica / pipelines / dwi_dti / dwi_dti_pipeline.py View on Github external
import clinica.pipelines.engine as cpe

from nipype import config

__author__ = ["Alexandre Routier", "Thomas Jacquemont"]
__copyright__ = "Copyright 2016-2019 The Aramis Lab Team"
__credits__ = ["Nipype"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__status__ = "Development"

# Use hash instead of parameters for iterables folder names
# Otherwise path will be too long and generate OSError
cfg = dict(execution={'parameterize_dirs': False})
config.update_config(cfg)


class DwiDti(cpe.Pipeline):
    """DTI-based processing of DWI datasets.

    Args:
        input_dir(str): Input directory in a CAPS hierarchy.
        output_dir(str): Output directory in a CAPS hierarchy.
        subjects_sessions_list(str): The Subjects-Sessions list file (in .tsv
            format).

    Returns:
        A clinica pipeline object containing the DwiDti pipeline.

    Raises:

    """

    def check_custom_dependencies(self): pass
github aramis-lab / clinica / clinica / pipelines / dwi_connectome / dwi_connectome_pipeline.py View on Github external
# coding: utf8

# WARNING: Don't put any import statement here except if it's absolutely
# necessary. Put it *inside* the different methods.
# Otherwise it will slow down the dynamic loading of the pipelines list by the
# command line tool.
import clinica.pipelines.engine as cpe

# Use hash instead of parameters for iterables folder names
from nipype import config
cfg = dict(execution={'parameterize_dirs': False})
config.update_config(cfg)


class DwiConnectome(cpe.Pipeline):
    """Connectome-based processing of corrected DWI datasets.

    Args:
        input_dir: A BIDS directory.
        output_dir: An empty output directory where CAPS structured data will
            be written.
        subjects_sessions_list: The Subjects-Sessions list file (in .tsv
            format).

    Returns:
        A clinica pipeline object containing the DwiConnectome pipeline.
    """

    def check_custom_dependencies(self):
        """Check dependencies that can not be listed in the `info.json` file.
        """
github aramis-lab / clinica / clinica / pipelines / t1_freesurfer_longitudinal / t1_freesurfer_longitudinal_correction_pipeline.py View on Github external
# coding: utf8

__author__ = "Alexis Guyot"
__copyright__ = "Copyright 2016-2019, The Aramis Lab Team"
__credits__ = ["Alexis Guyot"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "Alexis Guyot"
__email__ = "alexis.guyot@icm-institute.org"
__status__ = "Development"

import clinica.pipelines.engine as cpe


class T1FreeSurferLongitudinalCorrection(cpe.Pipeline):
    """FreeSurfer Longitudinal correction class

    Creates a pipeline that runs the Freesurfer longitudinal
    (correction) processing module for each subjects in a .tsv-defined
    list of subjects/sessions. This requires a prior run of
    t1-freesurfer on the TSV file, followed by a run of
    t1-freesurfer-template on the same TSV file. For each subject, all the
    timepoints (sessions) are re-processed based on a template computed
    with t1-freesurfer-template for that specific subject.

    Todos: N/A

    Returns:
        A clinica pipeline object containing the T1FreeSurferLongitudinalCorrection pipeline

    Raises:
github aramis-lab / clinica / clinica / pipelines / t1_volume_parcellation / t1_volume_parcellation_pipeline.py View on Github external
# coding: utf8

import clinica.pipelines.engine as cpe

__author__ = "Simona Bottani"
__copyright__ = "Copyright 2016-2019 The Aramis Lab Team"
__credits__ = ["Simona Bottani"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "Simona Bottani"
__email__ = "simona.bottani@icm-institute.org"
__status__ = "Development"


class T1VolumeParcellation(cpe.Pipeline):
    """T1VolumeParcellation - Computation of mean GM concentration for a set of regions

    Args:
        input_dir: A BIDS directory.
        output_dir: An empty output directory where CAPS structured data will be written.
        subjects_sessions_list: The Subjects-Sessions list file (in .tsv format).

    Returns:
        A clinica pipeline object containing the T1VolumeParcellation pipeline.
    """
    def check_custom_dependencies(self):
        """Check dependencies that can not be listed in the `info.json` file.
        """
        pass

    def get_input_fields(self):
github aramis-lab / clinica / clinica / pipelines / pet_surface / pet_surface_pipeline.py View on Github external
# coding: utf-8

__author__ = "Arnaud Marcoux"
__copyright__ = "Copyright 2016-2019 The Aramis Lab Team"
__credits__ = ["Arnaud Marcoux", "Michael Bacci"]
__license__ = "See LICENSE.txt file"
__version__ = "1.0.0"
__maintainer__ = "Arnaud Marcoux"
__email__ = "arnaud.marcoux@inria.fr"
__status__ = "Development"

import clinica.pipelines.engine as cpe


class PetSurface(cpe.Pipeline):
    """Project PET signal onto the surface of the cortex.

    Args:
        input_dir: A BIDS directory.
        output_dir: An empty output directory where CAPS structured data will be
            written.
        subjects_sessions_list: The Subjects-Sessions list file (in .tsv
            format).

    Returns:
        A clinica pipeline object containing the PetSurface pipeline.

    """

    def check_custom_dependencies(self):
        """Check dependencies that can not be listed in the `info.json` file.
github aramis-lab / clinica / clinica / pipelines / dwi_preprocessing_noddi / dwi_preprocessing_noddi_pipeline.py View on Github external
# coding: utf8

__author__ = "Junhao Wen"
__copyright__ = "Copyright 2016-2018, The Aramis Lab Team"
__credits__ = ["Junhao Wen"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "Junhao Wen"
__email__ = "Junhao.Wen@inria.fr"
__status__ = "Development"

# WARNING: Don't put any import statement here except if it's absolutely
# necessary. Put it *inside* the different methods.
# Otherwise it will slow down the dynamic loading of the pipelines list by the
# command line tool.
import clinica.pipelines.engine as cpe


class DwiPreprocessingNoddi(cpe.Pipeline):
    """dwi_preprocessing_noddi SHORT DESCRIPTION.

    Warnings:
        - A WARNING.

    Todos:
        - [x] A FILLED TODO ITEM.
        - [ ] AN ON-GOING TODO ITEM.

    Args:
        input_dir: A BIDS directory.
        output_dir: An empty output directory where CAPS structured data will be written.
        subjects_sessions_list: The Subjects-Sessions list file (in .tsv format).

    Returns:
        A clinica pipeline object containing the dwi_preprocessing_noddi pipeline.