How to use the mriqc.__version__ attribute in mriqc

To help you get started, we've selected a few mriqc examples based on popular ways mriqc.__version__ is used in public projects.
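
mriqc.__version__ is a plain string carrying the installed package version, not a callable. Before diving into the project snippets, here is a minimal sketch of the pattern most of them share: importing the string and exposing it through an argparse --version flag. The script and the parser description are illustrative, not taken from mriqc itself.

from argparse import ArgumentParser

from mriqc import __version__  # a plain version string, e.g. "0.16.1" (actual value varies)

# Illustrative parser: the description is made up, but the --version pattern
# mirrors the mriqc_run.py and subject_wrangler.py snippets below.
parser = ArgumentParser(description="toy mriqc version reporter")
parser.add_argument(
    "-v", "--version", action="version", version="mriqc v{}".format(__version__)
)

if __name__ == "__main__":
    parser.parse_args()  # running with --version prints "mriqc vX.Y.Z" and exits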


poldracklab/mriqc: mriqc/utils/mriqc_run.py (view on GitHub)
            plugin_settings = loadyml(pfile)
    else:
        # Setup multiprocessing
        if settings['n_procs'] == 0:
            settings['n_procs'] = 1
            max_parallel_ants = cpu_count() // settings['ants_nthreads']
            if max_parallel_ants > 1:
                settings['n_procs'] = max_parallel_ants

        if settings['n_procs'] > 1:
            plugin_settings['plugin'] = 'MultiProc'
            plugin_settings['plugin_args'] = {'n_procs': settings['n_procs']}

    MRIQC_LOG.info(
        'Running MRIQC-%s (analysis_level=%s, participant_label=%s)\n\tSettings=%s',
        __version__, opts.analysis_level, opts.participant_label, settings)

    # Set up participant level
    if opts.analysis_level == 'participant':
        for qctype in opts.data_type:
            ms_func = getattr(mwc, 'ms_' + qctype)
            workflow = ms_func(subject_id=opts.participant_label, session_id=opts.session_id,
                               run_id=opts.run_id, settings=settings)
            if workflow is None:
                MRIQC_LOG.warn(
                    '%s QC workflow - no scans were found for the given inputs',
                    'Anatomical' if qctype[:4] == 'anat' else 'Functional')
                continue

            workflow.base_dir = settings['work_dir']
            if settings.get('write_graph', False):
                workflow.write_graph()

poldracklab/mriqc: mriqc/config.py (view on GitHub)
import logging  # needed by the addLevelName() calls below
import os
import sys

from uuid import uuid4
from pathlib import Path
from time import strftime
from nipype import __version__ as _nipype_ver
from templateflow import __version__ as _tf_ver
from . import __version__

if not hasattr(sys, "_is_pytest_session"):
    sys._is_pytest_session = False  # Trick to avoid sklearn's FutureWarnings
# Disable all warnings in main and children processes only on production versions
if not any(
    (
        "+" in __version__,
        __version__.endswith(".dirty"),
        os.getenv("MRIQC_DEV", "0").lower() in ("1", "on", "true", "y", "yes"),
    )
):
    os.environ["PYTHONWARNINGS"] = "ignore"

logging.addLevelName(25, "IMPORTANT")  # Add a new level between INFO and WARNING
logging.addLevelName(15, "VERBOSE")  # Add a new level between INFO and DEBUG

DEFAULT_MEMORY_MIN_GB = 0.01
DSA_MESSAGE = """\
IMPORTANT: Anonymized quality metrics (IQMs) will be submitted to MRIQC's metrics \
repository. \
Submission of IQMs can be disabled using the ``--no-sub`` argument. \
Please visit https://mriqc.readthedocs.io/en/latest/dsa.html to revise MRIQC's \
Data Sharing Agreement."""

poldracklab/mriqc: mriqc/bin/mriqc_clf.py (view on GitHub)
    from ..classifier.helper import CVHelper

    warnings.showwarning = warn_redirect

    opts = get_parser().parse_args()

    log_level = int(max(3 - opts.verbose_count, 0) * 10)
    if opts.verbose_count > 1:
        log_level = int(max(25 - 5 * opts.verbose_count, 1))

    LOGGER.setLevel(log_level)

    base_name = "mclf_run-%s_mod-%s_ver-%s_class-%d_cv-%s" % (
        datetime.now().strftime("%Y%m%d-%H%M%S"),
        opts.model,
        re.sub(r"[\+_@]", ".", __version__),
        3 if opts.multiclass else 2,
        opts.cv,
    )

    if opts.nested_cv_kfold:
        base_name += "_ncv-kfold"
    elif opts.nested_cv:
        base_name += "_ncv-loso"

    if opts.log_file is None or len(opts.log_file) > 0:
        log_file = opts.log_file if opts.log_file else base_name + ".log"
        fhl = logging.FileHandler(log_file)
        fhl.setFormatter(fmt=logging.Formatter(LOG_FORMAT))
        fhl.setLevel(log_level)
        LOGGER.addHandler(fhl)

poldracklab/mriqc: mriqc/bin/mriqc_run.py (view on GitHub)
    log_level = int(max(25 - 5 * opts.verbose_count, 1))

    # Set logging level
    logging.getLogger('mriqc').setLevel(log_level)
    nlogging.getLogger('nipype.workflow').setLevel(log_level)
    nlogging.getLogger('nipype.interface').setLevel(log_level)
    nlogging.getLogger('nipype.utils').setLevel(log_level)

    logger = logging.getLogger('mriqc')
    INIT_MSG = """
    Running MRIQC version {version}:
      * BIDS dataset path: {bids_dir}.
      * Output folder: {output_dir}.
      * Analysis levels: {levels}.
    """.format(
        version=__version__,
        bids_dir=opts.bids_dir.expanduser().resolve(),
        output_dir=opts.output_dir.expanduser().resolve(),
        levels=', '.join(reversed(list(analysis_levels)))
    )
    logger.log(25, INIT_MSG)

    # Set up participant level
    if 'participant' in analysis_levels:
        logger.info('Participant level started. Checking BIDS dataset...')

        # Call build_workflow(opts, retval)
        with Manager() as mgr:
            retval = mgr.dict()
            p = Process(target=init_mriqc, args=(opts, retval))
            p.start()
            p.join()

poldracklab/mriqc: mriqc/bin/subject_wrangler.py (view on GitHub)
"""Entry point"""
    parser = ArgumentParser(
        formatter_class=RawTextHelpFormatter,
        description=dedent(
            """\
BIDS-Apps participants wrangler tool
------------------------------------

This command arranges the participant labels in groups for computation, and checks that the \
requested participants have the corresponding folder in the bids_dir.\
"""
        ),
    )

    parser.add_argument(
        "-v", "--version", action="version", version="mriqc v{}".format(__version__)
    )

    parser.add_argument(
        "bids_dir",
        action="store",
        help="The directory with the input dataset "
        "formatted according to the BIDS standard.",
    )
    parser.add_argument(
        "output_dir",
        action="store",
        help="The directory where the output files "
        "should be stored. If you are running group level analysis "
        "this folder should be prepopulated with the results of the"
        "participant level analysis.",
    )

poldracklab/mriqc: mriqc/bin/mriqc_run.py (view on GitHub)
            if settings['n_procs'] > 1:
                # always leave one extra thread for non ANTs work,
                # don't use more than 8 threads - the speed up is minimal
                settings['ants_nthreads'] = min(settings['n_procs'] - 1, 8)
            else:
                settings['ants_nthreads'] = 1

        if settings['n_procs'] > 1:
            plugin_settings['plugin'] = 'MultiProc'
            plugin_settings['plugin_args'] = {'n_procs': settings['n_procs']}
            if opts.mem_gb:
                plugin_settings['plugin_args']['memory_gb'] = opts.mem_gb

    MRIQC_LOG.info(
        'Running MRIQC-%s (analysis_levels=[%s], participant_label=%s)\n\tSettings=%s',
        __version__, ', '.join(analysis_levels), opts.participant_label, settings)

    # Process data types
    modalities = opts.modalities

    dataset = collect_bids_data(settings['bids_dir'],
                                participant_label=opts.participant_label)

    # Set up participant level
    if 'participant' in analysis_levels:
        workflow = Workflow(name='workflow_enumerator')
        workflow.base_dir = settings['work_dir']

        wf_list = []
        for mod in modalities:
            if not dataset[mod]:
                MRIQC_LOG.warn('No %s scans were found in %s', mod, settings['bids_dir'])

poldracklab/mriqc: mriqc/bin/mriqc_run.py (view on GitHub)
                        help='The directory with the input dataset '
                             'formatted according to the BIDS standard.')
    parser.add_argument('output_dir', action='store', type=Path,
                        help='The directory where the output files '
                             'should be stored. If you are running group level analysis '
                             'this folder should be prepopulated with the results of the '
                             'participant level analysis.')
    parser.add_argument('analysis_level', action='store', nargs='+',
                        help='Level of the analysis that will be performed. '
                             'Multiple participant level analyses can be run independently '
                             '(in parallel) using the same output_dir.',
                        choices=['participant', 'group'])

    # optional arguments
    parser.add_argument('--version', action='version',
                        version='mriqc v{}'.format(__version__))

    # BIDS selectors
    g_bids = parser.add_argument_group('Options for filtering the input BIDS dataset')
    g_bids.add_argument('--participant_label', '--participant-label', action='store', nargs='*',
                        help='one or more participant identifiers (the sub- prefix can be '
                             'removed)')
    g_bids.add_argument('--session-id', action='store', nargs='*', type=str,
                        help='filter input dataset by session id')
    g_bids.add_argument('--run-id', action='store', type=int, nargs='*',
                        help='filter input dataset by run id '
                             '(only integer run ids are valid)')
    g_bids.add_argument('--task-id', action='store', nargs='*', type=str,
                        help='filter input dataset by task id')
    g_bids.add_argument('-m', '--modalities', action='store', nargs='*',
                        help='filter input dataset by MRI type')
    g_bids.add_argument('--dsname', type=str, help='a dataset name')

poldracklab/mriqc: mriqc/cli/version.py (view on GitHub)
    # https://raw.githubusercontent.com/poldracklab/mriqc/master/.versions.json
    flagged = tuple()
    try:
        response = requests.get(
            url="""\
https://raw.githubusercontent.com/poldracklab/mriqc/master/.versions.json""",
            timeout=1.0,
        )
    except Exception:
        response = None

    if response and response.status_code == 200:
        flagged = response.json().get("flagged", {}) or {}

    if __version__ in flagged:
        return True, flagged[__version__]

    return False, None
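
The excerpt above ends by returning a (flagged, reason) tuple: (True, message) when the running __version__ appears in the remotely hosted list of flagged releases, and (False, None) otherwise. A short usage sketch follows; the import path and function name are assumptions based on the file shown, since the excerpt does not include the enclosing function's signature.

from mriqc.cli.version import is_flagged  # assumed name/location of the check excerpted above

flagged, reason = is_flagged()  # returns (bool, explanation-or-None)
if flagged:
    print("This MRIQC version has been flagged: {}".format(reason or "no reason given"))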