How to use the clinica.utils.stream.cprint function in clinica

To help you get started, we’ve selected a few examples of cprint in clinica, based on popular ways it is used in public projects.

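The examples below all follow the same basic pattern. As a quick orientation, here is a minimal sketch, assuming only what the snippets themselves show: cprint is imported from clinica.utils.stream and given a message string, optionally wrapped in colorama color codes. The message texts in this sketch are illustrative, not taken from clinica.

from colorama import Fore
from clinica.utils.stream import cprint

# Plain informational message, as in the converter and pipeline examples below.
cprint("Parsing files to be converted...")

# Colored message, following the colorama pattern used in several snippets.
cprint('%sConversion done.%s' % (Fore.GREEN, Fore.RESET))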

aramis-lab/clinica: clinica/utils/ux.py (view on GitHub)
def print_crash_files_and_exit(log_file, working_directory):
    """Print command(s) to type in order to extract details after a Nipype RuntimeError and exit with an exception."""
    from colorama import Fore
    from .filemanip import extract_crash_files_from_log_file
    from .exceptions import ClinicaException
    from .stream import cprint

    cprint('%s\nError details can be found by opening the crash file(s) with the following command(s):%s' %
           (Fore.YELLOW, Fore.RESET))

    crash_files = extract_crash_files_from_log_file(log_file)
    for file in crash_files:
        cprint('%s- nipypecli crash %s%s' %
               (Fore.YELLOW, file, Fore.RESET))

    cprint('%s\n'
           'If your pipeline crashed due to lack of space or network issues, '
           're-run the pipeline with the working directory (-wd %s).\n'
           'Known issues are displayed here: http://www.clinica.run/doc/InteractingWithClinica/#known-issues\n'
           'Otherwise, you can delete it.%s' %
           (Fore.YELLOW, working_directory, Fore.RESET))
    # Force the display of "Documentation can be found..."
    raise ClinicaException('')

aramis-lab/clinica: clinica/pipelines/statistics_surface/statistics_surface_cli.py (view on GitHub)
        # Assumed context (not shown in the excerpt): the parsed CLI arguments
        # are gathered into the pipeline's parameter dictionary.
        pipeline.parameters = {
            'group_label': args.group_id,
            'glm_type': args.glm_type,
            'custom_file': args.custom_file,
            'feature_label': args.feature_label,
            'full_width_at_half_maximum': args.full_width_at_half_maximum,
            'threshold_uncorrected_pvalue': args.threshold_uncorrected_pvalue,
            'threshold_corrected_pvalue': args.threshold_corrected_pvalue,
            'cluster_threshold': args.cluster_threshold
        }

        check_inputs(pipeline.caps_directory,
                     pipeline.parameters['custom_file'],
                     pipeline.parameters['full_width_at_half_maximum'],
                     pipeline.tsv_file)

        cprint("Parameters used for this pipeline:")
        cprint(pipeline.parameters)

        if args.n_procs:
            exec_pipeline = pipeline.run(plugin='MultiProc',
                                         plugin_args={'n_procs': args.n_procs})
        else:
            exec_pipeline = pipeline.run()

        if isinstance(exec_pipeline, Graph):  # pipeline.run() returns the execution graph when the run succeeds
            print_end_pipeline(self.name, pipeline.base_dir, pipeline.base_dir_was_specified)
        else:
            print_crash_files_and_exit(args.logname, pipeline.base_dir)

aramis-lab/clinica: clinica/utils/filemanip.py (view on GitHub)
    # Assumed context (not shown in the excerpt): the output folder is created
    # just above, inside a try/except around os.makedirs(out_folder).
    try:
        os.makedirs(out_folder)
    except OSError as e:
        if e.errno != errno.EEXIST:  # EEXIST: folder already exists
            raise e

    if out_file:
        tsv_file = os.path.join(out_folder, out_file)
    else:
        tsv_file = os.path.join(out_folder, 'participants.tsv')

    try:
        data = pandas.DataFrame({
            'participant_id': participant_ids,
            'session_id': session_ids,
        })
        data.to_csv(tsv_file, sep='\t', index=False, encoding='utf-8')
    except Exception as e:
        cprint("Impossible to save %s with pandas" % out_file)
        raise e

aramis-lab/clinica: clinica/iotools/converters/nifd_to_bids/nifd_to_bids.py (view on GitHub)
        else:
            cprint(
                "\nclinical_info.tsv does not exist and DataDictionary_NIFD_2017.10.18.xlsx was not found in the "
                "clinical data directory. To create it, please enter path/to/DataDictionary_NIFD_2017.10.18.xlsx:")
            path_DataDictionary_NIFD_2017 = input()
            path_DataDictionary_NIFD_2017 = path_DataDictionary_NIFD_2017.strip(' ')
        cprint("Creating clinical_info.tsv ...")

        path_clinicals = os.path.join(path_converter, '..', 'clinical_data_bids_correspondence')

        update_info_clinical(path_DataDictionary_NIFD_2017, path_clinicals, path_to_clinical_file, path_to_clinical)
        assert os.path.isfile(path_to_clinical_info), 'Failed to create clinical_info.tsv'
        cprint("clinical_info.tsv successfully created !")

    cprint("Parsing files to be converted...")
    medical_images = get_all_med_name(path_to_dataset)
    assert len(medical_images) > 0, 'The dataset contains no medical image'

    descriptors = get_descriptors(os.path.join(path_converter, 'config_files'))
    assert len(descriptors) > 0, 'Failed to load the descriptors'

    # equivalences['medical_image_name'] = (Descriptor_instance, modalityLabel)
    # e.g. equivalences['T1_mprage_S3_DIS3D'] -> (Descriptor_instance, 'T1w')
    equivalences = dict_conversion(medical_images, descriptors)

    # patients_source_files[subject_ID] = [paths_to_all_medical_images_of_subject]
    # Only contains files that are available in the provided dataset
    patients_source_files = get_patients_source_files(path_to_dataset, path_to_ida)
    patients_source_files = filter_patients_source_files(patients_source_files, path_to_dataset, descriptors)
    patients_source_files = {pat: patients_source_files[pat] for pat in patients_source_files if
                             patients_source_files[pat] != []}

aramis-lab/clinica: clinica/iotools/converters/adni_to_bids/adni_modalities/adni_dwi.py (view on GitHub)
    import pandas as pd
    from os import path
    from clinica.utils.stream import cprint
    from colorama import Fore

    if subjs_list is None:
        adni_merge_path = path.join(csv_dir, 'ADNIMERGE.csv')
        adni_merge = pd.read_csv(adni_merge_path, sep=',', low_memory=False)
        subjs_list = list(adni_merge.PTID.unique())

    cprint('Calculating paths of DWI images. Output will be stored in ' + path.join(dest_dir, 'conversion_info') + '.')
    images = compute_dwi_paths(source_dir, csv_dir, dest_dir, subjs_list)
    cprint('Paths of DWI images found. Exporting images into BIDS ...')
    dwi_paths_to_bids(images, dest_dir)
    cprint(Fore.GREEN + 'DWI conversion done.' + Fore.RESET)

aramis-lab/clinica: clinica/iotools/converters/adni_to_bids/adni_modalities/adni_t1.py (view on GitHub)
            # If the image is too close to the date between two visits, we prefer the earlier visit
            if (datetime.strptime(min_visit.EXAMDATE, "%Y-%m-%d")
                    > datetime.strptime(image.ScanDate, "%Y-%m-%d")
                    > datetime.strptime(min_visit2.EXAMDATE, "%Y-%m-%d")):
                dif = days_between(min_visit.EXAMDATE, min_visit2.EXAMDATE)
                if abs((dif / 2.0) - min_db) < 30:
                    min_visit = min_visit2

            cprint('We prefer ' + min_visit.VISCODE)

        key_min_visit = (min_visit.VISCODE, min_visit.COLPROT, min_visit.ORIGPROT)
        if key_min_visit not in visits.keys():
            visits[key_min_visit] = image.Visit
        elif visits[key_min_visit] != image.Visit:
            cprint('[T1] Subject ' + subject + ' has multiple visits for one timepoint.')
            # cprint(key_min_visit)
            # cprint(visits[key_min_visit])
            # cprint(image.Visit)

    return visits

aramis-lab/clinica: clinica/iotools/converters/adni_to_bids/adni_modalities/adni_t1.py (view on GitHub)
    if not qc_passed:
        if scan.Sequence == 'MP-RAGE':
            original_img_seq = 'MPR'
        else:  # 'MP-RAGE REPEAT'
            original_img_seq = 'MPR-R'

        processing_seq = qc_prev_sequence[qc_prev_sequence.find(';'):qc_prev_sequence.find('Scaled') - 2]
        sequence = original_img_seq + processing_seq
        # print sequence

    qc = mri_quality_subj[mri_quality_subj.LONIUID == 'S' + str(scan.SeriesID)]
    if qc.shape[0] > 0 and qc.iloc[0].PASS != 1:
        # TODO - LOG THIS
        cprint('QC found but NOT passed')
        cprint('Subject ' + subject_id + ' - Series: ' + str(scan.SeriesID) + ' - Study: ' + str(scan.StudyID))
        return False

    return True

aramis-lab/clinica: clinica/iotools/converters/adni_to_bids/adni_modalities/adni_t1.py (view on GitHub)
def adni3_image(subject_id, timepoint, visit_str, mprage_meta_subj, mayo_mri_qc_subj):

    from clinica.iotools.converters.adni_to_bids.adni_utils import replace_sequence_chars
    from clinica.utils.stream import cprint

    filtered_scan = mprage_meta_subj[(mprage_meta_subj['Orig/Proc'] == 'Original')
                                     & (mprage_meta_subj.Visit == visit_str)
                                     & mprage_meta_subj.Sequence.map(
                                         # compare against lowercase '_nd' so the "exclude *_ND sequences" check can match
                                         lambda x: (x.lower().find('accel') > -1) & ~(x.lower().endswith('_nd')))]

    if filtered_scan.shape[0] < 1:
        # TODO - LOG THIS
        cprint('NO MPRAGE Meta for ADNI3: ' + subject_id + ' for visit ' + timepoint + ' - ' + visit_str)
        return None

    scan = select_scan_qc_adni2(filtered_scan, mayo_mri_qc_subj, preferred_field_strength=3.0)
    sequence = replace_sequence_chars(scan.Sequence)

    return {'Subject_ID': subject_id,
            'VISCODE': timepoint,
            'Visit': visit_str,
            'Sequence': sequence,
            'Scan_Date': scan.ScanDate,
            'Study_ID': str(scan.StudyID),
            'Series_ID': str(scan.SeriesID),
            'Image_ID': str(scan.ImageUID),
            'Field_Strength': scan.MagStrength,
            'Original': True}

aramis-lab/clinica: clinica/pipelines/t1_volume_tissue_segmentation/t1_volume_tissue_segmentation_utils.py (view on GitHub)
def print_end_pipeline(subject_id, final_file):
    """
    Display end message for  when  is connected.
    """
    import datetime
    from colorama import Fore
    from clinica.utils.stream import cprint

    now = datetime.datetime.now().strftime('%H:%M:%S')
    cprint('%s[%s]%s ...%s has completed.' % (
        Fore.GREEN, now, Fore.RESET, subject_id.replace('_', '|')))
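
Putting the pieces of these examples together, the sketch below shows a hypothetical helper (not part of clinica) that combines the timestamp and colorama patterns from print_end_pipeline and the ADNI converters into a single colored, timestamped status line; the function name and signature are assumptions made for illustration.

import datetime

from colorama import Fore
from clinica.utils.stream import cprint


def print_status(message, color=Fore.GREEN):
    # Hypothetical helper: a timestamped, colored status line printed through clinica's cprint.
    now = datetime.datetime.now().strftime('%H:%M:%S')
    cprint('%s[%s]%s %s' % (color, now, Fore.RESET, message))


# Example call:
# print_status('sub-01|ses-M00 has completed.')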