How to use the nibabel.as_closest_canonical function in nibabel

To help you get started, we’ve selected a few nibabel.as_closest_canonical examples, based on popular ways the function is used in public projects.
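
All of the excerpts below follow the same basic pattern: load an image, pass it through nib.as_closest_canonical to reorient it to the closest RAS+ ("canonical") orientation (axes are flipped and reordered, not resampled), then work with the reoriented data and affine. Note that several excerpts use the older get_data()/get_header() accessors, which current nibabel releases no longer provide; new code should use get_fdata() and the .header attribute. Here is a minimal, self-contained sketch of the core pattern (the file path is a placeholder):

import nibabel as nib

# Load a NIfTI image (placeholder path) and reorient it to the closest
# RAS+ ("canonical") orientation. Only the axis order and affine change;
# no interpolation or resampling is performed.
img = nib.load('subject_T1w.nii.gz')
canonical = nib.as_closest_canonical(img)

# aff2axcodes reports the anatomical axis labels implied by each affine,
# e.g. ('L', 'A', 'S') before and ('R', 'A', 'S') after reorientation.
print(nib.aff2axcodes(img.affine), '->', nib.aff2axcodes(canonical.affine))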


github trislett / TFCE_mediation / tfce_mediation / misc_scripts / fast_bet.py View on Github external
def run(opts):

	img = nib.as_closest_canonical(nib.load(opts.image[0])) # for pesky LR flipping
	data = img.get_data()
	hdr = img.get_header()
	low_threshold, _ = autothreshold(data, threshold_type = opts.thresholdalgorithm)
	print(low_threshold)
	mask = np.zeros_like(data)
	mask[:] = data
	mask[mask < low_threshold] = 0
#	mask[mask != 0] = 1
	nib.save(nib.Nifti1Image(mask.astype(np.float32, order = "C"),affine=img.affine),'temp.nii.gz')
	os.system(os.environ["FSLDIR"] + "/bin/bet temp.nii.gz temp_brain.nii.gz -m -f 0.3")
	betmask = nib.as_closest_canonical(nib.load('temp_brain_mask.nii.gz')).get_data()
	data[betmask!=1] = 0
	if opts.output:
		nib.save(nib.Nifti1Image(data.astype(np.float32, order = "C"),affine=img.affine), opts.output[0])
	elif opts.replace:
		base, name = os.path.split(opts.replace[0])
github dPys / PyNets / pynets / registration / reg_utils.py View on Github external
Returns
    -------
    out_fname : str
        File path to the reoriented dwi Nifti1Image.
    out_bvec_fname : str
        File path to corresponding reoriented bvecs file.
    """
    from pynets.registration.reg_utils import normalize_xform
    fname = dwi_prep
    bvec_fname = bvecs
    out_bvec_fname = "%s%s" % (out_dir, '/bvecs_reor.bvec')

    input_img = nib.load(fname)
    input_axcodes = nib.aff2axcodes(input_img.affine)
    reoriented = nib.as_closest_canonical(input_img)
    normalized = normalize_xform(reoriented)
    # Is the input image oriented how we want?
    new_axcodes = ('R', 'A', 'S')
    if normalized is not input_img:
        out_fname = "%s%s%s%s%s" % (out_dir, '/', dwi_prep.split('/')[-1].split('.nii')[0], '_reor-RAS.nii',
                                    dwi_prep.split('/')[-1].split('.nii')[1])
        print("%s%s%s" % ('Reorienting ', dwi_prep, ' to RAS+...'))

        # Flip the bvecs
        transform_orientation = nib.orientations.ornt_transform(nib.orientations.axcodes2ornt(input_axcodes),
                                                                nib.orientations.axcodes2ornt(new_axcodes))
        bvec_array = np.loadtxt(bvec_fname)
        if bvec_array.shape[0] != 3:
            bvec_array = bvec_array.T
        if not bvec_array.shape[0] == transform_orientation.shape[0]:
            raise ValueError("Unrecognized bvec format")
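
The excerpt cuts off right after validating the bvec array. A plausible continuation, sketched here rather than copied from PyNets, applies the axis permutation and sign flips encoded in transform_orientation to each bvec row and writes the result to out_bvec_fname:

        # Sketch of a possible continuation (not part of the excerpt): each row of
        # transform_orientation holds (source axis, flip sign) for one output axis.
        reor_bvecs = np.zeros_like(bvec_array)
        for new_ax, (old_ax, flip) in enumerate(transform_orientation):
            reor_bvecs[new_ax, :] = bvec_array[int(old_ax), :] * float(flip)
        np.savetxt(out_bvec_fname, reor_bvecs)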
github invesalius / invesalius3 / invesalius / reader / others_reader.py View on Github external
to canonical orientation.

    :param dir_: file path
    :return: imagedata object
    """

    if not const.VTK_WARNING:
        log_path = os.path.join(inv_paths.USER_LOG_DIR, 'vtkoutput.txt')
        fow = vtk.vtkFileOutputWindow()
        fow.SetFileName(log_path.encode(const.FS_ENCODE))
        ow = vtk.vtkOutputWindow()
        ow.SetInstance(fow)

    try:
        imagedata = nib.squeeze_image(nib.load(dir_))
        imagedata = nib.as_closest_canonical(imagedata)
        imagedata.update_header()
    except(nib.filebasedimages.ImageFileError):
        return False

    return imagedata
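
The squeeze_image/as_closest_canonical pairing above is a common normalization step: squeeze_image drops trailing length-1 dimensions (so, for example, a (512, 512, 120, 1) scan becomes (512, 512, 120)) before the axes are reordered. A small illustration with synthetic data, not taken from InVesalius:

import numpy as np
import nibabel as nib

# A synthetic 4D image with a trailing singleton dimension and an identity affine.
img = nib.Nifti1Image(np.zeros((4, 4, 4, 1), dtype=np.float32), np.eye(4))
canonical = nib.as_closest_canonical(nib.squeeze_image(img))
print(canonical.shape)                     # (4, 4, 4)
print(nib.aff2axcodes(canonical.affine))   # ('R', 'A', 'S')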
github poldracklab / niworkflows / niworkflows / interfaces / images.py View on Github external
def _run_interface(self, runtime):
        # Load image, orient as RAS
        fname = self.inputs.in_file
        orig_img = nb.load(fname)
        reoriented = nb.as_closest_canonical(orig_img)

        # Set target shape information
        target_zooms = np.array(self.inputs.target_zooms)
        target_shape = np.array(self.inputs.target_shape)
        target_span = target_shape * target_zooms

        zooms = np.array(reoriented.header.get_zooms()[:3])
        shape = np.array(reoriented.shape[:3])

        # Reconstruct transform from orig to reoriented image
        ornt_xfm = nb.orientations.inv_ornt_aff(
            nb.io_orientation(orig_img.affine), orig_img.shape
        )
        # Identity unless proven otherwise
        target_affine = reoriented.affine.copy()
        conform_xfm = np.eye(4)
github neurodata / m2g / ndmg / scripts / ndmg_dwi_pipeline.py View on Github external
# Build gradient table
    [gtab, nodif_B0, nodif_B0_mask] = mgu.make_gtab_and_bmask(bvals, bvec_scaled, dwi_prep, namer.dirs['output']['prep_m'])

    print("%s%s%s" % ('Preprocessing runtime: ', str(np.round(time.time() - start_time, 1)), 's'))
    # -------- Registration Steps ----------------------------------- #
    # Check orientation (t1w)
    img = nib.load(t1w)
    if nib.aff2axcodes(img.affine)[0] == 'L':
        start_time = time.time()
        print('Reorienting t1w image to RAS+ canonical...')
        # Orient t1w to std
        t1w_orig = t1w
        t1w = "{}/t1w_reor.nii.gz".format(namer.dirs['output']['prep_m'])
        shutil.copyfile(t1w_orig, t1w)
        canonical_t1w_img = nib.as_closest_canonical(img)
        nib.save(canonical_t1w_img, t1w)
	print("%s%s%s" % ('Reorienting runtime: ', str(np.round(time.time() - start_time, 1)), 's'))	

    # Check dimensions
    hdr = img.get_header()
    zooms = hdr.get_zooms()
    if (abs(zooms[0]), abs(zooms[1]), abs(zooms[2])) != zoom_set:
        start_time = time.time()
        t1w_orig = t1w
        t1w = "{}/t1w_reslice.nii.gz".format(namer.dirs['output']['prep_m'])
        shutil.copyfile(t1w_orig, t1w)
        if vox_size == '1mm':
            print('Reslicing preprocessed t1w to 1mm...')
            t1w = rgu.reslice_to_xmm(t1w, 1.0)
        elif vox_size == '2mm':
            print('Reslicing preprocessed t1w to 2mm...')
github neurodata / m2g / ndmg / scripts / ndmg_dwi_pipeline.py View on Github external
# Perform eddy correction
    start_time = time.time()
    dwi_prep = "{}/eddy_corrected_data.nii.gz".format(namer.dirs['output']['prep_m'])
    cmd='eddy_correct ' + dwi + ' ' + dwi_prep + ' 0'
    os.system(cmd)
  
    # Check orientation (dwi_prep)
    img = nib.load(dwi_prep)
    if nib.aff2axcodes(img.affine)[0] == 'L':
        start_time = time.time()
        print('Reorienting dwi image to RAS+ canonical...')
        # Orient dwi to std
        dwi_orig = dwi_prep
        dwi_prep = "{}/dwi_prep_reor.nii.gz".format(namer.dirs['output']['prep_m'])
        shutil.copyfile(dwi_orig, dwi_prep)
        canonical_dwi_img = nib.as_closest_canonical(img)
        nib.save(canonical_dwi_img, dwi_prep)
        # Swap x-y axis in bvecs
        bvecs_orig = bvecs
        bvecs = "{}/bvec_reor.bvec".format(namer.dirs['output']['prep_m'])
        shutil.copyfile(bvecs_orig, bvecs)
        bvecs_mat = np.genfromtxt(bvecs)
        bvecs_mat[[0, 1]] = bvecs_mat[[1, 0]]
        np.savetxt(bvecs, bvecs_mat)
        print("%s%s%s" % ('Reorienting runtime: ', str(np.round(time.time() - start_time, 1)), 's'))

    # Check dimensions
    hdr = img.get_header()
    zooms = hdr.get_zooms()
    if (abs(zooms[0]), abs(zooms[1]), abs(zooms[2])) != zoom_set:
        start_time = time.time()
        dwi_orig = dwi_prep
github neuronets / nobrainer / nobrainer / io.py View on Github external
def read_volume(filepath, dtype=None, return_affine=False, to_ras=False):
    """Return numpy array of data from a neuroimaging file."""
    img = nib.load(filepath)
    if to_ras:
        img = nib.as_closest_canonical(img)
    data = img.get_fdata(caching="unchanged")
    if dtype is not None:
        data = data.astype(dtype)
    return data if not return_affine else (data, img.affine)
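
A brief usage note for read_volume (the file path is hypothetical): passing to_ras=True routes the image through as_closest_canonical before the data are read, so downstream code can assume RAS+ axis ordering.

# Hypothetical call: load a volume as float32 in RAS+ orientation and keep its affine.
data, affine = read_volume("sub-01_T1w.nii.gz", dtype="float32",
                           return_affine=True, to_ras=True)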
github aramis-lab / clinica / clinica / pipelines / machine_learning / voxel_based_io.py View on Github external
weights:
        image:
        output_filename:

    Returns:

    """

    # Normalize with 2-norm
    # features = 2 * weights / np.power(norm(weights.flatten(), 2), 2)

    # Normalize inf-norm
    features = weights / abs(weights).max()

    img = nib.load(image)
    canonical_img = nib.as_closest_canonical(img)
    hd = canonical_img.header
    qform = np.zeros((4, 4))

    for i in range(1, 4):
        qform[i-1, i-1] = hd['pixdim'][i]
        qform[i-1, 3] = -1.0 * hd['pixdim'][i] * hd['dim'][i] / 2.0
    qform[3, 3] = 1.0  # keep the affine homogeneous

    output_image = nib.Nifti1Image(features, qform)
    nib.save(output_image, output_filename)
github mwaskom / lyman / lyman / visualizations.py View on Github external
"""
        # -- Load and reorient the anatomical image

        if isinstance(anat, string_types):
            anat_img = nib.load(anat)
            have_orientation = True
        elif isinstance(anat, nib.spatialimages.SpatialImage):
            anat_img = anat
            have_orientation = True
        elif isinstance(anat, np.ndarray):
            anat_img = nib.Nifti1Image(anat, np.eye(4))
            have_orientation = False
        else:
            raise TypeError("anat type {} not understood".format(type(anat)))
        self.anat_img = nib.as_closest_canonical(anat_img)
        self.anat_data = self.anat_img.get_data()

        # -- Load and reorient the statistical image

        if isinstance(stat, string_types):
            stat_img = nib.load(stat)
        elif isinstance(stat, nib.spatialimages.SpatialImage):
            stat_img = stat
        elif isinstance(stat, np.ndarray):
            if stat.dtype is np.dtype("bool"):
                stat = stat.astype(int)
            stat_img = nib.Nifti1Image(stat, anat_img.affine, anat_img.header)
        elif stat is not None:
            raise TypeError("stat type {} not understood".format(type(stat)))
        else:
            stat_img = None
github mwaskom / lyman / lyman / mosaic.py View on Github external
# Load and reorient the anatomical image
        if anat is None:
            if "FSLDIR" in os.environ:
                anat = os.path.join(os.environ["FSLDIR"],
                                    "data/standard/avg152T1_brain.nii.gz")
        if isinstance(anat, string_types):
            anat_img = nib.load(anat)
            have_orientation = True
        elif isinstance(anat, np.ndarray):
            anat_img = nib.Nifti1Image(anat, np.eye(4))
            have_orientation = False
        else:
            anat_img = anat
            have_orientation = True
        self.anat_img = nib.as_closest_canonical(anat_img)
        self.anat_data = self.anat_img.get_data()

        # Load and reorient the statistical image
        if stat is not None:
            if isinstance(stat, string_types):
                stat_img = nib.load(stat)
            elif isinstance(stat, np.ndarray):
                if stat.dtype is np.dtype("bool"):
                    stat = stat.astype(int)
                stat_img = nib.Nifti1Image(stat,
                                           anat_img.affine,
                                           anat_img.header)
            else:
                stat_img = stat
            self.stat_img = nib.as_closest_canonical(stat_img)
        # Load and reorient the mask image