path_im1 = os.path.join(root_dir, 'output/test_im1.nii.gz')
path_im2 = os.path.join(root_dir, 'output/test_im2.nii.gz')
nib.save(im_original, path_im1)
nib.save(im_renewed, path_im2)
if visual_assessment:
    os.system('itksnap -g {}'.format(path_im1))
    os.system('itksnap -g {}'.format(path_im2))
    print('Check that the second image opened is the first multiplied by 10.')
im1 = nib.load(path_im1)
im2 = nib.load(path_im2)
im1_data = im1.get_fdata()
im2_data = im2.get_fdata()
dims = im1_data.shape  # image dimensions used for the random sampling below
# check with a random sampling if the second image is the
# first multiplied by 10
num_samples = 20
i_v = np.random.choice(range(dims[0]), size=(num_samples, 1))
j_v = np.random.choice(range(dims[1]), size=(num_samples, 1))
k_v = np.random.choice(range(dims[2]), size=(num_samples, 1))
points = np.concatenate((i_v, j_v, k_v), axis=1)
assert_array_equal(points.shape, [num_samples, 3])
for m in range(num_samples):
    i, j, k = points[m, :]
    # the second image must equal the first multiplied by 10 at each sampled voxel
    np.testing.assert_almost_equal(im2_data[i, j, k], 10 * im1_data[i, j, k])
Parameters
----------
gtab : obj
    DIPY object storing diffusion gradient information.
data : array
    4D numpy array of diffusion image data.
B0_mask : str
    File path to B0 brain mask.

Returns
-------
csd_mod : obj
    Spherical harmonics coefficients of the CSD-estimated reconstruction model.
'''
from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel, recursive_response
print('Fitting CSD model...')
B0_mask_data = nib.load(B0_mask).get_fdata().astype('bool')
print('Reconstructing...')
response = recursive_response(gtab, data, mask=B0_mask_data, sh_order=8, peak_thr=0.01, init_fa=0.08,
                              init_trace=0.0021, iter=8, convergence=0.001, parallel=False)
print('CSD Response: ' + str(response))
model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=6)
csd_mod = model.fit(data, B0_mask_data).shm_coeff
del model, response, B0_mask_data
return csd_mod
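# A hedged sketch of how the three inputs described in the docstring above (gtab,
# data, B0_mask) are typically assembled with DIPY; the file names are placeholders.
import nibabel as nib
import numpy as np
from dipy.core.gradients import gradient_table
from dipy.io import read_bvals_bvecs

data = np.asarray(nib.load('dwi.nii.gz').dataobj)        # 4D diffusion-weighted data
bvals, bvecs = read_bvals_bvecs('dwi.bval', 'dwi.bvec')
gtab = gradient_table(bvals, bvecs)                      # DIPY gradient table
B0_mask = 'b0_brain_mask.nii.gz'                         # path to a B0 brain mask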
#!/usr/bin/python
import numpy as np
import nibabel as nib
import argparse as ap
parser = ap.ArgumentParser(description="Correct mean or skeleton mask via T1toStd masks for all subjects")
parser.add_argument("-i", "--FAMask", nargs=1, help="[FAMask]", metavar=('*.nii.gz'), default=['mean_FA_skeleton_mask.nii.gz'])
parser.add_argument("-m", "--masks", nargs='+', help="[T1toStdMasks] ...", metavar=('*.nii.gz'), required=True)
opts = parser.parse_args()
numMerge = len(opts.masks)
skeletonMask = nib.load(opts.FAMask[0])
skeletonMaskData = skeletonMask.get_fdata()
affine = skeletonMask.affine
header = skeletonMask.header
outMask = skeletonMask.get_fdata()
data_index = skeletonMaskData > 0.99
for i in range(numMerge):
    outMask[data_index] = np.multiply(outMask[data_index], nib.load(opts.masks[i]).get_fdata()[data_index])
nib.save(nib.Nifti1Image(outMask.astype(np.float32, order="C"), affine), opts.FAMask[0])
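# Illustrative, self-contained sketch of the masking step above: the voxel-wise
# product is restricted to skeleton voxels via boolean indexing (synthetic arrays).
import numpy as np

skeleton = np.zeros((4, 4, 4), dtype=np.float32)
skeleton[1:3, 1:3, 1:3] = 1.0                    # skeleton mask voxels
subject_masks = [np.ones((4, 4, 4), dtype=np.float32) for _ in range(3)]
subject_masks[0][1, 1, 1] = 0.0                  # one subject misses this voxel

out = skeleton.copy()
idx = skeleton > 0.99                            # only correct voxels inside the skeleton
for m in subject_masks:
    out[idx] = out[idx] * m[idx]                 # voxel survives only if present in every mask
assert out[1, 1, 1] == 0.0 and out[2, 2, 2] == 1.0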
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()
    assert_inputs_exist(parser, args.sh_files)
    assert_outputs_exist(parser, args, args.out_sh)
    assert_same_resolution(args.sh_files)
    first_im = nb.load(args.sh_files[0])
    out_coeffs = first_im.get_fdata(dtype=np.float32)
    for sh_file in args.sh_files[1:]:
        im = nb.load(sh_file)
        im_dat = im.get_fdata(dtype=np.float32)
        out_coeffs = np.where(np.abs(im_dat) > np.abs(out_coeffs),
                              im_dat, out_coeffs)
    # TODO remove header or add optional argument name
    nb.save(nb.Nifti1Image(out_coeffs, first_im.affine, first_im.header),
            args.out_sh)
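# Small self-contained sketch of the merge rule used above: at each position keep
# the coefficient with the larger absolute value (arrays are synthetic).
import numpy as np

a = np.array([[0.2, -0.9], [0.5, 0.1]], dtype=np.float32)
b = np.array([[-0.7, 0.3], [0.4, -0.2]], dtype=np.float32)
merged = np.where(np.abs(b) > np.abs(a), b, a)
print(merged)   # [[-0.7 -0.9]
                #  [ 0.5 -0.2]]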
"""Extract voxel time courses for each subcortical roi index
Parameters
----------
timeseries_file: a 4D Nifti file
label_file: a 3D file containing rois in the same space/size of the 4D file
indices: a list of indices for ROIs to extract.
Returns
-------
out_file: a text file containing time courses for each voxel of each roi
The first four columns are: freesurfer index, i, j, k positions in the
label file
"""
img = nb.load(timeseries_file)
data = img.get_fdata()
roiimg = nb.load(label_file)
rois = roiimg.get_fdata()
prefix = split_filename(timeseries_file)[1]
out_ts_file = os.path.join(os.getcwd(), '%s_subcortical_ts.txt' % prefix)
with open(out_ts_file, 'wt') as fp:
    for fsindex in indices:
        ijk = np.nonzero(rois == fsindex)
        ts = data[ijk]
        for i0, row in enumerate(ts):
            fp.write('%d,%d,%d,%d,' % (fsindex, ijk[0][i0],
                                       ijk[1][i0], ijk[2][i0]) +
                     ','.join(['%.10f' % val for val in row]) + '\n')
return out_ts_file
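# Synthetic demonstration of the indexing pattern above: np.nonzero on the label
# volume gives (i, j, k) coordinates for one ROI, and indexing the 4D array with
# that tuple yields one time course per voxel.
import numpy as np

data = np.random.rand(5, 5, 5, 10)       # 10 time points per voxel
rois = np.zeros((5, 5, 5), dtype=int)
rois[2, 2, 2] = 17                        # two voxels labelled with ROI index 17
rois[2, 3, 2] = 17

ijk = np.nonzero(rois == 17)              # tuple of coordinate arrays
ts = data[ijk]                            # shape: (n_voxels_in_roi, n_timepoints)
print(ts.shape)                           # (2, 10)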
def testIntersubjectRigidRegistration(fname0, fname1, level, outfname):
    nib_left = nib.load(fname0)
    nib_right = nib.load(fname1)
    left = nib_left.get_fdata().astype(np.double).squeeze()
    right = nib_right.get_fdata().astype(np.double).squeeze()
    leftPyramid = [i for i in rcommon.pyramid_gaussian_3D(left, level)]
    rightPyramid = [i for i in rcommon.pyramid_gaussian_3D(right, level)]
    plotSlicePyramidsAxial(leftPyramid, rightPyramid)
    print('Estimation started.')
    beta = estimateRigidTransformationMultiscale3D(leftPyramid, rightPyramid)
    print('Estimation finished.')
    rcommon.applyRigidTransformation3D(left, beta)
    sl = np.array(left.shape) // 2
    sr = np.array(right.shape) // 2
    rcommon.overlayImages(left[sl[0], :, :], leftPyramid[0][sr[0], :, :])
    rcommon.overlayImages(left[sl[0], :, :], right[sr[0], :, :])
    affine_transform = AffineTransform('ijk', ['aligned-z=I->S', 'aligned-y=P->A', 'aligned-x=L->R'], np.eye(4))
    left = Image(left, affine_transform)
def testEstimateMonomodalDeformationField2DMultiScale(lambdaParam):
    fname0 = 'IBSR_01_to_02.nii.gz'
    fname1 = 'data/t1/IBSR18/IBSR_02/IBSR_02_ana_strip.nii.gz'
    nib_moving = nib.load(fname0)
    nib_fixed = nib.load(fname1)
    moving = nib_moving.get_fdata().squeeze()
    fixed = nib_fixed.get_fdata().squeeze()
    moving = np.copy(moving, order='C')
    fixed = np.copy(fixed, order='C')
    sl = moving.shape
    sr = fixed.shape
    level = 5
    # ---sagittal---
    moving = moving[sl[0] // 2, :, :].copy()
    fixed = fixed[sr[0] // 2, :, :].copy()
    # ---coronal---
    # moving = moving[:, sl[1]//2, :].copy()
    # fixed = fixed[:, sr[1]//2, :].copy()
    # ---axial---
    # moving = moving[:, :, sl[2]//2].copy()
Parameters
----------
path : string
    Full path to a .gii file.

Returns
-------
vertices : array_like
    Array of vertices of shape (n_vertices, 3)
faces : array_like
    Array of faces of shape (n_faces, 3)
"""
is_nibabel_installed(raise_error=True)
import nibabel
logger.info(' GIFTI file detected')
arch = nibabel.load(path)
return arch.darrays[0].data, arch.darrays[1].data
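# Hedged usage sketch of the same nibabel GIFTI access pattern; the file name is a
# placeholder and the darrays layout (vertices first, faces second) is assumed.
import nibabel

arch = nibabel.load('lh.pial.gii')
vertices = arch.darrays[0].data   # (n_vertices, 3) coordinates
faces = arch.darrays[1].data      # (n_faces, 3) vertex indices
print(vertices.shape, faces.shape)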
def read_data(case_idx, input_name, loc):
    set_name = get_set_name(case_idx)
    image_path = get_filename(set_name, case_idx, input_name, loc)
    print(image_path)
    return nib.load(image_path)
# Set logger or default print
self.logger = logger if logger is not None else ScreenLogger()
# Set image and label paths (absolute)
self.image_path = self._validate_path(img_path)
if not self.predict_mode:
    self.labels_path = self._validate_path(labels_path)
# Validate that the image and label data match and get image ID
self.id = self._get_and_validate_id()
# Set variables to store loaded image and label information
self.image_obj = nib.load(self.image_path)
self.labels_obj = None
if not self.predict_mode:
    self.labels_obj = nib.load(self.labels_path)
# Stores the data of the image and labels objects
self._image = None
self._labels = None
self.scaler = None
# ViewInterpolator object initialized with set_interpolator_object
self.interpolator = None
# May be set by various functions to keep track of state of this image
self.load_state = None
# Data types
self.im_dtype = im_dtype
self.lab_dtype = lab_dtype
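# Note: nib.load above only parses the header; the voxel arrays behind image_obj and
# labels_obj are read lazily on first access (e.g. get_fdata()), which is presumably
# why _image and _labels start as None. Illustrative deferred-load pattern below;
# `LazyImage` and its `image` property are hypothetical, not part of the class above.
import numpy as np
import nibabel as nib

class LazyImage:
    def __init__(self, path, dtype=np.float32):
        self.image_obj = nib.load(path)   # header parsed, voxel array not yet read
        self._image = None
        self.im_dtype = dtype

    @property
    def image(self):
        # Read and cache the voxel data on first access only.
        if self._image is None:
            self._image = self.image_obj.get_fdata().astype(self.im_dtype)
        return self._image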