def test_get_volumes_per_label_inconsistent_labels_labels_names():
    omega = [10, 10, 3]
    data_test = np.zeros(omega)
    data_test[:2, :2, :2] = 2
    data_test[-3:, -3:, -2:] = 3
    im_test = nib.Nifti1Image(data_test, affine=np.eye(4))
    with np.testing.assert_raises(IOError):
        get_volumes_per_label(im_test, [0, 2, 3, 4], labels_names=['a', 'b'])
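# For contrast, a minimal sketch (not from the original suite) of the
# consistent call the test above implies should not raise: one entry in
# labels_names per requested label. The names 'a'-'d' are illustrative only.
def test_get_volumes_per_label_consistent_labels_names_sketch():
    data_test = np.zeros([10, 10, 3])
    data_test[:2, :2, :2] = 2
    im_test = nib.Nifti1Image(data_test, affine=np.eye(4))
    get_volumes_per_label(im_test, [0, 2, 3, 4],
                          labels_names=['a', 'b', 'c', 'd'])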
if segmentation_params is not None:
    for k in segmentation_params:
        default_seg_params[k] = segmentation_params[k]
self.segmentation_params = default_seg_params
self.seg_algo = self.segmentation_params["seg_algo"].lower()

default_clean_params = get_default_args(seg.clean_bundle)
if clean_params is not None:
    for k in clean_params:
        default_clean_params[k] = clean_params[k]
self.clean_params = default_clean_params

if reg_template is None:
    self.reg_template = afd.read_mni_template()
else:
    if not isinstance(reg_template, nib.Nifti1Image):
        reg_template = nib.load(reg_template)
    self.reg_template = reg_template
# Create the bundle dict after reg_template has been resolved:
self.bundle_dict = make_bundle_dict(bundle_names=bundle_names,
                                    seg_algo=self.seg_algo,
                                    resample_to=self.reg_template)
# This is where all the outputs will go:
self.afq_path = op.join(bids_path, 'afq')
# Create it as needed:
os.makedirs(self.afq_path, exist_ok=True)
bids_layout = BIDSLayout(bids_path, derivatives=True)
bids_description = bids_layout.description
for ii in grid:
    for jj in ii:
        streamlines.append(jj)

# Treat these streamlines as if they are in trackvis format and generate counts
counts_trackvis = density_map(streamlines, (4,4,5), (1,1,1))
# Treat these streamlines as if they are in nifti format and generate counts
counts_nifti = track_counts(streamlines, (4,4,5), (1,1,1),
                            return_elements=False)
print("saving trk files and track_count volumes")
aff = np.eye(4)
aff[0, 0] = -1
img = nib.Nifti1Image(counts_trackvis.astype('int16'), aff)
nib.save(img, 'counts_trackvis.nii.gz')
img = nib.Nifti1Image(counts_nifti.astype('int16'), aff)
nib.save(img, 'counts_nifti.nii.gz')
hdr = empty_header()
hdr['voxel_size'] = (1,1,1)
hdr['voxel_order'] = 'las'
hdr['vox_to_ras'] = aff
hdr['dim'] = counts_nifti.shape
# Treat these streamlines like they are in trackvis format and save them
streamlines_trackvis = ((ii, None, None) for ii in streamlines)
write('slAsTrackvis.trk', streamlines_trackvis, hdr)
# Move these streamlines from nifti to trackvis format and save them
streamlines_nifti = ((ii + .5, None, None) for ii in streamlines)
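# Note: the half-voxel shift above presumably accounts for trackvis-style
# coordinates placing the corner of the first voxel at the origin, whereas
# nifti-style voxel coordinates place its center there.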
    Args:
        img (nibabel.nifti1.Nifti1Image): image on which to find landmarks
        landmark_percs (np.ndarray): corresponding landmark points of standard scale
        standard_scale (np.ndarray): landmarks on the standard scale
        mask (nibabel.nifti1.Nifti1Image): foreground mask for img

    Returns:
        normalized (nibabel.nifti1.Nifti1Image): normalized image
    """
    img_data = img.get_data()
    mask_data = img_data > img_data.mean() if mask is None else mask.get_data()
    masked = img_data[mask_data > 0]
    landmarks = get_landmarks(masked, landmark_percs)
    f = interp1d(landmarks, standard_scale, fill_value='extrapolate')
    normed = f(img_data)
    return nib.Nifti1Image(normed, img.affine, img.header)
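# To make the interpolation step above concrete: a toy sketch (values made up
# here) of mapping intensities through matched landmark pairs with scipy's
# interp1d, mirroring the call above with fill_value='extrapolate'.
from scipy.interpolate import interp1d
import numpy as np

toy_landmarks = np.array([0.0, 50.0, 100.0])   # landmarks found in the image
toy_standard = np.array([0.0, 60.0, 120.0])    # landmarks on the standard scale
f_toy = interp1d(toy_landmarks, toy_standard, fill_value='extrapolate')
print(f_toy(np.array([25.0, 110.0])))          # 25 maps to 30; 110 extrapolates to 132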
def nii_ones_like(in_file, value, dtype, newpath=None):
    """Create a NIfTI file filled with ``value``, matching properties of ``in_file``."""
    import os
    import numpy as np
    import nibabel as nb

    nii = nb.load(in_file)
    data = np.ones(nii.shape, dtype=float) * value

    out_file = os.path.join(newpath or os.getcwd(), "filled.nii.gz")
    nii = nb.Nifti1Image(data, nii.affine, nii.header)
    nii.set_data_dtype(dtype)
    nii.to_filename(out_file)

    return out_file
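# A usage sketch for nii_ones_like as defined above; the input path is a
# placeholder. This writes filled.nii.gz (every voxel set to 0, stored as
# uint8) in the current working directory and returns its path.
zeros_path = nii_ones_like("sub-01_T1w.nii.gz", 0, "uint8")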
if args.output_nifti and is_nifti_data:
    list_seg_result.append(seg_result)
result_processing_time = datetime.now() - start_time
logger.info("Processing of the received inference results is finished")
logger.info("Processing time is {}".format(result_processing_time))
# --------------------------------------------- 7. Save output -----------------------------------------------
tiff_output_name = os.path.join(args.path_to_output, 'output.tiff')
Image.new('RGB', (data.shape[3], data.shape[2])).save(tiff_output_name, append_images=list_img, save_all=True)
logger.info("Result tiff file was saved to {}".format(tiff_output_name))
if args.output_nifti and is_nifti_data:
    for idx, seg_res in enumerate(list_seg_result):
        nii_filename = os.path.join(args.path_to_output,
                                    'output_{}.nii.gz'.format(idx))
        nib.save(nib.Nifti1Image(seg_res, affine=affine), nii_filename)
        logger.info("Result nifti file was saved to {}".format(nii_filename))
def _run_interface(self, runtime):
    for fname in self.inputs.volumes:
        img = nb.load(fname, mmap=NUMPY_MMAP)
        data = np.array(img.get_data())

        active_map = data > self.inputs.threshold

        thresholded_map = np.zeros(data.shape)
        thresholded_map[active_map] = data[active_map]

        new_img = nb.Nifti1Image(thresholded_map, img.affine, img.header)
        _, base, _ = split_filename(fname)
        nb.save(new_img, base + '_thresholded.nii')

    return runtime
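# The interface above boils down to a per-voxel threshold; a standalone sketch
# of the same operation with plain nibabel/numpy (file name and threshold are
# placeholders, not part of the original interface).
import numpy as np
import nibabel as nb

img = nb.load("zstat1.nii.gz")
data = np.asarray(img.get_fdata())
thresholded = np.where(data > 3.1, data, 0.0)   # keep only supra-threshold voxels
nb.save(nb.Nifti1Image(thresholded, img.affine, img.header),
        "zstat1_thresholded.nii")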
stc_func = "{}/{}_stc.nii.gz".format(self.outdir, func_name)

# trim the first 15 seconds of data while tissue reaches steady state
# of radiofrequency excitation
func_im = nb.load(self.func)
tr = func_im.header.get_zooms()[3]
if tr == 0:
    raise ZeroDivisionError(
        "Failed to determine number of frames to trim due to tr=0."
    )
nvol_trim = int(np.floor(15 / float(tr)))
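# e.g. with tr = 2.0 s this drops floor(15 / 2.0) = 7 volumes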
# remove the first nvol_trim timesteps
mssg = "Scrubbing first 15 seconds ({0:d} volumes due to tr={1: .3f}s)"
print(mssg.format(nvol_trim, tr))
trimmed_dat = func_im.get_data()[:, :, :, nvol_trim:]
trimmed_im = nb.Nifti1Image(
    dataobj=trimmed_dat, header=func_im.header, affine=func_im.affine
)
nb.save(img=trimmed_im, filename=trim_func)

# use slicetimer if user passes slicetiming information
if stc is not None:
    self.slice_time_correct(trim_func, stc_func, tr, stc)
else:
    stc_func = trim_func

# motion correct using the mean volume (FSL default)
self.motion_correct(stc_func, self.motion_func, None)
self.mc_params = "{}.par".format(self.motion_func)
cmd = "cp {} {}".format(self.motion_func, self.preproc_func)
mgu.execute_cmd(cmd, verb=True)
new_volume = numpy.zeros(ref_volume.shape)
# TODO: Find out the use of the following commented line:
# new_volume[:, :] = numpy.nan
kx, ky, kz = numpy.mgrid[-dist:dist + 1, -dist:dist + 1, -dist:dist + 1]
for val, pos in zip(values, positions):
    ix, iy, iz = numpy.linalg.solve(
        ref_volume.affine, numpy.append(pos, 1.0))[0:3].astype(int)
    # new_volume[inds[0], inds[1], inds[2]] = val
    new_volume[ix + kx, iy + ky, iz + kz] = val
# add_min_max(new_volume)
new_nii = nibabel.Nifti1Image(new_volume, ref_volume.affine)
nibabel.save(new_nii, out_volume_file)
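# The world-to-voxel conversion in the loop above (numpy.linalg.solve on the
# homogeneous coordinate) can equivalently be written with nibabel's
# apply_affine helper and the inverse affine; a small sketch reusing the names
# from the loop above, for a single 3-vector `pos` in world coordinates.
from nibabel.affines import apply_affine
ix, iy, iz = apply_affine(
    numpy.linalg.inv(ref_volume.affine), pos).astype(int)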
    Returns
    -------
    nibabel.nifti1.Nifti1Image

    Notes
    -----
    Input is not modified.
    """
    # misc
    vol = check_niimg(vol)
    # assert len(vol.shape) == 3, vol.shape

    # convert realignment params to affine transformation matrix
    M_q = spm_matrix(q)
    if inverse:
        M_q = scipy.linalg.inv(M_q)

    # apply affine transformation
    return nibabel.Nifti1Image(vol.get_data(), np.dot(
        M_q, vol.get_affine()))