How to use the dipy.viz.window module in dipy

To help you get started, we’ve selected a few dipy examples that show popular ways dipy.viz.window is used in public projects.
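All of the snippets below follow the same basic pattern: build a renderer, add one or more actors to it, then save a still image with window.record and/or open an interactive window with window.show. Here is a minimal sketch of that pattern (the actor.axes() call is just an illustrative placeholder; note that newer DIPY/FURY releases rename window.Renderer to window.Scene):

from dipy.viz import window, actor

ren = window.Renderer()   # window.Scene() in newer releases
ren.add(actor.axes())     # any actor works; axes() simply draws the coordinate axes
window.record(ren, out_path='example.png', size=(600, 600))  # save a still image
# window.show(ren)        # uncomment to open an interactive window instead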


github nipy / dipy / 1.0.0 / _downloads / 0738969566330d34f66b6b94700acd35 / sfm_reconst.py
containing parts of the corpus callosum and of the centrum semiovale
"""

data_small = data[20:50, 55:85, 38:39]

"""
Fitting the model to this small volume of data, we calculate the ODF of this
model on the sphere, and plot it.
"""

sf_fit = sf_model.fit(data_small)
sf_odf = sf_fit.odf(sphere)

fodf_spheres = actor.odf_slicer(sf_odf, sphere=sphere, scale=0.8, colormap='plasma')

ren = window.Renderer()
ren.add(fodf_spheres)

print('Saving illustration as sf_odfs.png')
window.record(ren, out_path='sf_odfs.png', size=(1000, 1000))
if interactive:
    window.show(ren)

"""
We can extract the peaks from the ODF, and plot these as well
"""

sf_peaks = dpp.peaks_from_model(sf_model,
                                data_small,
                                sphere,
                                relative_peak_threshold=.5,
                                min_separation_angle=25)
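The snippet above is cut off before the peaks are actually plotted. A minimal sketch of how the plotting could look, assuming sf_peaks is the PeaksAndMetrics object returned by dpp.peaks_from_model (actor.peak_slicer draws the peak directions, scaled by their values):

fodf_peaks = actor.peak_slicer(sf_peaks.peak_dirs, sf_peaks.peak_values)

ren = window.Renderer()
ren.add(fodf_peaks)

print('Saving illustration as sf_peaks.png')
window.record(ren, out_path='sf_peaks.png', size=(1000, 1000))
if interactive:
    window.show(ren)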

github nipy / dipy / doc / examples / introduction_to_basic_tracking.py
# Initialization of LocalTracking. The computation happens in the next step.
streamlines_generator = LocalTracking(csa_peaks, classifier, seeds,
                                      affine=np.eye(4), step_size=.5)

# Generate streamlines object
streamlines = Streamlines(streamlines_generator)

# Prepare the display objects.
color = cmap.line_colors(streamlines)

if have_fury:
    streamlines_actor = actor.line(streamlines, cmap.line_colors(streamlines))

    # Create the 3D display.
    r = window.Renderer()
    r.add(streamlines_actor)

    # Save a still image for this static example; for interactivity, use window.show() below.
    window.record(r, n_frames=1, out_path='deterministic.png', size=(800, 800))
    if interactive:
        window.show(r)

"""
.. figure:: deterministic.png
   :align: center

   **Corpus Callosum Deterministic**

We've created a deterministic set of streamlines, so called because if you
repeat the fiber tracking (keeping all the inputs the same) you will get
exactly the same set of streamlines. We can save the streamlines as a Trackvis file.

github nipy / dipy / doc / examples / introduction_to_basic_tracking.py
``seeds``, to ``LocalTracking`` to get a probabilistic model of the corpus
callosum.
"""

streamlines_generator = LocalTracking(prob_dg, classifier, seeds,
                                      affine=np.eye(4), step_size=.5,
                                      max_cross=1)

# Generate streamlines object.
streamlines = Streamlines(streamlines_generator)

if have_fury:
    streamlines_actor = actor.line(streamlines, cmap.line_colors(streamlines))

    # Create the 3D display.
    r = window.Renderer()
    r.add(streamlines_actor)

    # Save still images for this static example.
    window.record(r, n_frames=1, out_path='probabilistic.png', size=(800, 800))
    if interactive:
        window.show(r)

"""
.. figure:: probabilistic.png
   :align: center

   Corpus callosum probabilistic tracking.
"""

save_trk("CSD_prob.trk", streamlines, affine,
         shape=labels.shape)

github nipy / dipy / 1.0.0 / _downloads / 6c9f2ef406fd32c1c6ee06117a2585d5 / linear_fascicle_evaluation.py
# Enables/disables interactive visualization
interactive = False

candidate_streamlines_actor = actor.streamtube(candidate_sl,
                                               cmap.line_colors(candidate_sl))
cc_ROI_actor = actor.contour_from_roi(cc_slice, color=(1., 1., 0.),
                                      opacity=0.5)

vol_actor = actor.slicer(t1_data)

vol_actor.display(x=40)     # show a single slice along the x axis
vol_actor2 = vol_actor.copy()
vol_actor2.display(z=35)    # a second copy, sliced along the z axis

# Add display objects to canvas
ren = window.Renderer()
ren.add(candidate_streamlines_actor)
ren.add(cc_ROI_actor)
ren.add(vol_actor)
ren.add(vol_actor2)
window.record(ren, n_frames=1,
              out_path='life_candidates.png',
              size=(800, 800))
if interactive:
    window.show(ren)

"""

.. figure:: life_candidates.png
   :align: center

   **Candidate connectome before life optimization**

github nipy / dipy / 1.0.0 / _downloads / 3cbc8a172611811dc9bc4b5579a360ec / simulate_multi_tensor.py
from dipy.viz import window, actor

# Enables/disables interactive visualization
interactive = False

ren = window.Renderer()

odf_actor = actor.odf_slicer(odf[None, None, None, :], sphere=sphere, colormap='plasma')
odf_actor.RotateX(90)

ren.add(odf_actor)

print('Saving illustration as multi_tensor_simulation.png')
window.record(ren, out_path='multi_tensor_simulation.png', size=(300, 300))
if interactive:
    window.show(ren)


"""
.. figure:: multi_tensor_simulation.png
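
When you only need the rendered frame itself, for example in a test or a notebook, dipy.viz.window also provides snapshot, which renders the scene off-screen and returns the pixels as a NumPy array (and can optionally write them to a file). A small sketch, reusing the ren built in the snippet above:

# Render off-screen and get the image back as an array; fname is optional.
arr = window.snapshot(ren, fname='multi_tensor_snapshot.png', size=(300, 300))
print(arr.shape)  # e.g. (300, 300, 3)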

github nipy / dipy / doc / examples / viz_bundles.py
"""
This bundle happens to be in world coordinates, so we need to transform it into
native image coordinates to bring it into the same coordinate space as the
``fa`` image.
"""

bundle_native = transform_streamlines(bundle, np.linalg.inv(affine))

"""
Show every streamline with an orientation color
===============================================

This is the default option when you are using ``line`` or ``streamtube``.
"""

renderer = window.Renderer()

stream_actor = actor.line(bundle_native)

renderer.set_camera(position=(-176.42, 118.52, 128.20),
                    focal_point=(113.30, 128.31, 76.56),
                    view_up=(0.18, 0.00, 0.98))

renderer.add(stream_actor)

# Uncomment the line below to display the window
# window.show(renderer, size=(600, 600), reset_camera=False)
window.record(renderer, out_path='bundle1.png', size=(600, 600))

"""
.. figure:: bundle1.png
   :align: center
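
Orientation coloring is only the default; line (and streamtube) also accept explicit colors. A minimal sketch, reusing bundle_native from above, that renders the whole bundle in a single color (the RGB value is arbitrary):

stream_actor2 = actor.line(bundle_native, (1., 0.5, 0.))  # one RGB tuple colors every streamline

renderer2 = window.Renderer()
renderer2.add(stream_actor2)
window.record(renderer2, out_path='bundle2.png', size=(600, 600))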

github nipy / dipy / 1.0.0 / _downloads / ff72c243f489a6c7dd4771841e8b27e3 / tracking_bootstrap_peaks.py
"""


pmf = csd_fit.odf(small_sphere).clip(min=0)
peak_dg = ClosestPeakDirectionGetter.from_pmf(pmf, max_angle=30.,
                                              sphere=small_sphere)
peak_streamline_generator = LocalTracking(peak_dg, stopping_criterion, seeds,
                                          affine, step_size=.5)
streamlines = Streamlines(peak_streamline_generator)
sft = StatefulTractogram(streamlines, hardi_img, Space.RASMM)
save_trk(sft, "closest_peak_dg_CSD.trk")

if has_fury:
    r = window.Renderer()
    r.add(actor.line(streamlines, colormap.line_colors(streamlines)))
    window.record(r, out_path='tractogram_closest_peak_dg.png',
                  size=(800, 800))
    if interactive:
        window.show(r)

"""
.. figure:: tractogram_closest_peak_dg.png
   :align: center

   **Corpus Callosum Closest Peak Deterministic Direction Getter**

We have created a set of streamlines using the closest peak direction getter,
which is a type of deterministic tracking. If you repeat the fiber tracking
(keeping all inputs the same) you will get exactly the same set of streamlines.
"""

github nipy / dipy / 1.0.0 / _downloads / cf776b909486353c660b02d9e946d53c / reconst_forecast.py
"""

odf = f_fit.odf(sphere)
print('fODF.shape (%d, %d, %d, %d)' % odf.shape)

"""
Display a part of the fODFs
"""

odf_actor = actor.odf_slicer(odf[16:36, :, 30:45], sphere=sphere,
                             colormap='plasma', scale=0.6)
odf_actor.display(y=0)
odf_actor.RotateX(-90)
ren = window.Renderer()
ren.add(odf_actor)
window.record(ren, out_path='fODFs.png', size=(600, 600), magnification=4)

"""
.. figure:: fODFs.png