How to use the dipy.viz.actor module in dipy

To help you get started, we’ve selected a few dipy examples based on popular ways it is used in public projects.

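Most of the snippets below follow the same basic pattern: build one or more actors with functions from ``dipy.viz.actor``, add them to a ``window.Renderer``, and then either show the scene interactively or record it to a PNG. The sketch below illustrates that pattern with made-up placeholder streamlines (the variable ``lines`` and the output filename are not taken from any of the examples):

import numpy as np
from dipy.viz import actor, window

# Placeholder data: two short 3D polylines; any sequence of (N, 3) arrays works.
lines = [np.array([[0., 0., 0.], [1., 1., 1.], [2., 2., 2.]]),
         np.array([[0., 1., 0.], [1., 2., 1.]])]

ren = window.Renderer()            # scene that will hold the actors
ren.add(actor.line(lines))         # create a line actor and add it to the scene

interactive = False
if interactive:
    window.show(ren)               # pop up an interactive window
window.record(ren, out_path='actor_line_example.png', size=(600, 600))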

github fury-gl / fury / tests / test_window.py View on Github external
@npt.dec.skipif(not actor.have_vtk or not actor.have_vtk_colors or skip_it)
@xvfb_it
def test_renderer():

    ren = window.Renderer()

    npt.assert_equal(ren.size(), (0, 0))

    # background color for renderer (1, 0.5, 0)
    # 0.001 added here to remove numerical errors when moving from float
    # to int values
    bg_float = (1, 0.501, 0)

    # that will come in the image in the 0-255 uint scale
    bg_color = tuple((np.round(255 * np.array(bg_float))).astype('uint8'))

    ren.background(bg_float)
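    # Sketch of how such a background check might continue (not part of the
    # original snippet; assumes window.snapshot is available in this version):
    # render the scene off-screen and compare a corner pixel with the expected
    # uint8 background color computed above.
    arr = window.snapshot(ren)
    npt.assert_equal(tuple(arr[0, 0, :3]), bg_color)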
github nipy / dipy / doc / examples / reconst_mcsd.py View on Github external
"""
Now we build the MSMT-CSD model with the ``response_mcsd`` as input. We then
call the ``fit`` function to fit one slice of the 3D data and visualize it.
"""

mcsd_model = MultiShellDeconvModel(gtab, response_mcsd)
mcsd_fit = mcsd_model.fit(denoised_arr[:, :, 10:11])

"""
From the fit obtained in the previous step, we generate the ODFs which can be
visualized as follows:
"""

mcsd_odf = mcsd_fit.odf(sphere)
fodf_spheres = actor.odf_slicer(mcsd_odf, sphere=sphere, scale=0.01,
                                norm=False, colormap='plasma')

interactive = False
ren = window.Renderer()
ren.add(fodf_spheres)
ren.reset_camera_tight()

print('Saving illustration as msdodf.png')
window.record(ren, out_path='msdodf.png', size=(600, 600))

if interactive:
    window.show(ren)

"""
.. figure:: msdodf.png
github nipy / dipy / doc / examples / viz_advanced.py View on Github external
If we want to see the objects in native space we need to make sure that all
objects which are currently in world coordinates are transformed back to
native space using the inverse of the affine.
"""

if not world_coords:
    from dipy.tracking.streamline import transform_streamlines
    streamlines = transform_streamlines(streamlines, np.linalg.inv(affine))

"""
Now we create a ``Renderer`` object and add the streamlines using the ``line``
function and an image plane using the ``slicer`` function.
"""

ren = window.Renderer()
stream_actor = actor.line(streamlines)

if not world_coords:
    image_actor_z = actor.slicer(data, affine=np.eye(4))
else:
    image_actor_z = actor.slicer(data, affine)

"""
We can also change the opacity of the slicer.
"""

slicer_opacity = 0.6
image_actor_z.opacity(slicer_opacity)

"""
We can add additional slicers by copying the original and adjusting the
``display_extent``.
github nipy / dipy / doc / examples / reconst_forecast.py View on Github external
"""

sphere = get_sphere('symmetric724')

"""
Compute the fODFs.
"""

odf = f_fit.odf(sphere)
print('fODF.shape (%d, %d, %d, %d)' % odf.shape)

"""
Display a part of the fODFs
"""

odf_actor = actor.odf_slicer(odf[16:36, :, 30:45], sphere=sphere,
                             colormap='plasma', scale=0.6)
odf_actor.display(y=0)
odf_actor.RotateX(-90)
ren = window.Renderer()
ren.add(odf_actor)
window.record(ren, out_path='fODFs.png', size=(600, 600), magnification=4)

"""
.. figure:: fODFs.png
github nipy / dipy / doc / examples / viz_roi_contour.py View on Github external
seed_mask = labels == 2
seeds = utils.seeds_from_mask(seed_mask, density=[1, 1, 1], affine=affine)

# Initialization of LocalTracking. The computation happens in the next step.
streamlines = LocalTracking(csa_peaks, classifier, seeds, affine,
                            step_size=2)

# Compute streamlines and store as a list.
streamlines = Streamlines(streamlines)

"""
We will create a streamline actor from the streamlines.
"""

streamlines_actor = actor.line(streamlines, cmap.line_colors(streamlines))

"""
Next, we create a surface actor from the corpus callosum seed ROI. We
provide the ROI data, the affine, the color in [R,G,B], and the opacity as
a decimal between zero and one. Here, we set the color as blue/green with
50% opacity.
"""

surface_opacity = 0.5
surface_color = [0, 1, 1]

seedroi_actor = actor.contour_from_roi(seed_mask, affine,
                                       surface_color, surface_opacity)

"""
Next, we initialize a ``Renderer`` object and add both actors
github nipy / dipy / 1.0.0 / _downloads / 2bcd6eddf3f46e5d3fb2eaa26ae0ec27 / viz_advanced.py View on Github external
"""

if not world_coords:
    from dipy.tracking.streamline import transform_streamlines
    streamlines = transform_streamlines(streamlines, np.linalg.inv(affine))

"""
Now we create a ``Renderer`` object and add the streamlines using the ``line``
function and an image plane using the ``slicer`` function.
"""

ren = window.Renderer()
stream_actor = actor.line(streamlines)

if not world_coords:
    image_actor_z = actor.slicer(data, affine=np.eye(4))
else:
    image_actor_z = actor.slicer(data, affine)

"""
We can also change the opacity of the slicer.
"""

slicer_opacity = 0.6
image_actor_z.opacity(slicer_opacity)

"""
We can add additional slicers by copying the original and adjusting the
``display_extent``.
"""

image_actor_x = image_actor_z.copy()
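# A sketch of the continuation that is cut off in this snippet: restrict the
# copied slicer to a single sagittal slice with display_extent and add it to
# the renderer (shape = data.shape[:3] is assumed to be defined earlier).
x_midpoint = int(np.round(shape[0] / 2))
image_actor_x.display_extent(x_midpoint, x_midpoint,
                             0, shape[1] - 1,
                             0, shape[2] - 1)
image_actor_x.opacity(slicer_opacity)
ren.add(image_actor_x)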
github nipy / dipy / doc / examples / sfm_reconst.py View on Github external
window.show(ren)

"""
We can extract the peaks from the ODF and plot these as well.
"""

sf_peaks = dpp.peaks_from_model(sf_model,
                                data_small,
                                sphere,
                                relative_peak_threshold=.5,
                                min_separation_angle=25,
                                return_sh=False)


window.clear(ren)
fodf_peaks = actor.peak_slicer(sf_peaks.peak_dirs, sf_peaks.peak_values)
ren.add(fodf_peaks)

print('Saving illustration as sf_peaks.png')
window.record(ren, out_path='sf_peaks.png', size=(1000, 1000))
if interactive:
    window.show(ren)

"""
Finally, we plot both the peaks and the ODFs, overlaid:
"""

fodf_spheres.GetProperty().SetOpacity(0.4)
ren.add(fodf_spheres)

print('Saving illustration as sf_both.png')
window.record(ren, out_path='sf_both.png', size=(1000, 1000))
github nipy / dipy / 1.0.0 / _downloads / 6c9f2ef406fd32c1c6ee06117a2585d5 / linear_fascicle_evaluation.py View on Github external
:align: center

   **LiFE streamline weights**

"""

"""

We use $\beta$ to filter out these redundant streamlines, and generate an
optimized group of streamlines:

"""

optimized_sl = list(np.array(candidate_sl)[np.where(fiber_fit.beta > 0)[0]])
ren = window.Renderer()
ren.add(actor.streamtube(optimized_sl, cmap.line_colors(optimized_sl)))
ren.add(cc_ROI_actor)
ren.add(vol_actor)
window.record(ren, n_frames=1, out_path='life_optimized.png',
              size=(800, 800))
if interactive:
    window.show(ren)

"""

.. figure:: life_optimized.png
   :align: center

   **Streamlines selected via LiFE optimization**

"""
github nipy / dipy / doc / examples / viz_roi_contour.py View on Github external
We will create a streamline actor from the streamlines.
"""

streamlines_actor = actor.line(streamlines, cmap.line_colors(streamlines))

"""
Next, we create a surface actor from the corpus callosum seed ROI. We
provide the ROI data, the affine, the color in [R,G,B], and the opacity as
a decimal between zero and one. Here, we set the color as blue/green with
50% opacity.
"""

surface_opacity = 0.5
surface_color = [0, 1, 1]

seedroi_actor = actor.contour_from_roi(seed_mask, affine,
                                       surface_color, surface_opacity)

"""
Next, we initialize a ``Renderer`` object and add both actors
to the rendering.
"""

ren = window.Renderer()
ren.add(streamlines_actor)
ren.add(seedroi_actor)

"""
If you uncomment the following line, the rendering will pop up in an
interactive window.
"""
github nipy / dipy / 1.0.0 / _downloads / 933f6b45906c1940e473ee93446636de / segment_clustering_metrics.py View on Github external
from dipy.segment.metric import VectorOfEndpointsFeature
from dipy.segment.metric import CosineMetric

# Enables/disables interactive visualization
interactive = False

# Get some streamlines.
streamlines = get_streamlines()  # Previously defined.

feature = VectorOfEndpointsFeature()
metric = CosineMetric(feature)
qb = QuickBundles(threshold=0.1, metric=metric)
clusters = qb.cluster(streamlines)

# Color each streamline according to the cluster it belongs to.
colormap = actor.create_colormap(np.arange(len(clusters)))
colormap_full = np.ones((len(streamlines), 3))
for cluster, color in zip(clusters, colormap):
    colormap_full[cluster.indices] = color

# Visualization
ren = window.Renderer()
window.clear(ren)
ren.SetBackground(0, 0, 0)
ren.add(actor.streamtube(streamlines, colormap_full))
window.record(ren, out_path='cosine_metric.png', size=(600, 600))
if interactive:
    window.show(ren)

"""
.. figure:: cosine_metric.png