How to use the dipy.viz.window.show function in dipy

To help you get started, we’ve selected a few dipy examples based on popular ways this function is used in public projects.

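The snippets below all follow the same pattern: build a `window.Renderer()` (renamed `window.Scene()` in newer dipy/fury releases), add one or more actors to it, and call `window.show()` to open an interactive window. A minimal, self-contained sketch of that pattern, using made-up toy streamlines purely for illustration:

import numpy as np
from dipy.viz import actor, window

# Five random toy streamlines, just to have something to display.
streamlines = [np.random.rand(10, 3) * 10 for _ in range(5)]

ren = window.Renderer()              # window.Scene() in recent releases
ren.add(actor.line(streamlines))     # wrap the streamlines in a line actor
window.show(ren, size=(600, 600))    # opens an interactive window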

github fury-gl / fury / tests / test_actors.py
def test_labels(interactive=False):

    text_actor = actor.label("Hello")

    renderer = window.Renderer()
    renderer.add(text_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    if interactive:
        window.show(renderer, reset_camera=False)

    npt.assert_equal(renderer.GetActors().GetNumberOfItems(), 1)
github nipy / dipy / doc / examples / reconst_msd.py
msd_model = MultiShellDeconvModel(gtab, response_msd)

data = dwi[:, :, 68: 68 + 1]
mask_tmp = mask[:, :, 68: 68 + 1]

msd_fit = msd_model.fit(data, mask_tmp)
msd_odf = msd_fit.odf(sphere)
fodf_spheres = actor.odf_slicer(msd_odf, mask=mask_tmp,
                                sphere=sphere, scale=0.001, norm=False,
                                colormap='plasma')
interactive = True
ren = window.Renderer()
ren.add(fodf_spheres)

if interactive:
    window.show(ren)
github nipy / dipy / doc / examples / bundle_registration.py
def show_both_bundles(bundles, colors=None, show=True, fname=None):

    ren = window.Renderer()
    ren.SetBackground(1., 1, 1)
    for (i, bundle) in enumerate(bundles):
        color = colors[i]
        lines_actor = actor.streamtube(bundle, color, linewidth=0.3)
        lines_actor.RotateX(-90)
        lines_actor.RotateZ(90)
        ren.add(lines_actor)
    if show:
        window.show(ren)
    if fname is not None:
        sleep(1)
        window.record(ren, n_frames=1, out_path=fname, size=(900, 900))
github nipy / dipy / doc / examples / contextual_enhancement.py
spike[3, 3, 3, 0] = 1
spike_shm_conv = convolve(sf_to_sh(spike, k.get_sphere(), sh_order=8), k,
                          sh_order=8, test_mode=True)

sphere = get_sphere('symmetric724')
spike_sf_conv = sh_to_sf(spike_shm_conv, sphere, sh_order=8)
model_kernel = actor.odf_slicer(spike_sf_conv * 6,
                                sphere=sphere,
                                norm=False,
                                scale=0.4)
model_kernel.display(x=3)
ren.add(model_kernel)
ren.set_camera(position=(30, 0, 0), focal_point=(0, 0, 0), view_up=(0, 0, 1))
window.record(ren, out_path='kernel.png', size=(900, 900))
if interactive:
    window.show(ren)

"""
.. figure:: kernel.png
   :align: center

   Visualization of the contour enhancement kernel.
"""

"""
Shift-twist convolution is applied on the noisy data
"""

# Perform convolution
csd_shm_enh = convolve(csd_shm_noisy, k, sh_order=8)
github nipy / dipy / 1.0.0 / _downloads / 1827aa760d70d871adf21f0324e5cc2d / cluster_confidence.py
keep_streamlines = Streamlines()
for i, sl in enumerate(long_streamlines):
    if cci[i] >= 1:
        keep_streamlines.append(sl)

# Visualize the streamlines we kept
ren = window.Renderer()

keep_streamlines_actor = actor.line(keep_streamlines, linewidth=0.1)

ren.add(keep_streamlines_actor)


interactive = False
if interactive:
    window.show(ren)
window.record(ren, n_frames=1, out_path='filtered_cci_streamlines.png',
              size=(800, 800))

"""
github nipy / dipy / 1.0.0 / _downloads / 933f6b45906c1940e473ee93446636de / segment_clustering_metrics.py
clusters = qb.cluster(streamlines)

# Color each streamline according to the cluster it belongs to.
colormap = actor.create_colormap(np.arange(len(clusters)))
colormap_full = np.ones((len(streamlines), 3))
for cluster, color in zip(clusters, colormap):
    colormap_full[cluster.indices] = color

# Visualization
ren = window.Renderer()
window.clear(ren)
ren.SetBackground(0, 0, 0)
ren.add(actor.streamtube(streamlines, colormap_full))
window.record(ren, out_path='cosine_metric.png', size=(600, 600))
if interactive:
    window.show(ren)

"""
.. figure:: cosine_metric.png
github nipy / dipy / 1.0.0 / _downloads / e6720cced1b6495ee3230ecbe8abf1bc / tracking_sfm.py
colormap.line_colors(streamlines), linewidth=0.1)

    vol_actor = actor.slicer(t1_data)

    vol_actor.display(40, None, None)
    vol_actor2 = vol_actor.copy()
    vol_actor2.display(None, None, 35)

    ren = window.Renderer()
    ren.add(streamlines_actor)
    ren.add(vol_actor)
    ren.add(vol_actor2)

    window.record(ren, out_path='tractogram_sfm.png', size=(800, 800))
    if interactive:
        window.show(ren)

"""
.. figure:: tractogram_sfm.png
   :align: center

   **Sparse Fascicle Model tracks**

Finally, we can save these streamlines to a 'trk' file, for use in other
software, or for further analysis.
"""

sft = StatefulTractogram(streamlines, hardi_img, Space.RASMM)
save_trk(sft, "tractogram_sfm_detr.trk")

"""
References
github nipy / dipy / 1.0.0 / _downloads / 1b490ed1ab9a3a2e207679342bf6e176 / segment_clustering_features.py
# Color each midpoint according to the cluster it belongs to.
colormap = actor.create_colormap(np.arange(len(clusters)))
colormap_full = np.ones((len(streamlines), 3))
for cluster, color in zip(clusters, colormap):
    colormap_full[cluster.indices] = color

# Visualization
ren = window.Renderer()
window.clear(ren)
ren.SetBackground(0, 0, 0)
ren.add(actor.point(midpoints[:, 0, :], colormap_full, point_radius=0.2))
ren.add(actor.streamtube(streamlines, window.colors.white, opacity=0.05))
window.record(ren, n_frames=1, out_path='midpoint_feature.png', size=(600, 600))
if interactive:
    window.show(ren)

"""
.. figure:: midpoint_feature.png
   :align: center

   The middle point of each streamline, colored according to the QuickBundles
   results.

.. _clustering-examples-ArcLengthFeature:

ArcLength Feature
=================
**What:** Instances of `ArcLengthFeature` compute the length of a streamline.
More specifically, this feature corresponds to the sum of the lengths of all
the segments that make up the streamline.
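For reference, a minimal sketch of how `ArcLengthFeature` is typically plugged into QuickBundles, assuming the dipy 1.0-era imports used in the snippets on this page (`streamlines` stands for the tractogram loaded earlier in the example):

from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import ArcLengthFeature, EuclideanMetric

# Cluster streamlines by their length: ArcLengthFeature extracts a single
# scalar (the arc length) and EuclideanMetric compares those scalars.
feature = ArcLengthFeature()
metric = EuclideanMetric(feature)
qb = QuickBundles(threshold=2., metric=metric)
clusters = qb.cluster(streamlines)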
github nipy / dipy / 1.0.0 / _downloads / 6413042e93e3c3001c8ef089a982440e / fiber_to_bundle_coherence.py
ren.add(vol_actor2)

# Show original fibers
ren.set_camera(position=(-264, 285, 155),
               focal_point=(0, -14, 9),
               view_up=(0, 0, 1))
window.record(ren, n_frames=1, out_path='OR_before.png', size=(900, 900))
if interactive:
    window.show(ren)

# Show thresholded fibers
ren.rm(lineactor)
ren.add(actor.line(fbc_sl_thres, clrs_thres, linewidth=0.2))
window.record(ren, n_frames=1, out_path='OR_after.png', size=(900, 900))
if interactive:
    window.show(ren)

"""
.. _optic_radiation_before_cleaning:
github nipy / dipy / doc / examples / introduction_to_basic_tracking.py
streamlines = Streamlines(streamlines_generator)

# Prepare the display objects.
color = cmap.line_colors(streamlines)

if have_fury:
    streamlines_actor = actor.line(streamlines, cmap.line_colors(streamlines))

    # Create the 3D display.
    r = window.Renderer()
    r.add(streamlines_actor)

    # Save a still image for this static example; for interactivity,
    # use window.show(r) below.
    window.record(r, n_frames=1, out_path='deterministic.png', size=(800, 800))
    if interactive:
        window.show(r)

"""
.. figure:: deterministic.png
   :align: center

   **Corpus Callosum Deterministic**

We've created a deterministic set of streamlines, so called because if you
repeat the fiber tracking (keeping all the inputs the same) you will get
exactly the same set of streamlines. We can save the streamlines as a Trackvis
file so it can be loaded into other software for visualization or further
analysis.
"""

from dipy.io.streamline import save_trk
save_trk("CSA_detr.trk", streamlines, affine,