How to use the dipy.viz.window.record function in dipy

To help you get started, we've selected a few dipy examples based on popular ways window.record is used in public projects.

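Before the examples, here is a minimal, self-contained sketch of the call (the scene is illustrative; this assumes the legacy dipy.viz API, which newer dipy releases delegate to the fury package):

import numpy as np
from dipy.viz import window, actor

ren = window.Renderer()
# one streamline: an Nx3 array of points
lines = [np.array([[0., 0., 0.], [1., 1., 1.], [2., 1., 0.]])]
ren.add(actor.line(lines))

# Render off-screen and save a snapshot to disk
window.record(ren, out_path='record_example.png', size=(600, 600))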

github nipy / dipy / doc / examples / viz_timers.py
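This excerpt starts inside a timer callback, so it references objects built earlier in the example. A sketch of that setup, abbreviated from the original viz_timers.py:

import itertools
import numpy as np
from dipy.viz import window, actor, ui

xyz = 10 * np.random.rand(100, 3)
colors = np.random.rand(100, 4)
radii = np.random.rand(100) + 0.5

renderer = window.Renderer()
sphere_actor = actor.sphere(centers=xyz, colors=colors, radii=radii)
renderer.add(sphere_actor)

showm = window.ShowManager(renderer, size=(900, 768),
                           reset_camera=False, order_transparent=True)
showm.initialize()

tb = ui.TextBlock2D(bold=True)
counter = itertools.count()  # avoids a global frame counter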
def timer_callback(obj, event):
    cnt = next(counter)
    tb.message = "Let's count up to 100 and exit :" + str(cnt)
    renderer.azimuth(0.05 * cnt)
    sphere_actor.GetProperty().SetOpacity(cnt/100.)
    showm.render()
    if cnt == 100:
        showm.exit()


renderer.add(tb)

# Run every 200 milliseconds
showm.add_timer_callback(True, 200, timer_callback)

showm.start()

window.record(renderer, size=(900, 768), out_path="viz_timer.png")

"""
.. figure:: viz_timer.png
"""
github nipy / dipy / doc / examples / particle_filtering_fiber_tracking.py
pft_streamline_generator = ParticleFilteringTracking(dg,
                                                     cmc_classifier,
                                                     seeds,
                                                     affine,
                                                     max_cross=1,
                                                     step_size=step_size,
                                                     maxlen=1000,
                                                     pft_back_tracking_dist=2,
                                                     pft_front_tracking_dist=1,
                                                     particle_count=15,
                                                     return_all=False)

# streamlines = list(pft_streamline_generator)
streamlines = Streamlines(pft_streamline_generator)
save_trk("pft_streamline.trk", streamlines, affine, shape)


renderer.clear()
renderer.add(actor.line(streamlines, cmap.line_colors(streamlines)))
window.record(renderer, out_path='pft_streamlines.png', size=(600, 600))

"""
.. figure:: pft_streamlines.png
   :align: center

   **Particle Filtering Tractography**
"""

# Local Probabilistic Tractography
prob_streamline_generator = LocalTracking(dg,
                                          cmc_classifier,
                                          seeds,
                                          affine,
                                          max_cross=1,
                                          step_size=step_size,
                                          maxlen=1000,
                                          return_all=False)
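The excerpt ends with this call; in the original example the probabilistic streamlines are then handled exactly like the PFT ones above (a sketch, with illustrative output names):

streamlines = Streamlines(prob_streamline_generator)
save_trk("probabilistic_streamline.trk", streamlines, affine, shape)

renderer.clear()
renderer.add(actor.line(streamlines, cmap.line_colors(streamlines)))
window.record(renderer, out_path='probabilistic_streamlines.png',
              size=(600, 600))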
github UNFmontreal / toad / lib / qautil.py
    #if roi is not None:
    #    roiImage= nibabel.load(roi)
    #    roiActor = dipy.viz.fvtk.contour(
    #            roiImage.get_data(), affine=anatomicalImage.affine, levels=[1],
    #            colors=[(1., 1., 0.)], opacities=[1.])

    #    roiActor.RotateX(xRot)
    #    roiActor.RotateY(yRot)
    #    roiActor.RotateZ(zRot)

    #    ren.add(roiActor)

    ren.set_camera(
            position=(0,0,1), focal_point=(0,0,0), view_up=(0,1,0))#, verbose=False)

    window.record(ren, out_path=target, size=(1200, 1200), n_frames=1)
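Here n_frames=1 grabs a single screenshot, but record can also save a rotating sequence: az_ang sets the azimuthal rotation between frames and path_numbering appends frame indices to the file names (both are parameters of the record signature; the values here are illustrative):

# Save 36 numbered frames, rotating the camera 10 degrees between frames
window.record(ren, out_path=target, path_numbering=True,
              n_frames=36, az_ang=10, size=(1200, 1200))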
github nipy / dipy / doc / examples / reconst_mcsd.py
"""
From the fit obtained in the previous step, we generate the ODFs, which can be
visualized as follows:
"""

mcsd_odf = mcsd_fit.odf(sphere)
fodf_spheres = actor.odf_slicer(mcsd_odf, sphere=sphere, scale=0.01,
                                norm=False, colormap='plasma')

interactive = False
ren = window.Renderer()
ren.add(fodf_spheres)
ren.reset_camera_tight()

print('Saving illustration as msdodf.png')
window.record(ren, out_path='msdodf.png', size=(600, 600))

if interactive:
    window.show(ren)

"""
.. figure:: msdodf.png
"""
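If you only need the rendered frame as an array rather than a file on disk, window.snapshot is a lighter alternative to window.record (a sketch; snapshot returns the RGB image as a numpy array):

arr = window.snapshot(ren, size=(600, 600))
print(arr.shape)  # e.g. (600, 600, 3)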
github nipy / dipy / doc / examples / tracking_eudx_tensor.py
Every streamline will be coloured according to its orientation.
"""

from dipy.viz.colormap import line_colors

"""
`actor.streamtube` creates a streamline actor for streamline visualization
(`actor.line` is the lightweight alternative), and `ren.add` adds this actor
to the scene
"""

ren.add(actor.streamtube(tensor_streamlines, line_colors(tensor_streamlines)))

print('Saving illustration as tensor_tracks.png')

ren.SetBackground(1, 1, 1)
window.record(ren, out_path='tensor_tracks.png', size=(600, 600))
# Enables/disables interactive visualization
interactive = False
if interactive:
    window.show(ren)

"""
.. figure:: tensor_tracks.png
"""
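For large tractograms, the lightweight actor.line mentioned above is usually preferable to streamtube; a sketch of the same scene with line geometry (output name illustrative):

ren.clear()
ren.add(actor.line(tensor_streamlines, line_colors(tensor_streamlines)))
window.record(ren, out_path='tensor_tracks_lines.png', size=(600, 600))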
github nipy / dipy / doc / examples / reconst_dsid.py
from dipy.viz import window, actor

# Enables/disables interactive visualization
interactive = False


ren = window.Renderer()

# concatenate data as 4D array
odfs = np.vstack((odf_gt, dsi_odf, dsid_odf))[:, None, None]
odf_actor = actor.odf_slicer(odfs, sphere=sphere, scale=0.5, colormap='plasma')

odf_actor.display(y=0)
odf_actor.RotateX(90)
ren.add(odf_actor)
window.record(ren, out_path='dsid.png', size=(300, 300))
if interactive:
    window.show(ren)

"""
.. figure:: dsid.png
"""
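window.record can also position the camera itself via its cam_pos, cam_focal, and cam_view parameters, instead of relying on the renderer's current camera (the coordinates and file name here are illustrative):

window.record(ren, cam_pos=(0, 0, 10), cam_focal=(0, 0, 0),
              cam_view=(0, 1, 0), out_path='dsid_cam.png', size=(300, 300))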
github nipy / dipy / doc / examples / viz_advanced.py
show_m.initialize()

"""
Finally, please uncomment the following 3 lines so that you can interact with
the available 3D and 2D objects.
"""

# show_m.add_window_callback(win_callback)
# show_m.render()
# show_m.start()

ren.zoom(1.5)
ren.reset_clipping_range()

window.record(ren, out_path='bundles_and_a_slice.png', size=(1200, 900))

"""
.. figure:: bundles_and_a_slice.png
   :align: center

   **A few bundles with interactive slicing**.
"""

del show_m
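For a higher-resolution export, you can either enlarge size or scale the off-screen render with record's magnification parameter (a sketch; the output name is illustrative):

window.record(ren, out_path='bundles_and_a_slice_big.png',
              size=(1200, 900), magnification=2)  # renders at 2400x1800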
github nipy / dipy / doc / examples / contextual_enhancement.py
# convolve kernel with delta spike
spike = np.zeros((7, 7, 7, k.get_orientations().shape[0]), dtype=np.float64)
spike[3, 3, 3, 0] = 1
spike_shm_conv = convolve(sf_to_sh(spike, k.get_sphere(), sh_order=8), k,
                          sh_order=8, test_mode=True)

sphere = get_sphere('symmetric724')
spike_sf_conv = sh_to_sf(spike_shm_conv, sphere, sh_order=8)
model_kernel = actor.odf_slicer(spike_sf_conv * 6,
                                sphere=sphere,
                                norm=False,
                                scale=0.4)
model_kernel.display(x=3)
ren.add(model_kernel)
ren.set_camera(position=(30, 0, 0), focal_point=(0, 0, 0), view_up=(0, 0, 1))
window.record(ren, out_path='kernel.png', size=(900, 900))
if interactive:
    window.show(ren)

"""
.. figure:: kernel.png
   :align: center

   Visualization of the contour enhancement kernel.
"""

"""
Shift-twist convolution is applied on the noisy data
"""

# Perform convolution
csd_shm_enh = convolve(csd_shm_noisy, k, sh_order=8)
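The original example goes on to visualize the enhanced ODFs much as the kernel was shown above; a condensed sketch of that step (output name illustrative):

csd_sf_enh = sh_to_sf(csd_shm_enh, sphere, sh_order=8)
enh_actor = actor.odf_slicer(csd_sf_enh, sphere=sphere, norm=False, scale=0.4)
ren.clear()
ren.add(enh_actor)
window.record(ren, out_path='enhanced_odfs.png', size=(900, 900))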