How to use the dipy.viz.window.Renderer class in dipy

To help you get started, we’ve selected a few dipy examples, based on popular ways it is used in public projects.
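
The pattern is the same across the snippets below: create a ``window.Renderer``, add one or more actors to it, then either save the scene to an image with ``window.record`` or open it interactively with ``window.show``. Here is a minimal sketch using placeholder streamline data (not taken from any of the examples below):

import numpy as np
from dipy.viz import window, actor

# Placeholder data: two short streamlines, each an Nx3 array of points.
streamlines = [np.array([[0., 0., 0.], [1., 1., 1.], [2., 2., 2.]]),
               np.array([[0., 1., 0.], [1., 2., 1.], [2., 3., 2.]])]

ren = window.Renderer()                   # the scene that holds the actors
ren.add(actor.line(streamlines))          # add a streamline actor
window.record(ren, out_path='scene.png', size=(600, 600))  # save a screenshot
# window.show(ren)                        # or open an interactive window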


github nipy / dipy / 1.0.0 / _downloads / 9e3c560ae0405d4491f04ef1bcf42161 / reconst_csa.py
their position on the discrete sphere that was used to do the reconstruction of
the ODF. In order to obtain the full ODF, ``return_odf`` should be set to ``True``
(a brief sketch of this option follows below). Before enabling it, make sure
that you have enough memory.

Let's visualize the ODFs of a small rectangular area in an axial slice of the
splenium of the corpus callosum (CC).
"""

data_small = maskdata[13:43, 44:74, 28:29]

from dipy.viz import window, actor

# Enables/disables interactive visualization
interactive = False

ren = window.Renderer()

csaodfs = csamodel.fit(data_small).odf(default_sphere)

"""
It is common for CSA ODFs to have negative values; we can remove those using
``np.clip``:
"""

csaodfs = np.clip(csaodfs, 0, np.max(csaodfs, -1)[..., None])
csa_odfs_actor = actor.odf_slicer(csaodfs, sphere=default_sphere,
                                  colormap='plasma', scale=0.4)
csa_odfs_actor.display(z=0)

ren.add(csa_odfs_actor)
print('Saving illustration as csa_odfs.png')
window.record(ren, n_frames=1, out_path='csa_odfs.png', size=(600, 600))
if interactive:
    window.show(ren)

github nipy / dipy / 1.0.0 / _downloads / 7e5b77afacfb13e483cccefd6adf610b / viz_bundles.py
"""
This bundle is in world coordinates, so we need to transform it into native
image coordinates to bring it into the same coordinate space as the ``fa``
image.
"""

bundle_native = transform_streamlines(bundle, np.linalg.inv(affine))

"""
Show every streamline with an orientation color
===============================================

This is the default option when you are using ``line`` or ``streamtube``.
"""

renderer = window.Renderer()

stream_actor = actor.line(bundle_native)

renderer.set_camera(position=(-176.42, 118.52, 128.20),
                    focal_point=(113.30, 128.31, 76.56),
                    view_up=(0.18, 0.00, 0.98))

renderer.add(stream_actor)

# Uncomment the line below to display the scene in an interactive window
# window.show(renderer, size=(600, 600), reset_camera=False)
window.record(renderer, out_path='bundle1.png', size=(600, 600))

"""
.. figure:: bundle1.png
   :align: center

github nipy / dipy / doc / examples / segment_quickbundles.py
[  74.72344208,   86.60827637,   84.9391861 ],
           [  70.40846252,   85.15874481,   82.4484024 ],
           [  66.74534607,   86.00262451,   78.82582092],
           [  64.02451324,   88.43942261,   75.0697403 ]], dtype=float32)


`clusters` also has attributes like `centroids` (the cluster representatives) and
methods like `add`, `remove`, and `clear` to modify the clustering result; a
short inspection sketch follows below.

Let's first show the initial dataset.
"""

# Enables/disables interactive visualization
interactive = False

ren = window.Renderer()
ren.SetBackground(1, 1, 1)
ren.add(actor.streamtube(streamlines, window.colors.white))
window.record(ren, out_path='fornix_initial.png', size=(600, 600))
if interactive:
    window.show(ren)

"""
.. figure:: fornix_initial.png
   :align: center

   Initial Fornix dataset.

Show the centroids of the fornix after clustering (with random colors):
"""

colormap = actor.create_colormap(np.arange(len(clusters)))
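
# The snippet is truncated here; a hedged sketch of how the centroid
# visualization typically continues: overlay the original streamlines
# faintly, add the centroids with the random colormap, then record.
window.clear(ren)
ren.SetBackground(1, 1, 1)
ren.add(actor.streamtube(streamlines, window.colors.white, opacity=0.05))
ren.add(actor.streamtube(clusters.centroids, colormap, linewidth=0.4))
window.record(ren, out_path='fornix_centroids.png', size=(600, 600))
if interactive:
    window.show(ren)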

github nipy / dipy / doc / examples / segment_clustering_metrics.py
# Get some streamlines.
streamlines = get_streamlines()  # Previously defined.

feature = VectorOfEndpointsFeature()
metric = CosineMetric(feature)
qb = QuickBundles(threshold=0.1, metric=metric)
clusters = qb.cluster(streamlines)

# Color each streamline according to the cluster it belongs to.
colormap = actor.create_colormap(np.arange(len(clusters)))
colormap_full = np.ones((len(streamlines), 3))
for cluster, color in zip(clusters, colormap):
    colormap_full[cluster.indices] = color

# Visualization
ren = window.Renderer()
window.clear(ren)
ren.SetBackground(0, 0, 0)
ren.add(actor.streamtube(streamlines, colormap_full))
window.record(ren, out_path='cosine_metric.png', size=(600, 600))
if interactive:
    window.show(ren)

"""
.. figure:: cosine_metric.png

github nipy / dipy / 1.0.0 / _downloads / 1b490ed1ab9a3a2e207679342bf6e176 / segment_clustering_features.py
# Get some streamlines.
streamlines = get_streamlines()  # Previously defined.

feature = ArcLengthFeature()
metric = EuclideanMetric(feature)
qb = QuickBundles(threshold=2., metric=metric)
clusters = qb.cluster(streamlines)

# Color each streamline according to the cluster it belongs to.
colormap = actor.create_colormap(np.ravel(clusters.centroids))
colormap_full = np.ones((len(streamlines), 3))
for cluster, color in zip(clusters, colormap):
    colormap_full[cluster.indices] = color

# Visualization
ren = window.Renderer()
window.clear(ren)
ren.SetBackground(0, 0, 0)
ren.add(actor.streamtube(streamlines, colormap_full))
window.record(ren, out_path='arclength_feature.png', size=(600, 600))
if interactive:
    window.show(ren)

"""
.. figure:: arclength_feature.png
   :align: center

   Showing the streamlines colored according to their length.

.. _clustering-examples-VectorOfEndpointsFeature:

Vector Between Endpoints Feature

github nipy / dipy / 1.0.0 / _downloads / 71cf8c2b36cb790a770efa541ff9aba6 / path_length_map.py
# Make a streamline bundle model of the corpus callosum ROI connectivity
streamlines = LocalTracking(csa_peaks, stopping_criterion, seeds, affine,
                            step_size=2)
streamlines = Streamlines(streamlines)

# Visualize the streamlines and the Path Length Map base ROI
# (in this case also the seed ROI)

streamlines_actor = actor.line(streamlines, cmap.line_colors(streamlines))
surface_opacity = 0.5
surface_color = [0, 1, 1]
seedroi_actor = actor.contour_from_roi(seed_mask, affine,
                                       surface_color, surface_opacity)

ren = window.Renderer()
ren.add(streamlines_actor)
ren.add(seedroi_actor)

"""
If you set interactive to True (below), the rendering will pop up in an
interactive window.
"""

interactive = False
if interactive:
    window.show(ren)

window.record(ren, n_frames=1, out_path='plm_roi_sls.png',
              size=(800, 800))

github nipy / dipy / 1.0.0 / _downloads / 736a375ec92f7df7dc77e8d8181d2256 / tracking_introduction_eudx.py
csa_model = CsaOdfModel(gtab, sh_order=6)
csa_peaks = peaks_from_model(csa_model, data, default_sphere,
                             relative_peak_threshold=.8,
                             min_separation_angle=45,
                             mask=white_matter)

"""
For quality assurance, we can also visualize a slice of the direction field that
we will use as the basis for the tracking. The visualization is done using the
``fury`` Python package.
"""

from dipy.viz import window, actor, has_fury

if has_fury:
    ren = window.Renderer()
    ren.add(actor.peak_slicer(csa_peaks.peak_dirs,
                              csa_peaks.peak_values,
                              colors=None))

    window.record(ren, out_path='csa_direction_field.png', size=(900, 900))

    if interactive:
        window.show(ren, size=(800, 800))

"""
.. figure:: csa_direction_field.png
   :align: center

   **Direction Field (peaks)**
"""

github nipy / dipy / 1.0.0 / _downloads / b4f8004b94d1b3f9241ca5f1f6c2fd05 / tracking_deterministic.py
direction getter. Here, the spherical harmonic representation of the FOD
is used.
"""


detmax_dg = DeterministicMaximumDirectionGetter.from_shcoeff(
    csd_fit.shm_coeff, max_angle=30., sphere=default_sphere)
streamline_generator = LocalTracking(detmax_dg, stopping_criterion, seeds,
                                     affine, step_size=.5)
streamlines = Streamlines(streamline_generator)

sft = StatefulTractogram(streamlines, hardi_img, Space.RASMM)
save_trk(sft, "tractogram_deterministic_dg.trk")

if has_fury:
    r = window.Renderer()
    r.add(actor.line(streamlines, colormap.line_colors(streamlines)))
    window.record(r, out_path='tractogram_deterministic_dg.png',
                  size=(800, 800))
    if interactive:
        window.show(r)

"""
.. figure:: tractogram_deterministic_dg.png
   :align: center

   **Corpus Callosum using deterministic maximum direction getter**
"""
"""
.. include:: ../links_names.inc

github nipy / dipy / 1.0.0 / _downloads / 874cc312af719bffbe6db3111ec0dc4c / reconst_csd.py
                              init_trace=0.0021, iter=8, convergence=0.001,
                              parallel=True)


"""
We can check the shape of the signal of the response function, which should look
like a pancake:
"""

response_signal = response.on_sphere(default_sphere)
# transform our data from 1D to 4D
response_signal = response_signal[None, None, None, :]
response_actor = actor.odf_slicer(response_signal, sphere=default_sphere,
                                  colormap='plasma')

ren = window.Renderer()

ren.add(response_actor)
print('Saving illustration as csd_recursive_response.png')
window.record(ren, out_path='csd_recursive_response.png', size=(200, 200))
if interactive:
    window.show(ren)

"""
.. figure:: csd_recursive_response.png
   :align: center

   Estimated response function using recursive calibration.

"""

ren.rm(response_actor)

github nipy / dipy / doc / examples / bundle_registration.py
def show_both_bundles(bundles, colors=None, show=True, fname=None):

    ren = window.Renderer()
    ren.SetBackground(1., 1, 1)
    for (i, bundle) in enumerate(bundles):
        color = colors[i]
        lines_actor = actor.streamtube(bundle, color, linewidth=0.3)
        lines_actor.RotateX(-90)
        lines_actor.RotateZ(90)
        ren.add(lines_actor)
    if show:
        window.show(ren)
    if fname is not None:
        sleep(1)
        window.record(ren, n_frames=1, out_path=fname, size=(900, 900))
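
A hypothetical call to this helper, assuming two registered bundles are available; the variable names below are placeholders, and the colors come from the ``window.colors`` convenience module used elsewhere in these examples:

show_both_bundles([bundle_before, bundle_after],
                  colors=[window.colors.orange, window.colors.red],
                  show=False,
                  fname='bundles_after_registration.png')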