How to use brainspace - 10 common examples

To help you get started, we've selected a few brainspace examples based on popular ways the library is used in public projects.


Example 1: MICA-MNI/BrainSpace - brainspace/mesh/mesh_cluster.py (view on GitHub)
    # Embedding
    evs, _ = diffusion_mapping(a, n_components=30, alpha=0, diffusion_time=1,
                               random_state=random_state)
    evs = normalize(evs)  # To find spherical clusters

    if is_size:
        n_clusters = evs.shape[0] // n_clusters

    # Find clusters
    if approach == 'kmeans':
        _, cluster_labs, _ = k_means(evs, n_clusters=n_clusters,
                                     random_state=random_state, n_jobs=n_jobs,
                                     n_init=n_init)
    else:
        conn = me.get_immediate_adjacency(surf, include_self=False)
        if mask is not None:
            conn = conn[mask, :][:, mask]
        hc = AgglomerativeClustering(n_clusters=n_clusters, connectivity=conn,
                                     linkage='ward')
        cluster_labs = hc.fit(evs).labels_

    # Valid clusters start from 1
    cluster_labs += 1

    # Find centers
    if with_centers:
        points = surf.Points if mask is None else surf.Points[mask]
        centroids = reduce_by_labels(points, cluster_labs, red_op='mean',
                                     axis=1)

        centroid_labs = np.zeros_like(cluster_labs)

Example 2: MICA-MNI/BrainSpace - brainspace/mesh/mesh_cluster.py (view on GitHub)
Array of cluster labels. If `mask` is provided, points out of the mask
        are assigned label 0.
    center_labels : 1D ndarray, shape (n_points,)
        Array with centers labeled with their corresponding cluster label.
        The rest of the points are assigned label 0. Returned only if
        ``with_centers=True``.

    Notes
    -----
    Valid cluster labels start from 1. If the mask is provided, zeros are
    assigned to the points outside the mask.

    """

    # Get immediate geodesic distance
    a = me.get_ring_distance(surf, n_ring=1, metric='geodesic')
    if mask is not None:
        a = a[mask, :][:, mask]
    a.data = np.exp(-a.data/np.median(a.data))
    a.tolil().setdiag(1)

    # Embedding
    evs, _ = diffusion_mapping(a, n_components=30, alpha=0, diffusion_time=1,
                               random_state=random_state)
    evs = normalize(evs)  # To find spherical clusters

    if is_size:
        n_clusters = evs.shape[0] // n_clusters

    # Find clusters
    if approach == 'kmeans':
        _, cluster_labs, _ = k_means(evs, n_clusters=n_clusters,
                                     random_state=random_state, n_jobs=n_jobs,
                                     n_init=n_init)
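
The two mesh_cluster.py excerpts above show the internals of BrainSpace's surface clustering helper (presumably exposed as cluster_points; the name and defaults here are assumptions). A minimal usage sketch under those assumptions:

import numpy as np
from brainspace.datasets import load_conte69
from brainspace.mesh import mesh_cluster as mc

# Load a surface shipped with BrainSpace (left hemisphere only here)
surf_lh, _ = load_conte69()

# Cluster the surface into 100 parcels via k-means on the diffusion
# embedding, as implemented in the excerpt above (function name assumed)
cluster_labs, center_labs = mc.cluster_points(surf_lh, n_clusters=100,
                                              approach='kmeans',
                                              with_centers=True,
                                              random_state=0)

print(np.unique(cluster_labs).size, 'clusters')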

Example 3: MICA-MNI/BrainSpace - brainspace/examples/plot_tutorial2.py (view on GitHub)
# PCA, Laplacian eigenmaps and diffusion mapping
embeddings = ['pca', 'le', 'dm']

gradients_embedding = [None] * len(embeddings)
for i, emb in enumerate(embeddings):
    gm = GradientMaps(kernel='normalized_angle', approach=emb, random_state=0)
    gm.fit(conn_matrix)

    gradients_embedding[i] = map_to_labels(gm.gradients_[:, 0], labeling, mask=mask,
                                           fill=np.nan)


# sphinx_gallery_thumbnail_number = 2
label_text = ['PCA', 'LE', 'DM']
plot_hemispheres(surf_lh, surf_rh, array_name=gradients_embedding, size=(1200, 800),
                 cmap='viridis_r', color_bar=True, label_text=label_text)


###############################################################################
# Gradient alignment
# +++++++++++++++++++
#
# A more principled way of increasing comparability across gradients is to use
# alignment techniques. BrainSpace provides two such techniques:
# Procrustes analysis and joint alignment. For this example we will load
# functional connectivity data of a second subject group and align it with the
# first group.

conn_matrix2 = load_group_fc('schaefer', scale=400, group='holdout')
gp = GradientMaps(kernel='normalized_angle', alignment='procrustes')
gj = GradientMaps(kernel='normalized_angle', alignment='joint')
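
The excerpt stops before fitting. As a minimal sketch of how the aligned models are typically fit in the BrainSpace tutorials (fit accepts a list of matrices when an alignment is requested, and the aligned gradients are stored in aligned_):

# Fit both groups at once; aligned gradients end up in `aligned_`
gp.fit([conn_matrix, conn_matrix2])
gj.fit([conn_matrix, conn_matrix2])

# gp.aligned_[0] and gp.aligned_[1] hold the Procrustes-aligned gradients of
# the first and second group; gj.aligned_ holds the jointly embedded ones
print(gp.aligned_[0].shape, gj.aligned_[1].shape)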

Example 4: MICA-MNI/BrainSpace - brainspace/examples/plot_spin_permutations.py (view on GitHub)
# Compute parcellation centroids and append to spheres
aop.get_parcellation_centroids(sphere_lh, parcellation_lh, mask=mask_lh,
                               non_centroid=0, append=True,
                               array_name='centroids')
aop.get_parcellation_centroids(sphere_rh, parcellation_rh, mask=mask_rh,
                               non_centroid=0, append=True,
                               array_name='centroids')

mask_centroids_lh = sphere_lh.get_array('centroids') > 0
mask_centroids_rh = sphere_rh.get_array('centroids') > 0

centroids_lh = sphere_lh.Points[mask_centroids_lh]
centroids_rh = sphere_rh.Points[mask_centroids_rh]

# We can see the centroids on the sphere surfaces
plot_hemispheres(sphere_lh, sphere_rh, array_name='centroids',
                 interactive=False, embed_nb=True, size=(800, 200),
                 cmap_name='binary')


###############################################################################
# Now, let's generate 2000 random samples using spin permutations.

from brainspace.null_models import SpinRandomization

n_spins = 2000
sp = SpinRandomization(n_rep=n_spins, random_state=0)
sp.fit(centroids_lh, points_rh=centroids_rh)
gradient_spins_lh, gradient_spins_rh = sp.randomize(gradient[:200],
                                                    x_rh=gradient[200:])

Example 5: MICA-MNI/BrainSpace - brainspace/examples/plot_tutorial3.py (view on GitHub)
# Let's create some rotations
n_permutations = 1000

sp = SpinPermutations(n_rep=n_permutations, random_state=0)
sp.fit(sphere_lh, points_rh=sphere_rh)

t1wt2w_rotated = np.hstack(sp.randomize(t1wt2w_lh, t1wt2w_rh))
thickness_rotated = np.hstack(sp.randomize(thickness_lh, thickness_rh))
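
With the rotated maps in hand, the usual next step is a spin-test p-value. A minimal sketch using Spearman correlations, assuming a target map gradient (one value per vertex, left and right concatenated) that is not defined in this excerpt:

import numpy as np
from scipy.stats import spearmanr

# Observed correlation between the target map and the empirical t1w/t2w map
r_obs, _ = spearmanr(gradient, np.hstack([t1wt2w_lh, t1wt2w_rh]),
                     nan_policy='omit')

# Null distribution: correlation of the target map with each rotated map
r_null = np.array([spearmanr(gradient, rot, nan_policy='omit')[0]
                   for rot in t1wt2w_rotated])

# Two-sided spin p-value
p_spin = np.mean(np.abs(r_null) >= np.abs(r_obs))
print('observed r = %.3f, p_spin = %.3f' % (r_obs, p_spin))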


###############################################################################
# As an illustration of the rotation, let’s plot the original t1w/t2w data

# Plot original data
plot_hemispheres(surf_lh, surf_rh, array_name=t1wt2w, size=(1200, 300), cmap='viridis',
                 nan_color=(0.5, 0.5, 0.5, 1), color_bar=True)


###############################################################################
# as well as a few rotated versions.

# sphinx_gallery_thumbnail_number = 2
# Plot some rotations
plot_hemispheres(surf_lh, surf_rh, array_name=t1wt2w_rotated[:3], size=(1200, 800),
                 cmap='viridis', nan_color=(0.5, 0.5, 0.5, 1), color_bar=True,
                 label_text=['Rot0', 'Rot1', 'Rot2'])


###############################################################################
#
# .. warning::

Example 6: MICA-MNI/BrainSpace - brainspace/examples/plot_tutorial1.py (view on GitHub)
from brainspace.datasets import load_group_fc, load_parcellation, load_conte69

# First load mean connectivity matrix and Schaefer parcellation
conn_matrix = load_group_fc('schaefer', scale=400)
labeling = load_parcellation('schaefer', scale=400, join=True)

# and load the conte69 surfaces
surf_lh, surf_rh = load_conte69()


###############################################################################
# Let’s first look at the parcellation scheme we’re using.

from brainspace.plotting import plot_hemispheres

plot_hemispheres(surf_lh, surf_rh, array_name=labeling, size=(1200, 200),
                 cmap='tab20', zoom=1.85)


###############################################################################
# and let’s construct our gradients.

from brainspace.gradient import GradientMaps

# Ask for 10 gradients (default)
gm = GradientMaps(n_components=10, random_state=0)
gm.fit(conn_matrix)


###############################################################################
# Note that the default parameters are diffusion embedding approach, 10
# components, and no kernel (use raw data). Once you have your gradients, a
# good first step is to visualize them.
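
A minimal sketch of that first visualization, reusing the map_to_labels and plot_hemispheres helpers shown in the other snippets on this page (the colormap and figure size are arbitrary choices):

import numpy as np
from brainspace.plotting import plot_hemispheres
from brainspace.utils.parcellation import map_to_labels

# Map the first gradient from the 400 Schaefer parcels back to the conte69
# vertices; non-cortical vertices (label 0) are filled with NaN
mask = labeling != 0
grad0 = map_to_labels(gm.gradients_[:, 0], labeling, mask=mask, fill=np.nan)

plot_hemispheres(surf_lh, surf_rh, array_name=grad0, size=(1200, 200),
                 cmap='viridis_r', color_bar=True)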

Example 7: MICA-MNI/BrainSpace - docs/python_doc/auto_examples/plot_tutorial1.py (view on GitHub)
from brainspace.datasets import load_group_fc, load_parcellation, load_conte69

# First load mean connectivity matrix and Schaefer parcellation
conn_matrix = load_group_fc('schaefer', scale=400)
labeling = load_parcellation('schaefer', scale=400, join=True)

# and load the conte69 surfaces
surf_lh, surf_rh = load_conte69()


###############################################################################
# Let’s first look at the parcellation scheme we’re using.

from brainspace.plotting import plot_hemispheres

plot_hemispheres(surf_lh, surf_rh, array_name=labeling, size=(1200, 200),
                 cmap='tab20', zoom=1.85)


###############################################################################
# and let’s construct our gradients.

from brainspace.gradient import GradientMaps

# Ask for 10 gradients (default)
gm = GradientMaps(n_components=10, random_state=0)
gm.fit(conn_matrix)


###############################################################################
# Note that the default parameters are diffusion embedding approach, 10
# components, and no kernel (the raw data is used). Once you have your
# gradients, a good first step is to visualize them.
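
Before visualizing, it can also help to check how much signal each component carries. A minimal sketch of a scree plot, assuming the fitted model exposes its eigenvalues as gm.lambdas_ (as in the BrainSpace tutorials):

import matplotlib.pyplot as plt

# Scree plot: eigenvalue associated with each of the 10 gradients
fig, ax = plt.subplots(figsize=(5, 4))
ax.scatter(range(gm.lambdas_.size), gm.lambdas_)
ax.set_xlabel('Component')
ax.set_ylabel('Eigenvalue')
plt.show()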

Example 8: MICA-MNI/BrainSpace - brainspace/examples/plot_embedding_approaches.py (view on GitHub)
# Now, we compute the gradients using 3 different embedding approaches: PCA,
# Laplacian eigenmaps (i.e., 'le') and diffusion maps (i.e., 'dm').

import numpy as np

from brainspace.gradient import GradientMaps
from brainspace.utils.parcellation import map_to_labels

# list of embedding approaches
list_embedding = ['pca', 'le', 'dm']

mask_cortex = labeling != 0

for i, emb in enumerate(list_embedding):
    # We ask for 2 gradients
    gm = GradientMaps(n_gradients=2, approach=emb, kernel='normalized_angle',
                      random_state=0)

    # fit to the connectivity matrix
    gm.fit(conn_matrix)

    # append gradients to the surfaces
    for k in range(2):
        array_name = '{emb}_grad{k}'.format(emb=emb, k=k)
        grad = gm.gradients_[:, k]

        # map the gradient to the parcels (skip non-cortex)
        grad = map_to_labels(grad, labeling, mask=mask_cortex, fill=np.nan)

        # append to hemispheres
        # print("Appending '%s'" % array_name)
        surf_lh.append_array(grad[:n_pts_lh], name=array_name, at='point')
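        # Assumed continuation (a sketch, not the verbatim example): the
        # right-hemisphere vertices presumably receive the remaining values
        surf_rh.append_array(grad[n_pts_lh:], name=array_name, at='point')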

Example 9: MICA-MNI/BrainSpace - brainspace/examples/plot_tutorial0.py (view on GitHub)
masked_regions = [regions_list[i] for i in mat_mask]


corr_plot = plotting.plot_matrix(c, figure=(15, 15), labels=masked_regions,
                                 vmax=0.8, vmin=-0.8, reorder=True)


###############################################################################
# Run gradient analysis and visualize
# +++++++++++++++++++++++++++++++++++
#
# Run gradient analysis

from brainspace.gradient import GradientMaps

gm = GradientMaps(n_components=2, random_state=0)
gm.fit(correlation_matrix)


###############################################################################
# Visualize results
from brainspace.datasets import load_fsa5
from brainspace.plotting import plot_hemispheres
from brainspace.utils.parcellation import map_to_labels

# Map gradients to original parcels
grad = [None] * 2
for i, g in enumerate(gm.gradients_.T):
    grad[i] = map_to_labels(g, labeling, mask=mask, fill=np.nan)


# Load fsaverage5 surfaces
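# Assumed continuation (a sketch using the imports above, not the verbatim
# tutorial code): load the fsaverage5 surfaces and plot both mapped gradients
surf_lh, surf_rh = load_fsa5()
plot_hemispheres(surf_lh, surf_rh, array_name=grad, size=(1200, 400),
                 cmap='viridis_r', color_bar=True,
                 label_text=['Gradient 1', 'Gradient 2'])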

Example 10: MICA-MNI/BrainSpace - brainspace/examples/plot_different_parcellations.py (view on GitHub)
ax[i].yaxis.set_visible(False)


###############################################################################
# Now, we use our GradientMaps class to build one gradient for each connectivity
# matrix. Gradients are then appended to the surfaces.

import numpy as np

from brainspace.gradient import GradientMaps
from brainspace.utils.parcellation import map_to_labels

name_gradients = [None] * n_parcellations
for i, cm in enumerate(conn_matrices):
    # We ask for 1 gradient
    gm = GradientMaps(n_gradients=1, approach='dm', kernel='normalized_angle',
                      random_state=0)

    # fit to the connectivity matrix
    gm.fit(cm)

    # append gradients to the surfaces
    array_name = 'grad0_Schaefer{0}'.format(list_parcels[i])
    name_gradients[i] = array_name
    grad = gm.gradients_[:, 0]

    # map the gradient to the parcels
    grad = map_to_labels(grad, labelings[i], mask=labelings[i] != 0,
                         fill=np.nan)

    # append to hemispheres
    print("Appending '%s'" % array_name)