How to use the brainrender.Utils.data_io.load_json function in brainrender

To help you get started, we’ve selected a few brainrender examples based on popular ways load_json is used in public projects.

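At its core, load_json takes the path to a .json file and returns its parsed contents (typically a dict). A minimal usage sketch follows; the file path is hypothetical.

from brainrender.Utils.data_io import load_json

# Hypothetical path: any readable .json file will do.
data = load_json("path/to/data.json")
print(type(data))  # usually a dict parsed from the file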

Example from BrancoLab/BrainRender, brainrender/Utils/parsers/mouselight.py:

		else:
			checkfile = ml_file
			is_iter = False
			neurons_names = [os.path.split(ml_file)[-1].split(".")[0]]

		if ".swc" in checkfile.lower():
			raise NotImplementedError('We are working on improving parsing of .swc files, not ready yet.')
		else:
			self.is_json = True
			if not is_iter:
				data = load_json(checkfile)
				data = data["neurons"]
			else:
				data = []
				for f in ml_file:
					fdata = load_json(f)
					data.extend(fdata['neurons'])

		if not self.rendering_necessary:
			return self.actors, self.regions
		else:	
			# Render neurons
			self.n_neurons  = len(data)
			self.actors, self.regions = [], []

			if len(neurons_names) < self.n_neurons: 
				name = neurons_names[0]
				neurons_names = [name+"_{}".format(i) for i in range(self.n_neurons)]

			# Loop over neurons
			for nn, neuron in enumerate(data):
				neuron_actors, soma_region = self.render_neuron(neuron, nn, neurons_names[nn])

Example from BrancoLab/BrainRender, brainrender/Utils/parsers/mouselight.py:

		# Load the data
		if isinstance(ml_file, (tuple, list)):
			checkfile = ml_file[0]
			is_iter = True
			neurons_names = [os.path.split(f)[-1].split(".")[0] for f in ml_file]
		else:
			checkfile = ml_file
			is_iter = False
			neurons_names = [os.path.split(ml_file)[-1].split(".")[0]]

		if ".swc" in checkfile.lower():
			raise NotImplementedError('We are working on improving parsing of .swc files, not ready yet.')
		else:
			self.is_json = True
			if not is_iter:
				data = load_json(checkfile)
				data = data["neurons"]
			else:
				data = []
				for f in ml_file:
					fdata = load_json(f)
					data.extend(fdata['neurons'])

		if not self.rendering_necessary:
			return self.actors, self.regions
		else:	
			# Render neurons
			self.n_neurons  = len(data)
			self.actors, self.regions = [], []

			if len(neurons_names) < self.n_neurons: 
				name = neurons_names[0]
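
Both mouselight.py excerpts follow the same pattern: ml_file may be a single .json path or a list of paths, and the "neurons" entries read with load_json from each file are concatenated before rendering. A stripped-down sketch of just the loading step is shown below; the helper name load_neurons_data is hypothetical, not part of brainrender.

import os
from brainrender.Utils.data_io import load_json

def load_neurons_data(ml_file):
    # Hypothetical helper mirroring the branch above: accept one .json path
    # or a list of paths and return the concatenated "neurons" entries,
    # together with a name derived from each file name.
    files = ml_file if isinstance(ml_file, (tuple, list)) else [ml_file]
    names = [os.path.split(f)[-1].split(".")[0] for f in files]
    data = []
    for f in files:
        data.extend(load_json(f)["neurons"])
    return data, names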

Example from BrancoLab/BrainRender, brainrender/Utils/parsers/mouselight.py:

	def _cache_neuron(self, neuron_actors, neuron_name, params):
		"""
		Save a loaded neuron

		:param neuron_actors: list of neuron's actors
		:param neuron_name: name of the neuron
		:param params: dict of rendering parameters, stored as cache metadata

		"""
		if not neuron_name: return

		# Create/update entry in metadata
		self.cache_metadata[neuron_name] = params
		save_json(self.morphology_cache_metadata, self.cache_metadata, append=True)
		self.cache_metadata = load_json(self.morphology_cache_metadata)

		for neurite, actor in neuron_actors.items():
			if actor is None: continue
			fl = os.path.join(self.morphology_cache, neuron_name+"_"+neurite+".vtk")
			if isinstance(actor, list):
				if not actor: continue
				else:
					raise ValueError("Something went wrong while saving the actor")
			actor.write(fl)
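
The _cache_neuron method keeps the cache metadata in a JSON file: it updates one entry, saves it with save_json, then re-reads the file with load_json to refresh the in-memory copy. The same read-update-write cycle with only the standard library looks roughly like this (the function name and arguments are illustrative, not brainrender's API):

import json
import os

def update_cache_metadata(metadata_path, neuron_name, params):
    # Illustrative read-update-write cycle on a JSON metadata file.
    metadata = {}
    if os.path.exists(metadata_path):
        with open(metadata_path) as f:
            metadata = json.load(f)
    metadata[neuron_name] = params
    with open(metadata_path, "w") as f:
        json.dump(metadata, f, indent=2)
    return metadata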

Example from BrancoLab/BrainRender, brainrender/atlases/sba.py:

        content = listdir(atlas_folder)
        if not [f for f in content if f.endswith('.nii')]: # TODO expand to support multiple formats
            raise ValueError("Could not find volumetric data")

        if not [f for f in content if "lbl_to_acro.json" in f]:
            raise FileNotFoundError("Could not find file with label to acronym lookup")

        if not [f for f in content if "lbl_to_rgb.json" in f]:
            raise FileNotFoundError("Could not find file with label to color lookup")

        if not [f for f in content if "lbl_to_name.json" in f]:
            raise FileNotFoundError("Could not find file with label to full name lookup")

        self.lbl_to_acro_lookup = load_json([f for f in content if "lbl_to_acro.json" in f][0])
        self.lbl_to_rgb_lookup = load_json([f for f in content if "lbl_to_rgb.json" in f][0])
        self.lbl_to_name_lookup = load_json([f for f in content if "lbl_to_name.json" in f][0])

        self.volume_data = load_volume_file([f for f in content if f.endswith('.nii')][0])

        if [f for f in content if f.endswith(".obj")]:
            if len([f for f in content if f.endswith(".obj")]) > 1:
                raise ValueError("Found too many obj file")
            self.root = load([f for f in content if f.endswith(".obj")][0])

        # Get metadata and prep other stuff
        self.prep_brain_metadata()
        self.meshes_folder = os.path.join(atlas_folder, 'meshes')
        if not os.path.isdir(self.meshes_folder):
            os.mkdir(self.meshes_folder)
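
The sba.py excerpt repeats the same filter-then-load idiom for each lookup table: find the file in the atlas folder whose name matches a suffix, then parse it with load_json (the listdir used above appears to return full paths, otherwise the matches would need to be joined with atlas_folder). A compact sketch of that idiom is below; load_lookup is a hypothetical helper, not part of brainrender.

import json
import os

def load_lookup(folder, suffix):
    # Hypothetical helper mirroring the filter-then-load idiom above:
    # find the single file in `folder` whose name contains `suffix`
    # and parse it as JSON.
    matches = [f for f in os.listdir(folder) if suffix in f]
    if not matches:
        raise FileNotFoundError(f"No file matching '{suffix}' in {folder}")
    with open(os.path.join(folder, matches[0])) as f:
        return json.load(f)

# e.g. lbl_to_acro = load_lookup(atlas_folder, "lbl_to_acro.json")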

Example from BrancoLab/BrainRender, brainrender/Utils/parsers/mouselight.py:

		self.render_dendrites = render_dendrites
		self.render_axons = render_axons
		self.neurite_radius = neurite_radius 
		self.color_neurites = color_neurites 
		self.axon_color = axon_color 
		self.soma_color = soma_color 
		self.dendrites_color = dendrites_color 
		self.random_color = random_color
		self.mirror = mirror
		self.color_by_region = color_by_region
		self.force_to_hemisphere = force_to_hemisphere

		Paths.__init__(self, base_dir=base_dir, **kwargs)

		# Load cache metadata
		self.cache_metadata = load_json(self.morphology_cache_metadata)

Example from BrancoLab/BrainRender, brainrender/atlases/celegans.py:

        except:
            raise FileNotFoundError(
                "Could not find file with neurons skeleton data"
            )

        try:
            synapses_file = [
                f
                for f in listdir(self.data_folder)
                if f.endswith("synapses.csv")
            ][0]
        except:
            raise FileNotFoundError("Could not find file with synapses data")

        # load data
        self.skeletons_data = load_json(skeletons_file)
        self.synapses_data = pd.read_csv(synapses_file, sep=";")

        # Get neurons metadata
        try:
            metadata_file = [
                f
                for f in listdir(self.data_folder)
                if "neuron_metadata.csv" in f
            ][0]
        except:
            raise FileNotFoundError(
                "Could not find neurons metadata file"
            )

        self.neurons_metadata = pd.read_csv(metadata_file)
        self.neurons_names = list(self.neurons_metadata.neuron.values)

Example from BrancoLab/BrainRender, brainrender/Utils/parsers/streamlines.py:

    """
        Given a path to a .json file with streamline data (or the data themselves), render the streamlines as tube actors.
        Either filepath or data should be passed.

        :param filepath: str, optional. Path to .json file with streamline data (Default value = None)
        :param data: pandas.DataFrame, optional. DataFrame with streamline data. (Default value = None)
        :param color: str color of the streamlines (Default value = 'ivory')
        :param alpha: float transparency of the streamlines (Default value = .8)
        :param radius: int radius of the streamlines actor (Default value = 10)
        :param show_injection_site: bool, if True spheres are used to render the injection volume (Default value = True)
        :param *args: 
        :param **kwargs: 

    """
    if filepath is not None and data is None:
        data = load_json(filepath)
        # data = {k:{int(k2):v2 for k2, v2 in v.items()} for k,v in data.items()}
    elif filepath is None and data is not None:
        pass
    else:
        raise ValueError("Need to pass eiteher a filepath or data argument to parse_streamline")

    # create actors for streamlines
    lines = []
    if len(data['lines']) == 1:
        lines_data = data['lines'][0]
    else:
        lines_data = data['lines']
    for line in lines_data:
        points = [[l['x'], l['y'], l['z']] for l in line]
        lines.append(shapes.Tube(points,  r=radius, c=color, alpha=alpha, res=STREAMLINES_RESOLUTION))
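
The streamlines.py excerpt documents the parameters of parse_streamline, which calls load_json when given a filepath. A hedged usage sketch follows, assuming parse_streamline is importable from that module with the keyword arguments listed in the docstring; the file path is hypothetical.

from brainrender.Utils.parsers.streamlines import parse_streamline

# Hypothetical .json file with streamline data downloaded beforehand.
actors = parse_streamline(
    filepath="streamlines_example.json",
    color="salmon",
    alpha=0.6,
    radius=15,
)  # expected to return the created tube actors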