From 4e5f8cdd86431a4a8739deed07429c5ecfc3a388 Mon Sep 17 00:00:00 2001 From: Philipp Schlegel Date: Sun, 7 Jul 2024 10:39:39 -0700 Subject: [PATCH] use navis-fastcore when available: - _generate_segments() and _break_segments() - _connected_components() if input is skeleton - skeletons.cable_length (via new cable_length() function) - geodesic_matrix() - prune_twigs() when precise=False - segments_to_coords - synapse_flow_centrality() --- navis/core/skeleton.py | 13 +- navis/graph/graph_utils.py | 182 ++++++++++++++++++-------- navis/morpho/__init__.py | 2 +- navis/morpho/manipulation.py | 71 ++++++---- navis/morpho/mmetrics.py | 193 +++++++++++++++++++++------- navis/morpho/subset.py | 2 +- navis/plotting/dd.py | 6 +- navis/plotting/k3d/k3d_objects.py | 2 +- navis/plotting/plot_utils.py | 86 ++++++++----- navis/plotting/plotly/graph_objs.py | 2 +- navis/plotting/vispy/visuals.py | 3 +- navis/utils/__init__.py | 7 + 12 files changed, 389 insertions(+), 180 deletions(-) diff --git a/navis/core/skeleton.py b/navis/core/skeleton.py index d8ca879d..da411fb1 100644 --- a/navis/core/skeleton.py +++ b/navis/core/skeleton.py @@ -606,17 +606,8 @@ def n_leafs(self) -> Optional[int]: @temp_property def cable_length(self) -> Union[int, float]: """Cable length.""" - if hasattr(self, '_cable_length'): - return self._cable_length - - # The by far fastest way to get the cable length is to work on the node table - # Using the igraph representation is about the same speed - if it is already calculated! - # However, one problem with the graph representation is that with large neuronlists - # it adds a lot to the memory footprint. - not_root = (self.nodes.parent_id >= 0).values - xyz = self.nodes[['x', 'y', 'z']].values[not_root] - xyz_parent = self.nodes.set_index('node_id').loc[self.nodes.parent_id.values[not_root], ['x', 'y', 'z']].values - self._cable_length = np.sum(np.linalg.norm(xyz - xyz_parent, axis=1)) + if not hasattr(self, '_cable_length'): + self._cable_length = morpho.cable_length(self) return self._cable_length @property diff --git a/navis/graph/graph_utils.py b/navis/graph/graph_utils.py index ef099468..935aed75 100644 --- a/navis/graph/graph_utils.py +++ b/navis/graph/graph_utils.py @@ -29,19 +29,32 @@ # Set up logging logger = config.get_logger(__name__) -__all__ = sorted(['classify_nodes', 'cut_skeleton', 'longest_neurite', - 'split_into_fragments', 'reroot_skeleton', 'distal_to', - 'dist_between', 'find_main_branchpoint', - 'generate_list_of_childs', 'geodesic_matrix', - 'node_label_sorting', - 'segment_length', 'rewire_skeleton', 'insert_nodes', - 'remove_nodes', 'dist_to_root']) - - -@utils.map_neuronlist(desc='Gen. segments', allow_parallel=True) -def _generate_segments(x: 'core.NeuronObject', - weight: Optional[str] = None, - return_lengths: bool = False) -> Union[list, Tuple[list, list]]: +__all__ = sorted( + [ + "classify_nodes", + "cut_skeleton", + "longest_neurite", + "split_into_fragments", + "reroot_skeleton", + "distal_to", + "dist_between", + "find_main_branchpoint", + "generate_list_of_childs", + "geodesic_matrix", + "node_label_sorting", + "segment_length", + "rewire_skeleton", + "insert_nodes", + "remove_nodes", + "dist_to_root", + ] +) + + +@utils.map_neuronlist(desc="Gen. segments", allow_parallel=True) +def _generate_segments( + x: "core.NeuronObject", weight: Optional[str] = None, return_lengths: bool = False +) -> Union[list, Tuple[list, list]]: """Generate segments maximizing segment lengths. 
Parameters @@ -65,7 +78,7 @@ def _generate_segments(x: 'core.NeuronObject', Examples -------- - This is for doctests mostly + This is primarily for doctests: >>> import navis >>> n = navis.example_neurons(1) @@ -79,15 +92,29 @@ def _generate_segments(x: 'core.NeuronObject', # At this point x is TreeNeuron x: core.TreeNeuron - assert weight in ('weight', None), f'Unable to use weight "{weight}"' + assert weight in ("weight", None), f'Unable to use weight "{weight}"' + + if utils.fastcore and not return_lengths: + if weight == "weight": + weight = utils.fastcore.dag.parent_dist( + x.nodes.node_id.values, + x.nodes.parent_id.values, + x.nodes[["x", "y", "z"]].values, + root_dist=0, + ) + + return utils.fastcore.generate_segments( + x.nodes.node_id.values, x.nodes.parent_id.values, weights=weight + ) + d = dist_to_root(x, igraph_indices=False, weight=weight) - endNodeIDs = x.nodes[x.nodes.type == 'end'].node_id.values + endNodeIDs = x.nodes[x.nodes.type == "end"].node_id.values endNodeIDs = sorted(endNodeIDs, key=lambda x: d.get(x, 0), reverse=True) if config.use_igraph and x.igraph: g: igraph.Graph = x.igraph # noqa # Convert endNodeIDs to indices - id2ix = dict(zip(x.igraph.vs['node_id'], range(len(x.igraph.vs)))) + id2ix = dict(zip(x.igraph.vs["node_id"], range(len(x.igraph.vs)))) endNodeIDs = [id2ix[n] for n in endNodeIDs] else: g: nx.DiGraph = x.graph @@ -125,9 +152,13 @@ def _generate_segments(x: 'core.NeuronObject', return sequences -def _connected_components(x: Union['core.TreeNeuron', 'core.MeshNeuron']) -> List[Set[int]]: +def _connected_components( + x: Union["core.TreeNeuron", "core.MeshNeuron"], +) -> List[Set[int]]: """Extract the connected components within a neuron. + Will use `navis-fastcore` for skeletons, if available. + Parameters ---------- x : TreeNeuron | MeshNeuron @@ -150,15 +181,22 @@ def _connected_components(x: Union['core.TreeNeuron', 'core.MeshNeuron']) -> Lis """ assert isinstance(x, (core.TreeNeuron, core.MeshNeuron)) - if config.use_igraph and x.igraph: + if isinstance(x, core.TreeNeuron) and utils.fastcore: + # This returns for each node the ID of its root + ms = utils.fastcore.connected_components( + x.nodes.node_id.values, x.nodes.parent_id.values + ) + # Translate into list of sets + cc = [x.nodes.node_id.values[ms == i] for i in np.unique(ms)] + elif config.use_igraph and x.igraph: G: igraph.Graph = x.igraph # noqa # Get the vertex clustering - vc = G.components(mode='WEAK') + vc = G.components(mode="WEAK") # Membership maps indices to connected components ms = np.array(vc.membership) if isinstance(x, core.TreeNeuron): # For skeletons we need node IDs - ids = np.array(G.vs['node_id']) + ids = np.array(G.vs["node_id"]) else: # For MeshNeurons we can use the indices directly ids = np.array(G.vs.indices) @@ -173,7 +211,7 @@ def _connected_components(x: Union['core.TreeNeuron', 'core.MeshNeuron']) -> Lis return cc -def _break_segments(x: 'core.NeuronObject') -> list: +def _break_segments(x: "core.NeuronObject") -> list: """Break neuron into small segments connecting ends, branches and root. 
Parameters @@ -200,14 +238,18 @@ def _break_segments(x: 'core.NeuronObject') -> list: elif isinstance(x, core.TreeNeuron): pass else: - logger.error('Unexpected datatype: %s' % str(type(x))) + logger.error("Unexpected datatype: %s" % str(type(x))) raise ValueError # At this point x is TreeNeuron x: core.TreeNeuron - if x.igraph and config.use_igraph: - g: Union['igraph.Graph', 'nx.DiGraph'] = x.igraph # noqa + if utils.fastcore: + seg_list = utils.fastcore.break_segments( + x.nodes.node_id.values, x.nodes.parent_id.values + ) + elif x.igraph and config.use_igraph: + g: Union["igraph.Graph", "nx.DiGraph"] = x.igraph # noqa end = g.vs.select(_indegree=0).indices branch = g.vs.select(_indegree_gt=1, _outdegree=1).indices root = g.vs.select(_outdegree=0).indices @@ -228,12 +270,13 @@ def _break_segments(x: 'core.NeuronObject') -> list: seg.append(parent) seg_list.append(seg) # Translate indices to node IDs - ix_id = {v: n for v, n in zip(g.vs.indices, - g.vs.get_attribute_values('node_id'))} + ix_id = { + v: n for v, n in zip(g.vs.indices, g.vs.get_attribute_values("node_id")) + } seg_list = [[ix_id[n] for n in s] for s in seg_list] else: - seeds = x.nodes[x.nodes.type.isin(['branch', 'end'])].node_id.values - stops = x.nodes[x.nodes.type.isin(['branch', 'root'])].node_id.values + seeds = x.nodes[x.nodes.type.isin(["branch", "end"])].node_id.values + stops = x.nodes[x.nodes.type.isin(["branch", "root"])].node_id.values # Converting to set speeds up the "parent in stops" check stops = set(stops) g = x.graph @@ -648,12 +691,13 @@ def distal_to(x: 'core.TreeNeuron', return df -def geodesic_matrix(x: 'core.NeuronObject', - from_: Optional[Iterable[int]] = None, - directed: bool = False, - weight: Optional[str] = 'weight', - limit: Union[float, int] = np.inf - ) -> pd.DataFrame: +def geodesic_matrix( + x: "core.NeuronObject", + from_: Optional[Iterable[int]] = None, + directed: bool = False, + weight: Optional[str] = "weight", + limit: Union[float, int] = np.inf, +) -> pd.DataFrame: """Generate geodesic ("along-the-arbor") distance matrix between nodes/vertices. 
Parameters @@ -709,11 +753,51 @@ def geodesic_matrix(x: 'core.NeuronObject', if len(x) == 1: x = x[0] else: - raise ValueError('Cannot process more than a single neuron.') + raise ValueError("Cannot process more than a single neuron.") elif not isinstance(x, (core.TreeNeuron, core.MeshNeuron)): raise ValueError(f'Unable to process data of type "{type(x)}"') - limit = x.map_units(limit, on_error='raise') + limit = x.map_units(limit, on_error="raise") + + # Use fastcore if available + if utils.fastcore and isinstance(x, core.TreeNeuron): + # Calculate node distances + if weight == "weight": + weight = utils.fastcore.dag.parent_dist( + x.nodes.node_id.values, + x.nodes.parent_id.values, + x.nodes[["x", "y", "z"]].values, + root_dist=0, + ) + + # Check for missing sources + if not isinstance(from_, type(None)): + from_ = np.unique(utils.make_iterable(from_)) + + miss = from_[~np.isin(from_, x.nodes.node_id.values)] + if len(miss): + raise ValueError( + f'Node/vertex IDs not present: {", ".join(miss.astype(str))}' + ) + ix = from_ + else: + ix = x.nodes.node_id.values + + dmat = utils.fastcore.geodesic_matrix( + x.nodes.node_id.values, + x.nodes.parent_id.values, + weights=weight, + directed=directed, + sources=from_, + ) + + # Fastcore returns -1 for non-connected nodes + dmat[dmat < 0] = np.inf + + if limit is not None and limit is not np.inf: + dmat[dmat > limit] = np.inf + + return pd.DataFrame(dmat, index=ix, columns=x.nodes.node_id.values) # Makes no sense to use directed for MeshNeurons if isinstance(x, core.MeshNeuron): @@ -721,7 +805,7 @@ def geodesic_matrix(x: 'core.NeuronObject', if x.igraph and config.use_igraph: if isinstance(x, core.TreeNeuron): - nodeList = np.array(x.igraph.vs.get_attribute_values('node_id')) + nodeList = np.array(x.igraph.vs.get_attribute_values("node_id")) else: nodeList = np.arange(len(x.igraph.vs)) @@ -730,18 +814,16 @@ def geodesic_matrix(x: 'core.NeuronObject', else: nodeList = np.array(x.graph.nodes()) - if hasattr(nx, 'to_scipy_sparse_matrix'): - m = nx.to_scipy_sparse_matrix(x.graph, nodeList, - weight=weight) + if hasattr(nx, "to_scipy_sparse_matrix"): + m = nx.to_scipy_sparse_matrix(x.graph, nodeList, weight=weight) else: - m = nx.to_scipy_sparse_array(x.graph, nodeList, - weight=weight) + m = nx.to_scipy_sparse_array(x.graph, nodeList, weight=weight) if not isinstance(from_, type(None)): from_ = np.unique(utils.make_iterable(from_)) miss = from_[~np.isin(from_, nodeList)].astype(str) - if any(miss): + if len(miss): raise ValueError(f'Node/vertex IDs not present: {", ".join(miss)}') indices = np.where(np.isin(nodeList, from_))[0] @@ -752,19 +834,15 @@ def geodesic_matrix(x: 'core.NeuronObject', # For some reason csgrpah.dijkstra expects indices/indptr as int32 # igraph seems to do that by default but networkx uses int64 for indices - m.indptr = m.indptr.astype('int32', copy=False) - m.indices = m.indices.astype('int32', copy=False) - dmat = csgraph.dijkstra(m, - directed=directed, - indices=indices, - limit=limit) + m.indptr = m.indptr.astype("int32", copy=False) + m.indices = m.indices.astype("int32", copy=False) + dmat = csgraph.dijkstra(m, directed=directed, indices=indices, limit=limit) return pd.DataFrame(dmat, columns=nodeList, index=ix) # type: ignore # no stubs @utils.lock_neuron -def segment_length(x: 'core.TreeNeuron', - segment: List[int]) -> float: +def segment_length(x: "core.TreeNeuron", segment: List[int]) -> float: """Get length of a linear segment. 
This function is superfast but has no checks - you must provide a diff --git a/navis/morpho/__init__.py b/navis/morpho/__init__.py index 421332fb..f7d4f919 100644 --- a/navis/morpho/__init__.py +++ b/navis/morpho/__init__.py @@ -14,7 +14,7 @@ from .mmetrics import (strahler_index, bending_flow, flow_centrality, synapse_flow_centrality, sholl_analysis, segregation_index, arbor_segregation_index, tortuosity, - betweeness_centrality, segment_analysis) + betweeness_centrality, segment_analysis, cable_length) from .manipulation import (prune_by_strahler, stitch_skeletons, split_axon_dendrite, average_skeletons, despike_skeleton, guess_radius, smooth_skeleton, diff --git a/navis/morpho/manipulation.py b/navis/morpho/manipulation.py index ea453ef9..650b6e50 100644 --- a/navis/morpho/manipulation.py +++ b/navis/morpho/manipulation.py @@ -392,33 +392,53 @@ def _prune_twigs_simple(neuron: 'core.TreeNeuron', if not inplace: neuron = neuron.copy() - # Find terminal nodes - leafs = neuron.nodes[neuron.nodes.type == 'end'].node_id.values + if utils.fastcore: + nodes_to_keep = utils.fastcore.prune_twigs( + neuron.nodes.node_id.values, + neuron.nodes.parent_id.values, + threshold=size, + weights=utils.fastcore.dag.parent_dist( + neuron.nodes.node_id.values, + neuron.nodes.parent_id.values, + neuron.nodes[['x', 'y', 'z']].values, + ) + ) + + if len(nodes_to_keep) < neuron.n_nodes: + subset.subset_neuron(neuron, + nodes_to_keep, + inplace=True) + + if recursive: + recursive -= 1 + prune_twigs(neuron, size=size, inplace=True, recursive=recursive) + else: + # Find terminal nodes + leafs = neuron.nodes[neuron.nodes.type == 'end'].node_id.values - # Find terminal segments - segs = graph._break_segments(neuron) - segs = np.array([s for s in segs if s[0] in leafs], dtype=object) + # Find terminal segments + segs = graph._break_segments(neuron) + segs = np.array([s for s in segs if s[0] in leafs], dtype=object) - # Get segment lengths - seg_lengths = np.array([graph.segment_length(neuron, s) for s in segs]) + # Get segment lengths + seg_lengths = np.array([graph.segment_length(neuron, s) for s in segs]) - # Find out which to delete - segs_to_delete = segs[seg_lengths <= size] + # Find out which to delete + segs_to_delete = segs[seg_lengths <= size] + if len(segs_to_delete): + # Unravel the into list of node IDs -> skip the last parent + nodes_to_delete = [n for s in segs_to_delete for n in s[:-1]] - if segs_to_delete.any(): - # Unravel the into list of node IDs -> skip the last parent - nodes_to_delete = [n for s in segs_to_delete for n in s[:-1]] + # Subset neuron + nodes_to_keep = neuron.nodes[~neuron.nodes.node_id.isin(nodes_to_delete)].node_id.values + subset.subset_neuron(neuron, + nodes_to_keep, + inplace=True) - # Subset neuron - nodes_to_keep = neuron.nodes[~neuron.nodes.node_id.isin(nodes_to_delete)].node_id.values - subset.subset_neuron(neuron, - nodes_to_keep, - inplace=True) - - # Go recursive - if recursive: - recursive -= 1 - prune_twigs(neuron, size=size, inplace=True, recursive=recursive) + # Go recursive + if recursive: + recursive -= 1 + prune_twigs(neuron, size=size, inplace=True, recursive=recursive) return neuron @@ -973,7 +993,7 @@ def stitch_skeletons(*x: Union[Sequence[NeuronObject], 'core.NeuronList'], """Stitch multiple skeletons together. Uses minimum spanning tree to determine a way to connect all fragments - while minimizing length (Euclidian distance) of the new edges. Nodes + while minimizing length (Euclidean distance) of the new edges. 
Nodes that have been stitched will get a "stitched" tag. Important @@ -1912,10 +1932,8 @@ def _stitch_mst(x: 'core.TreeNeuron', "nodes in the neuron") mask = x.nodes.node_id.values[mask] - g = x.graph.to_undirected() - # Get connected components - cc = list(nx.connected_components(g)) + cc = graph._connected_components(x) if len(cc) == 1: # There's only one component -- no healing necessary return x @@ -1998,6 +2016,7 @@ def _stitch_mst(x: 'core.TreeNeuron', # For each inter-fragment edge, add the corresponding # fine-grained edge between skeleton nodes in the original graph. + g = x.graph.to_undirected() to_add = [[e[2]['node_a'], e[2]['node_b']] for e in frag_edges] g.add_edges_from(to_add) diff --git a/navis/morpho/mmetrics.py b/navis/morpho/mmetrics.py index 7d0e2c76..10331686 100644 --- a/navis/morpho/mmetrics.py +++ b/navis/morpho/mmetrics.py @@ -23,7 +23,7 @@ import pandas as pd import numpy as np -from typing import Union, Optional, Sequence, List, Dict, overload +from typing import Union, Optional, Sequence, Dict from typing_extensions import Literal from .. import config, graph, sampling, core, utils @@ -31,14 +31,25 @@ # Set up logging logger = config.get_logger(__name__) -__all__ = sorted(['strahler_index', 'bending_flow', 'sholl_analysis', - 'flow_centrality', 'synapse_flow_centrality', - 'segregation_index', 'tortuosity', - 'betweeness_centrality', 'segment_analysis']) - - -def parent_dist(x: Union['core.TreeNeuron', pd.DataFrame], - root_dist: Optional[int] = None) -> None: +__all__ = sorted( + [ + "strahler_index", + "bending_flow", + "sholl_analysis", + "flow_centrality", + "synapse_flow_centrality", + "segregation_index", + "tortuosity", + "betweeness_centrality", + "segment_analysis", + "cable_length", + ] +) + + +def parent_dist( + x: Union["core.TreeNeuron", pd.DataFrame], root_dist: Optional[int] = None +) -> None: """Get child->parent distances for skeleton nodes. Parameters @@ -61,31 +72,43 @@ def parent_dist(x: Union['core.TreeNeuron', pd.DataFrame], else: raise TypeError(f'Need TreeNeuron or DataFrame, got "{type(x)}"') - # Extract node coordinates - tn_coords = nodes[['x', 'y', 'z']].values + if not utils.fastcore: + # Extract node coordinates + tn_coords = nodes[["x", "y", "z"]].values - # Get parent coordinates - parent_coords = nodes.set_index('node_id').reindex(nodes.parent_id.values)[['x', 'y', 'z']].values + # Get parent coordinates + parent_coords = ( + nodes.set_index("node_id") + .reindex(nodes.parent_id.values)[["x", "y", "z"]] + .values + ) - # Calculate distances between nodes and their parents - w = np.sqrt(np.sum((tn_coords - parent_coords) ** 2, axis=1)) + # Calculate distances between nodes and their parents + w = np.sqrt(np.sum((tn_coords - parent_coords) ** 2, axis=1)) - # Replace root dist (nan by default) - w[np.isnan(w)] = root_dist + # Replace root dist (nan by default) + w[np.isnan(w)] = root_dist + else: + w = utils.fastcore.dag.parent_dist( + x.nodes.node_id.values, + x.nodes.parent_id.values, + x.nodes[["x", "y", "z"]].values, + root_dist=root_dist, + ) return w -@utils.map_neuronlist(desc='Calc. SI', allow_parallel=True) -@utils.meshneuron_skeleton(method='node_properties', - reroot_soma=True, - node_props=['strahler_index']) -def strahler_index(x: 'core.NeuronObject', - method: Union[Literal['standard'], - Literal['greedy']] = 'standard', - to_ignore: list = [], - min_twig_size: Optional[int] = None - ) -> 'core.NeuronObject': +@utils.map_neuronlist(desc="Calc. 
SI", allow_parallel=True) +@utils.meshneuron_skeleton( + method="node_properties", reroot_soma=True, node_props=["strahler_index"] +) +def strahler_index( + x: "core.NeuronObject", + method: Union[Literal["standard"], Literal["greedy"]] = "standard", + to_ignore: list = [], + min_twig_size: Optional[int] = None, +) -> "core.NeuronObject": """Calculate Strahler Index (SI). Starts with SI of 1 at each leaf and walks to root. At forks with different @@ -873,16 +896,17 @@ def _flow_centrality_igraph(x: 'core.NeuronObject', return x -@utils.map_neuronlist(desc='Calc. flow', allow_parallel=True) -@utils.meshneuron_skeleton(method='node_properties', - include_connectors=True, - heal=True, - node_props=['synapse_flow_centrality']) -def synapse_flow_centrality(x: 'core.NeuronObject', - mode: Union[Literal['centrifugal'], - Literal['centripetal'], - Literal['sum']] = 'sum' - ) -> 'core.NeuronObject': +@utils.map_neuronlist(desc="Calc. flow", allow_parallel=True) +@utils.meshneuron_skeleton( + method="node_properties", + include_connectors=True, + heal=True, + node_props=["synapse_flow_centrality"], +) +def synapse_flow_centrality( + x: "core.NeuronObject", + mode: Union[Literal["centrifugal"], Literal["centripetal"], Literal["sum"]] = "sum", +) -> "core.NeuronObject": """Calculate synapse flow centrality (SFC). From Schneider-Mizell et al. (2016): "We use flow centrality for @@ -899,8 +923,7 @@ def synapse_flow_centrality(x: 'core.NeuronObject', of each skeleton node in a 3d view, providing a characteristic signature of the arbor that enables subjective evaluation of its identity." - Losely based on Alex Bates' implemention in `catnat - `_. + Uses navis-fastcore if available. Parameters ---------- @@ -949,26 +972,62 @@ def synapse_flow_centrality(x: 'core.NeuronObject', # causes less headaches. It is, however, about >10X slower than this version! # Note to self: do not go down that rabbit hole again! 
- if mode not in ['centrifugal', 'centripetal', 'sum']: + if mode not in ["centrifugal", "centripetal", "sum"]: raise ValueError(f'Unknown "mode" parameter: {mode}') if not isinstance(x, core.TreeNeuron): raise ValueError(f'Expected TreeNeuron(s), got "{type(x)}"') if not x.has_connectors: - raise ValueError('Neuron must have connectors.') + raise ValueError("Neuron must have connectors.") if np.any(x.soma) and not np.all(np.isin(x.soma, x.root)): - logger.warning(f'Neuron {x.id} is not rooted to its soma!') + logger.warning(f"Neuron {x.id} is not rooted to its soma!") # Figure out how connector types are labeled cn_types = x.connectors.type.unique() - if any(np.isin(['pre', 'post'], cn_types)): - pre, post = 'pre', 'post' + if any(np.isin(["pre", "post"], cn_types)): + pre, post = "pre", "post" elif any(np.isin([0, 1], cn_types)): pre, post = 0, 1 else: - raise ValueError(f'Unable to parse connector types "{cn_types}" for neuron {x.id}') + raise ValueError( + f'Unable to parse connector types "{cn_types}" for neuron {x.id}' + ) + + if utils.fastcore: + x.nodes["synapse_flow_centrality"] = utils.fastcore.synapse_flow_centrality( + node_ids=x.nodes.node_id.values, + parent_ids=x.nodes.parent_id.values, + presynapses=x.nodes.node_id.map( + x.connectors[(x.connectors.type == pre)].node_id.value_counts() + ) + .fillna(0) + .astype(int) + .values, + postsynapses=x.nodes.node_id.map( + x.connectors[(x.connectors.type == post)].node_id.value_counts() + ) + .fillna(0) + .astype(int) + .values, + mode=mode, + ) + # Add info on method/mode used for flow centrality + x.centrality_method = mode # type: ignore + + # Need to add a restriction, that a branchpoint cannot have a lower + # flow than its highest child -> this happens at the main branch point to + # the cell body fiber because the flow doesn't go "through" it in + # child -> parent direction but rather "across" it from one child to the + # other + is_bp = x.nodes["type"] == "branch" + bp = x.nodes.loc[is_bp, "node_id"].values + bp_childs = x.nodes[x.nodes.parent_id.isin(bp)] + max_flow = bp_childs.groupby("parent_id").synapse_flow_centrality.max() + x.nodes.loc[is_bp, "synapse_flow_centrality"] = max_flow.loc[bp].values + x.nodes["synapse_flow_centrality"] = x.nodes.synapse_flow_centrality.astype(int) + return x # Get list of nodes with pre/postsynapses pre_node_ids = x.connectors[x.connectors.type == pre].node_id.values @@ -1547,3 +1606,47 @@ def betweeness_centrality(x: 'core.NeuronObject', x.nodes['betweenness'] = x.nodes.node_id.map(bc).astype(int) return x + + +@utils.map_neuronlist(desc="Cable length", allow_parallel=True) +@utils.meshneuron_skeleton(method="pass_through") +def cable_length(x) -> Union[int, float]: + """Calculate cable length. + + Parameters + ---------- + x : TreeNeuron | MeshNeuron | NeuronList + Neuron(s) for which to calculate cable length. + + Returns + ------- + cable_length : float | array of float + Cable length of the neuron(s). + + """ + utils.eval_param(x, name="x", allowed_types=(core.TreeNeuron,)) + + # See if we can use fastcore + if not utils.fastcore: + # The by far fastest way to get the cable length is to work on the node table + # Using the igraph representation is about the same speed... if it is already calculated! + # However, one problem with the graph representation is that with large neuronlists + # it adds a lot to the memory footprint. 
+ not_root = (x.nodes.parent_id >= 0).values + xyz = x.nodes[["x", "y", "z"]].values[not_root] + xyz_parent = ( + x.nodes.set_index("node_id") + .loc[x.nodes.parent_id.values[not_root], ["x", "y", "z"]] + .values + ) + cable_length = np.sum(np.linalg.norm(xyz - xyz_parent, axis=1)) + else: + cable_length = utils.fastcore.dag.parent_dist( + x.nodes.node_id.values, + x.nodes.parent_id.values, + x.nodes[["x", "y", "z"]].values, + root_dist=0, + ).sum() + + return cable_length + diff --git a/navis/morpho/subset.py b/navis/morpho/subset.py index e7b5d2f9..0bd876dd 100644 --- a/navis/morpho/subset.py +++ b/navis/morpho/subset.py @@ -291,7 +291,7 @@ def _subset_treeneuron(x, subset, keep_disc_cn, prevent_fragments): # Remove empty tags x.tags = {t: x.tags[t] for t in x.tags if x.tags[t]} # type: ignore # TreeNeuron has no tags - # Fix graph representations + # Fix graph representations (avoids having to recompute them) if '_graph_nx' in x.__dict__: x._graph_nx = x.graph.subgraph(x.nodes.node_id.values) if '_igraph' in x.__dict__: diff --git a/navis/plotting/dd.py b/navis/plotting/dd.py index dbebe0ce..44760a16 100644 --- a/navis/plotting/dd.py +++ b/navis/plotting/dd.py @@ -845,9 +845,7 @@ def _plot_skeleton(neuron, color, method, ax, **kwargs): if method == '2d': if not depth_coloring and not (isinstance(color, np.ndarray) and color.ndim == 2): # Generate by-segment coordinates - coords = segments_to_coords(neuron, - neuron.segments, - modifier=(1, 1, 1)) + coords = segments_to_coords(neuron, modifier=(1, 1, 1)) # We have to add (None, None, None) to the end of each # slab to make that line discontinuous there @@ -929,7 +927,6 @@ def _plot_skeleton(neuron, color, method, ax, **kwargs): else: # Generate by-segment coordinates coords = segments_to_coords(neuron, - neuron.segments, modifier=(1, 1, 1)) line_color = color @@ -953,7 +950,6 @@ def _plot_skeleton(neuron, color, method, ax, **kwargs): elif method == '3d_complex': # Generate by-segment coordinates coords = segments_to_coords(neuron, - neuron.segments, modifier=(1, 1, 1)) for c in coords: lc = Line3DCollection([c], diff --git a/navis/plotting/k3d/k3d_objects.py b/navis/plotting/k3d/k3d_objects.py index bd54eeea..13aaaf2a 100644 --- a/navis/plotting/k3d/k3d_objects.py +++ b/navis/plotting/k3d/k3d_objects.py @@ -239,7 +239,7 @@ def skeleton2k3d(neuron, legendgroup, showlegend, label, color, **kwargs): logger.warning(f'Skipping single-node skeleton: {neuron.label}') return [] - coords = segments_to_coords(neuron, neuron.segments) + coords = segments_to_coords(neuron) linewidth = kwargs.get('linewidth', kwargs.get('lw', 1)) # We have to add (None, None, None) to the end of each segment to diff --git a/navis/plotting/plot_utils.py b/navis/plotting/plot_utils.py index ce49eb8f..3b302431 100644 --- a/navis/plotting/plot_utils.py +++ b/navis/plotting/plot_utils.py @@ -69,20 +69,19 @@ def tn_pairs_to_coords(x: core.TreeNeuron, return coords.reshape((coords.shape[0], 2, 3)) -def segments_to_coords(x: core.TreeNeuron, - segments: List[List[int]], - modifier: Optional[Tuple[float, - float, - float]] = (1, 1, 1), - node_colors: Optional[np.ndarray] = None, - ) -> List[np.ndarray]: +def segments_to_coords( + x: core.TreeNeuron, + modifier: Optional[Tuple[float, float, float]] = (1, 1, 1), + node_colors: Optional[np.ndarray] = None, +) -> List[np.ndarray]: """Turn lists of node IDs into coordinates. + Will use navis-fastcore if available. 
+ Parameters ---------- x : TreeNeuron Must contain the nodes - segments : list of lists node IDs node_colors : numpy.ndarray, optional A color for each node in ``x.nodes``. If provided, will also return a list of colors sorted to match coordinates. @@ -98,43 +97,60 @@ def segments_to_coords(x: core.TreeNeuron, to match ``coords``. """ - if not isinstance(modifier, np.ndarray): - modifier = np.array(modifier) - - # Using a dictionary here is orders of manitude faster than .loc[]! - locs: Dict[int, Tuple[float, float, float]] - # Oddly, this is also the fastest way to generate the dictionary - nodes = x.nodes - locs = {i: (x, y, z) for i, x, y, z in zip(nodes.node_id.values, - nodes.x.values, - nodes.y.values, - nodes.z.values)} # type: ignore - # locs = {r.node_id: (r.x, r.y, r.z) for r in x.nodes.itertuples()} # type: ignore - coords = [[locs[tn] for tn in s] for s in segments] - - if any(modifier != 1): - coords = [(np.array(c) * modifier).tolist() for c in coords] - - if not isinstance(node_colors, type(None)): - ilocs = dict(zip(x.nodes.node_id.values, - np.arange(x.nodes.shape[0]))) - colors = [node_colors[[ilocs[tn] for tn in s]] for s in segments] - + colors = None + + if utils.fastcore is not None: + if node_colors is None: + coords = utils.fastcore.segment_coords( + x.nodes.node_id.values, + x.nodes.parent_id.values, + x.nodes[["x", "y", "z"]].values, + ) + else: + coords, colors = utils.fastcore.segment_coords( + x.nodes.node_id.values, + x.nodes.parent_id.values, + x.nodes[["x", "y", "z"]].values, + node_colors=node_colors, + ) + else: + # Using a dictionary here is orders of manitude faster than .loc[]! + locs: Dict[int, Tuple[float, float, float]] + # Oddly, this is also the fastest way to generate the dictionary + nodes = x.nodes + locs = { + i: (x, y, z) + for i, x, y, z in zip( + nodes.node_id.values, nodes.x.values, nodes.y.values, nodes.z.values + ) + } # type: ignore + # locs = {r.node_id: (r.x, r.y, r.z) for r in x.nodes.itertuples()} # type: ignore + coords = [np.array([locs[tn] for tn in s]) for s in x.segments] + + if not isinstance(node_colors, type(None)): + ilocs = dict(zip(x.nodes.node_id.values, np.arange(x.nodes.shape[0]))) + colors = [node_colors[[ilocs[tn] for tn in s]] for s in x.segments] + + modifier = np.asarray(modifier) + if (modifier != 1).any(): + for seg in coords: + np.multiply(seg, modifier, out=seg) + + if colors is not None: return coords, colors return coords -def fibonacci_sphere(samples: int = 1, - randomize: bool = True) -> list: +def fibonacci_sphere(samples: int = 1, randomize: bool = True) -> list: """Generate points on a sphere.""" - rnd = 1. + rnd = 1.0 if randomize: rnd = random.random() * samples points = [] - offset = 2. / samples - increment = math.pi * (3. 
- math.sqrt(5.)) + offset = 2.0 / samples + increment = math.pi * (3.0 - math.sqrt(5.0)) for i in range(samples): y = ((i * offset) - 1) + (offset / 2) diff --git a/navis/plotting/plotly/graph_objs.py b/navis/plotting/plotly/graph_objs.py index c927d2d5..8e7536cc 100644 --- a/navis/plotting/plotly/graph_objs.py +++ b/navis/plotting/plotly/graph_objs.py @@ -357,7 +357,7 @@ def skeleton2plotly(neuron, legendgroup, showlegend, label, color, **kwargs): logger.warning(f'Skipping single-node TreeNeuron: {neuron.label}') return [] - coords = segments_to_coords(neuron, neuron.segments) + coords = segments_to_coords(neuron) linewidth = kwargs.get('linewidth', kwargs.get('lw', 2)) # We have to add (None, None, None) to the end of each segment to diff --git a/navis/plotting/vispy/visuals.py b/navis/plotting/vispy/visuals.py index d52dca28..e34a6f51 100644 --- a/navis/plotting/vispy/visuals.py +++ b/navis/plotting/vispy/visuals.py @@ -23,9 +23,8 @@ import matplotlib.colors as mcl -from ... import core, config, utils, morpho, conversion +from ... import core, config, utils, conversion from ..colors import * -from ..plot_utils import segments_to_coords, make_tube with warnings.catch_warnings(): warnings.simplefilter("ignore") diff --git a/navis/utils/__init__.py b/navis/utils/__init__.py index 542d9c98..cd5fbcf5 100644 --- a/navis/utils/__init__.py +++ b/navis/utils/__init__.py @@ -24,5 +24,12 @@ from .decorators import (meshneuron_skeleton, map_neuronlist_df, map_neuronlist, lock_neuron) +try: + import navis_fastcore as fastcore +except ModuleNotFoundError: + fastcore = None +except ImportError: + raise + __all__ = ['set_loggers', 'set_pbars', 'set_default_connector_colors', 'patch_cloudvolume']
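
Every fast path in this patch hinges on the new optional import in navis/utils/__init__.py: utils.fastcore resolves to the navis_fastcore module when it is installed and to None otherwise, and each call site branches on that. Below is a minimal, self-contained sketch of the pattern with a toy stand-in for the morpho.cable_length() fallback logic; the helper name cable_length_sketch is illustrative and not part of the patch, only the fastcore.dag.parent_dist() call is taken from the call sites above.

import numpy as np

# Optional dependency: resolve navis-fastcore once, fall back to None if absent
# (mirrors the new try/except block in navis/utils/__init__.py).
try:
    import navis_fastcore as fastcore
except ModuleNotFoundError:
    fastcore = None


def cable_length_sketch(node_ids, parent_ids, coords):
    """Illustrative fallback logic along the lines of morpho.cable_length().

    node_ids/parent_ids are 1D integer arrays (roots have parent_id == -1);
    coords is an (N, 3) float array aligned with node_ids.
    """
    if fastcore is not None:
        # Child->parent distances computed in compiled code; roots contribute 0
        return fastcore.dag.parent_dist(node_ids, parent_ids, coords, root_dist=0).sum()

    # Pure numpy fallback: sum Euclidean child->parent distances, skipping roots
    not_root = parent_ids >= 0
    row = {n: i for i, n in enumerate(node_ids)}
    parent_xyz = coords[[row[p] for p in parent_ids[not_root]]]
    return np.linalg.norm(coords[not_root] - parent_xyz, axis=1).sum()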
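
One less obvious piece of the synapse_flow_centrality() change is how the per-node pre- and postsynapse counts are assembled before being handed to fastcore: connector counts per node are obtained with value_counts() and then mapped back onto the node table. The toy tables below are made up, but the map/value_counts/fillna chain is the same idiom used in the patch.

import pandas as pd

# Toy stand-ins for TreeNeuron.nodes and TreeNeuron.connectors
nodes = pd.DataFrame({"node_id": [1, 2, 3, 4]})
connectors = pd.DataFrame({"node_id": [2, 2, 4], "type": ["pre", "pre", "post"]})

# Per-node presynapse counts aligned with the node table: map each node ID
# onto the value_counts() of presynaptic connector node IDs; nodes without
# presynapses come back as NaN and are set to 0.
presynapses = (
    nodes.node_id.map(connectors[connectors.type == "pre"].node_id.value_counts())
    .fillna(0)
    .astype(int)
    .values
)
print(presynapses)  # -> [0 2 0 0]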
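
For end users the functions touched here keep their signatures, with segments_to_coords() being the one exception (it now derives the segments itself instead of taking a segments argument). The calls below follow the docstrings already in navis and transparently take the fastcore code paths when navis-fastcore is installed, falling back to the igraph/networkx implementations otherwise.

import navis

n = navis.example_neurons(1)

# Cable length is now computed via the new morpho.cable_length() function;
# the TreeNeuron.cable_length property caches its result.
print(n.cable_length)

# Geodesic distances from two source nodes to all other nodes; with
# navis-fastcore installed this avoids building a graph representation.
sources = n.nodes.node_id.values[:2]
dists = navis.geodesic_matrix(n, from_=sources)
print(dists.shape)  # (2, number of nodes)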