From 1aee19416bf9674ece696e29d7c350f720dc39cb Mon Sep 17 00:00:00 2001 From: Neil Shephard Date: Mon, 20 Nov 2023 08:20:20 +0000 Subject: [PATCH] Convert '-' to '_' in summary dataframe (#215) This makes it easier to access the columns as attributes of the dataframe. --- Co-authored-by: Juan Nunez-Iglesias --- benchmarks/bench_skan.py | 2 +- doc/examples/complete_analysis.md | 8 +-- doc/examples/visualizing_3d_skeletons.md | 16 ++--- doc/getting_started/getting_started.md | 12 ++-- src/skan/csr.py | 61 +++++++++++------- src/skan/draw.py | 8 +-- src/skan/napari_skan.py | 2 +- src/skan/pipe.py | 12 ++-- src/skan/summary_utils.py | 6 +- src/skan/test/test_csr.py | 79 ++++++++++++++---------- src/skan/test/test_draw.py | 4 +- src/skan/test/test_napari_plugin.py | 6 +- src/skan/test/test_skeleton_class.py | 10 +-- src/skan/test/test_summary_utils.py | 4 +- 14 files changed, 131 insertions(+), 99 deletions(-) diff --git a/benchmarks/bench_skan.py b/benchmarks/bench_skan.py index 55248efc..d7596215 100644 --- a/benchmarks/bench_skan.py +++ b/benchmarks/bench_skan.py @@ -42,7 +42,7 @@ def bench_suite(): skel_obj = csr.Skeleton(skeleton) times['skeleton object again'] = t_skeleton2[0] with timer() as t_summary: - summary = csr.summarize(skel_obj) + summary = csr.summarize(skel_obj, separator='_') times['compute per-skeleton statistics'] = t_summary[0] return times diff --git a/doc/examples/complete_analysis.md b/doc/examples/complete_analysis.md index 761ca980..61cb3df6 100644 --- a/doc/examples/complete_analysis.md +++ b/doc/examples/complete_analysis.md @@ -112,21 +112,21 @@ data['field number'] = data['filename'].apply(field_number) Next, we filter the branches by using the [*shape index*](http://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.shape_index). We have used a very simple method to extract skeletons (see [Getting started](../getting_started/getting_started)), which does an acceptable job but creates a lot of false branches. Since the goal of Skan is to analyse skeletons, rather than generate them, we attempt to filter the branches, and measure only those that look like ridges according to the shape index. ```{code-cell} ipython3 -ridges = ((data['mean-shape-index'] < 0.625) & - (data['mean-shape-index'] > 0.125)) +ridges = ((data['mean_shape_index'] < 0.625) & + (data['mean_shape_index'] > 0.125)) ``` For the same reason, we only look at junction-to-junction branches, which are more accurately identified by our method than junction-to-endpoint branches. ```{code-cell} ipython3 -j2j = data['branch-type'] == 2 +j2j = data['branch_type'] == 2 datar = data.loc[ridges & j2j].copy() ``` Finally, we make a new column of measurements in a more natural scale for our purpose. ```{code-cell} ipython3 -datar['branch distance (nm)'] = datar['branch-distance'] * 1e9 +datar['branch distance (nm)'] = datar['branch_distance'] * 1e9 ``` ## 3. 
Making the figure diff --git a/doc/examples/visualizing_3d_skeletons.md b/doc/examples/visualizing_3d_skeletons.md index f61c1fa2..18d570ef 100644 --- a/doc/examples/visualizing_3d_skeletons.md +++ b/doc/examples/visualizing_3d_skeletons.md @@ -49,17 +49,17 @@ all_paths = [ ``` ```{code-cell} ipython3 -paths_table = skan.summarize(skeleton) +paths_table = skan.summarize(skeleton, separator='_') ``` ```{code-cell} ipython3 -paths_table['path-id'] = np.arange(skeleton.n_paths) +paths_table['path_id'] = np.arange(skeleton.n_paths) ``` First, we color by random path ID, showing each path in a distinct color using the matplotlib "tab10" qualitative palette. (Coloring by path ID directly results in "bands" of nearby paths receiving the same color.) ```{code-cell} ipython3 -paths_table['random-path-id'] = np.random.default_rng().permutation(skeleton.n_paths) +paths_table['random_path_id'] = np.random.default_rng().permutation(skeleton.n_paths) ``` ```{code-cell} ipython3 @@ -70,7 +70,7 @@ skeleton_layer = viewer.add_shapes( shape_type='path', properties=paths_table, edge_width=0.5, - edge_color='random-path-id', + edge_color='random_path_id', edge_colormap='tab10', ) ``` @@ -85,9 +85,9 @@ napari.utils.nbscreenshot(viewer) We can also demonstrate that most of these branches are in one skeleton, with a few stragglers around the edges, by coloring by skeleton ID: ```{code-cell} ipython3 -skeleton_layer.edge_color = 'skeleton-id' +skeleton_layer.edge_color = 'skeleton_id' # for now, we need to set the face color as well -skeleton_layer.face_color = 'skeleton-id' +skeleton_layer.face_color = 'skeleton_id' ``` ```{code-cell} ipython3 @@ -99,10 +99,10 @@ napari.utils.nbscreenshot(viewer) Finally, we can color the paths by a numerical property, such as their length. ```{code-cell} ipython3 -skeleton_layer.edge_color = 'branch-distance' +skeleton_layer.edge_color = 'branch_distance' skeleton_layer.edge_colormap = 'viridis' # for now, we need to set the face color as well -skeleton_layer.face_color = 'branch-distance' +skeleton_layer.face_color = 'branch_distance' skeleton_layer.face_colormap = 'viridis' ``` diff --git a/doc/getting_started/getting_started.md b/doc/getting_started/getting_started.md index 115cbc54..657d64d1 100644 --- a/doc/getting_started/getting_started.md +++ b/doc/getting_started/getting_started.md @@ -136,7 +136,7 @@ Let's go back to the red blood cell image to illustrate this graph. ```{code-cell} ipython3 from skan import Skeleton, summarize -branch_data = summarize(Skeleton(skeleton0, spacing=spacing_nm)) +branch_data = summarize(Skeleton(skeleton0, spacing=spacing_nm), separator='_') branch_data.head() ``` @@ -156,7 +156,7 @@ Next come the coordinates in natural space, the Euclidean distance between the p This data table follows the "tidy data" paradigm, with one row per branch, which allows fast exploration of branch statistics. Here, for example, we plot the distribution of branch lengths according to branch type: ```{code-cell} ipython3 -branch_data.hist(column='branch-distance', by='branch-type', bins=100); +branch_data.hist(column='branch_distance', by='branch_type', bins=100); ``` We can see that junction-to-junction branches tend to be longer than junction-to-endpoint and junction isolated branches, and that there are no cycles in our dataset. 
@@ -165,7 +165,7 @@ We can also represent this visually with the `overlay_euclidean_skeleton`, which ```{code-cell} ipython3 draw.overlay_euclidean_skeleton_2d(image0, branch_data, - skeleton_color_source='branch-type'); + skeleton_color_source='branch_type'); ``` ## 2. Comparing different skeletons @@ -194,7 +194,7 @@ def skeletonize(images, spacings_nm): skeletons = skeletonize(images, spacings_nm) -tables = [summarize(Skeleton(skeleton, spacing=spacing)) +tables = [summarize(Skeleton(skeleton, spacing=spacing), separator='_') for skeleton, spacing in zip(skeletons, spacings_nm)] for filename, dataframe in zip(files, tables): @@ -210,8 +210,8 @@ Now, however, we have a tidy data table with information about the sample origin ```{code-cell} ipython3 import seaborn as sns -j2j = (table[table['branch-type'] == 2]. - rename(columns={'branch-distance': +j2j = (table[table['branch_type'] == 2]. + rename(columns={'branch_distance': 'branch distance (nm)'})) per_image = j2j.groupby('filename').median() per_image['infected'] = ['infected' if 'inf' in fn else 'normal' diff --git a/src/skan/csr.py b/src/skan/csr.py index 5608ab95..5a8db377 100644 --- a/src/skan/csr.py +++ b/src/skan/csr.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import numpy as np import pandas as pd from scipy import sparse, ndimage as ndi @@ -705,7 +707,8 @@ def summarize( skel: Skeleton, *, value_is_height: bool = False, - find_main_branch: bool = False + find_main_branch: bool = False, + separator: str | None = None, ) -> pd.DataFrame: """Compute statistics for every skeleton and branch in ``skel``. @@ -722,6 +725,11 @@ def summarize( longest shortest path within a skeleton. This step is very expensive as it involves computing the shortest paths between all pairs of branch endpoints, so it is off by default. + separator : str, optional + Some column names are composite, e.g. ``'coord_src_0'``. The separator + argument allows users to configure which character is used to separate + the components. The default up to version 0.12 is '-', but will change + to '_' in version 0.13. Returns ------- @@ -729,49 +737,59 @@ def summarize( A summary of the branches including branch length, mean branch value, branch euclidean distance, etc. 
""" + if separator is None: + warnings.warn( + "separator in column name will change to _ in version 0.13; " + "to silence this warning, use `separator='-'` to maintain " + "current behavior and use `separator='_'` to switch to the " + "new default behavior.", + np.VisibleDeprecationWarning, + stacklevel=2, # make sure warning points to calling line + ) + separator = '-' summary = {} ndim = skel.coordinates.shape[1] _, skeleton_ids = csgraph.connected_components(skel.graph, directed=False) endpoints_src = skel.paths.indices[skel.paths.indptr[:-1]] endpoints_dst = skel.paths.indices[skel.paths.indptr[1:] - 1] - summary['skeleton-id'] = skeleton_ids[endpoints_src] - summary['node-id-src'] = endpoints_src - summary['node-id-dst'] = endpoints_dst - summary['branch-distance'] = skel.path_lengths() + summary['skeleton_id'] = skeleton_ids[endpoints_src] + summary['node_id_src'] = endpoints_src + summary['node_id_dst'] = endpoints_dst + summary['branch_distance'] = skel.path_lengths() deg_src = skel.degrees[endpoints_src] deg_dst = skel.degrees[endpoints_dst] kind = np.full(deg_src.shape, 2) # default: junction-to-junction kind[(deg_src == 1) | (deg_dst == 1)] = 1 # tip-junction kind[(deg_src == 1) & (deg_dst == 1)] = 0 # tip-tip kind[endpoints_src == endpoints_dst] = 3 # cycle - summary['branch-type'] = kind - summary['mean-pixel-value'] = skel.path_means() - summary['stdev-pixel-value'] = skel.path_stdev() + summary['branch_type'] = kind + summary['mean_pixel_value'] = skel.path_means() + summary['stdev_pixel_value'] = skel.path_stdev() for i in range(ndim): # keep loops separate for best insertion order - summary[f'image-coord-src-{i}'] = skel.coordinates[endpoints_src, i] + summary[f'image_coord_src_{i}'] = skel.coordinates[endpoints_src, i] for i in range(ndim): - summary[f'image-coord-dst-{i}'] = skel.coordinates[endpoints_dst, i] + summary[f'image_coord_dst_{i}'] = skel.coordinates[endpoints_dst, i] coords_real_src = skel.coordinates[endpoints_src] * skel.spacing for i in range(ndim): - summary[f'coord-src-{i}'] = coords_real_src[:, i] + summary[f'coord_src_{i}'] = coords_real_src[:, i] if value_is_height: values_src = skel.pixel_values[endpoints_src] - summary[f'coord-src-{ndim}'] = values_src + summary[f'coord_src_{ndim}'] = values_src coords_real_src = np.concatenate( [coords_real_src, values_src[:, np.newaxis]], axis=1, - ) # yapf: ignore + ) coords_real_dst = skel.coordinates[endpoints_dst] * skel.spacing for i in range(ndim): - summary[f'coord-dst-{i}'] = coords_real_dst[:, i] + summary[f'coord_dst_{i}'] = coords_real_dst[:, i] if value_is_height: values_dst = skel.pixel_values[endpoints_dst] - summary[f'coord-dst-{ndim}'] = values_dst + summary[f'coord_dst_{ndim}'] = values_dst coords_real_dst = np.concatenate( [coords_real_dst, values_dst[:, np.newaxis]], axis=1, - ) # yapf: ignore - summary['euclidean-distance'] = ( + ) + summary['euclidean_distance'] = ( np.sqrt((coords_real_dst - coords_real_src)**2 @ np.ones(ndim + int(value_is_height))) ) @@ -780,6 +798,7 @@ def summarize( if find_main_branch: # define main branch as longest shortest path within a single skeleton df['main'] = find_main_branches(df) + df.rename(columns=lambda s: s.replace('_', separator), inplace=True) return df @@ -1051,10 +1070,10 @@ def _simplify_graph(skel): # don't reduce return skel.graph, np.arange(skel.graph.shape[0]) - summary = summarize(skel) - src = np.asarray(summary['node-id-src']) - dst = np.asarray(summary['node-id-dst']) - distance = np.asarray(summary['branch-distance']) + summary = 
summarize(skel, separator='_') + src = np.asarray(summary['node_id_src']) + dst = np.asarray(summary['node_id_dst']) + distance = np.asarray(summary['branch_distance']) # to reduce the size of simplified graph nodes = np.unique(np.append(src, dst)) diff --git a/src/skan/draw.py b/src/skan/draw.py index 13d4af9f..b4504abb 100644 --- a/src/skan/draw.py +++ b/src/skan/draw.py @@ -100,7 +100,7 @@ def overlay_euclidean_skeleton_2d( stats, *, image_cmap=None, - skeleton_color_source='branch-type', + skeleton_color_source='branch_type', skeleton_colormap='viridis', axes=None ): @@ -124,7 +124,7 @@ def overlay_euclidean_skeleton_2d( - skeleton-id: each individual skeleton (connected component) will have a different colour. - - branch-type: each branch type (tip-tip, tip-junction, + - branch_type: each branch type (tip-tip, tip-junction, junction-junction, path-path). This is the default. - branch-distance: the curved length of the skeleton branch. - euclidean-distance: the straight-line length of the skeleton branch. @@ -142,8 +142,8 @@ def overlay_euclidean_skeleton_2d( image = _normalise_image(image, image_cmap=image_cmap) summary = stats # transforming from row, col to x, y - coords_cols = (['image-coord-src-%i' % i for i in [1, 0]] - + ['image-coord-dst-%i' % i for i in [1, 0]]) + coords_cols = (['image_coord_src_%i' % i for i in [1, 0]] + + ['image_coord_dst_%i' % i for i in [1, 0]]) coords = summary[coords_cols].values.reshape((-1, 2, 2)) if axes is None: fig, axes = plt.subplots() diff --git a/src/skan/napari_skan.py b/src/skan/napari_skan.py index 0714cf32..dfd03a1a 100644 --- a/src/skan/napari_skan.py +++ b/src/skan/napari_skan.py @@ -39,7 +39,7 @@ def labels_to_skeleton_shapes( all_paths = [skeleton.path_coordinates(i) for i in range(skeleton.n_paths)] # option to have main_path = True (or something) changing header - paths_table = summarize(skeleton) + paths_table = summarize(skeleton, separator='_') layer_kwargs = { 'shape_type': 'path', 'edge_colormap': 'tab10', diff --git a/src/skan/pipe.py b/src/skan/pipe.py index 6a772520..44f82d2c 100644 --- a/src/skan/pipe.py +++ b/src/skan/pipe.py @@ -65,13 +65,15 @@ def process_single_image( ) quality = shape_index(image, sigma=pixel_smoothing_radius, mode='reflect') skeleton = morphology.skeletonize(thresholded) * quality - framedata = csr.summarize(csr.Skeleton(skeleton, spacing=scale)) + framedata = csr.summarize( + csr.Skeleton(skeleton, spacing=scale), separator='_' + ) framedata['squiggle'] = np.log2( - framedata['branch-distance'] / framedata['euclidean-distance'] + framedata['branch_distance'] / framedata['euclidean_distance'] ) framedata['scale'] = scale framedata.rename( - columns={'mean-pixel-value': 'mean-shape-index'}, + columns={'mean_pixel_value': 'mean_shape_index'}, inplace=True, errors='raise', ) @@ -152,9 +154,9 @@ def process_images( image_stats['branch density'] = ( framedata.shape[0] / image_stats['area'] ) - j2j = framedata[framedata['branch-type'] == 2] + j2j = framedata[framedata['branch_type'] == 2] image_stats['mean J2J branch distance'] = ( - j2j['branch-distance'].mean() + j2j['branch_distance'].mean() ) image_results.append(image_stats) yield filename, image, thresholded, skeleton, framedata diff --git a/src/skan/summary_utils.py b/src/skan/summary_utils.py index cf451b90..45aaf94f 100644 --- a/src/skan/summary_utils.py +++ b/src/skan/summary_utils.py @@ -20,9 +20,9 @@ def find_main_branches(summary: DataFrame) -> np.ndarray: skeleton """ is_main = np.zeros(summary.shape[0], dtype=bool) - us = 
summary['node-id-src'] - vs = summary['node-id-dst'] - ws = summary['branch-distance'] + us = summary['node_id_src'] + vs = summary['node_id_dst'] + ws = summary['branch_distance'] edge2idx = {(u, v): i for i, (u, v) in enumerate(zip(us, vs))} diff --git a/src/skan/test/test_csr.py b/src/skan/test/test_csr.py index cb23b04c..ff8674af 100644 --- a/src/skan/test/test_csr.py +++ b/src/skan/test/test_csr.py @@ -18,8 +18,10 @@ def _old_branch_statistics( skel = csr.Skeleton( skeleton_image, spacing=spacing, value_is_height=value_is_height ) - summary = csr.summarize(skel, value_is_height=value_is_height) - columns = ['node-id-src', 'node-id-dst', 'branch-distance', 'branch-type'] + summary = csr.summarize( + skel, value_is_height=value_is_height, separator='_' + ) + columns = ['node_id_src', 'node_id_dst', 'branch_distance', 'branch_type'] return summary[columns].to_numpy() @@ -54,23 +56,23 @@ def test_skeleton1_stats(): def test_2skeletons(): - df = csr.summarize(csr.Skeleton(skeleton2)) - assert_almost_equal(np.unique(df['euclidean-distance']), np.sqrt([5, 10])) - assert_equal(np.unique(df['skeleton-id']), [0, 1]) - assert_equal(np.bincount(df['branch-type']), [0, 4, 4]) + df = csr.summarize(csr.Skeleton(skeleton2), separator='_') + assert_almost_equal(np.unique(df['euclidean_distance']), np.sqrt([5, 10])) + assert_equal(np.unique(df['skeleton_id']), [0, 1]) + assert_equal(np.bincount(df['branch_type']), [0, 4, 4]) def test_summarize_spacing(): - df = csr.summarize(csr.Skeleton(skeleton2)) - df2 = csr.summarize(csr.Skeleton(skeleton2, spacing=2)) - assert_equal(np.array(df['node-id-src']), np.array(df2['node-id-src'])) + df = csr.summarize(csr.Skeleton(skeleton2), separator='_') + df2 = csr.summarize(csr.Skeleton(skeleton2, spacing=2), separator='_') + assert_equal(np.array(df['node_id_src']), np.array(df2['node_id_src'])) assert_almost_equal( - np.array(df2['euclidean-distance']), - np.array(2 * df['euclidean-distance']) + np.array(df2['euclidean_distance']), + np.array(2 * df['euclidean_distance']) ) assert_almost_equal( - np.array(df2['branch-distance']), - np.array(2 * df['branch-distance']) + np.array(df2['branch_distance']), + np.array(2 * df['branch_distance']) ) @@ -107,9 +109,10 @@ def test_topograph_summary(): stats = csr.summarize( csr.Skeleton(topograph1d, spacing=2.5, value_is_height=True), value_is_height=True, + separator='_', ) - assert stats.loc[0, 'euclidean-distance'] == 5.0 - columns = ['coord-src-0', 'coord-src-1', 'coord-dst-0', 'coord-dst-1'] + assert stats.loc[0, 'euclidean_distance'] == 5.0 + columns = ['coord_src_0', 'coord_src_1', 'coord_dst_0', 'coord_dst_1'] assert_almost_equal(sorted(stats.loc[0, columns]), [0, 3, 3, 5]) @@ -121,27 +124,27 @@ def test_junction_multiplicity(): def test_multiplicity_stats(): - stats1 = csr.summarize(csr.Skeleton(skeleton0)) - stats2 = csr.summarize(csr.Skeleton(skeleton0, spacing=2)) + stats1 = csr.summarize(csr.Skeleton(skeleton0), separator='_') + stats2 = csr.summarize(csr.Skeleton(skeleton0, spacing=2), separator='_') assert_almost_equal( - 2 * stats1['branch-distance'].values, - stats2['branch-distance'].values + 2 * stats1['branch_distance'].values, + stats2['branch_distance'].values ) assert_almost_equal( - 2 * stats1['euclidean-distance'].values, - stats2['euclidean-distance'].values + 2 * stats1['euclidean_distance'].values, + stats2['euclidean_distance'].values ) def test_pixel_values(): image = np.random.random((45,)) expected = np.mean(image) - stats = csr.summarize(csr.Skeleton(image)) - 
assert_almost_equal(stats.loc[0, 'mean-pixel-value'], expected) + stats = csr.summarize(csr.Skeleton(image), separator='_') + assert_almost_equal(stats.loc[0, 'mean_pixel_value'], expected) def test_tip_junction_edges(): - stats1 = csr.summarize(csr.Skeleton(skeleton4)) + stats1 = csr.summarize(csr.Skeleton(skeleton4), separator='_') assert stats1.shape[0] == 3 # ensure all three branches are counted @@ -172,7 +175,7 @@ def test_transpose_image(): skeleton1 = csr.Skeleton(image) skeleton2 = csr.Skeleton(image.T) - assert (skeleton1.n_paths == skeleton2.n_paths) + assert skeleton1.n_paths == skeleton2.n_paths np.testing.assert_allclose( np.sort(skeleton1.path_lengths()), np.sort(skeleton2.path_lengths()), @@ -180,7 +183,7 @@ def test_transpose_image(): @pytest.mark.parametrize( - "skeleton,prune_branch,target", + 'skeleton,prune_branch,target', [ ( skeleton1, 1, @@ -208,8 +211,8 @@ def test_prune_paths( ) -> None: """Test pruning of paths.""" s = csr.Skeleton(skeleton, keep_images=True) - summary = summarize(s) - indices_to_remove = summary.loc[summary['branch-type'] == prune_branch + summary = summarize(s, separator='_') + indices_to_remove = summary.loc[summary['branch_type'] == prune_branch ].index pruned = s.prune_paths(indices_to_remove) np.testing.assert_array_equal(pruned, target) @@ -219,8 +222,8 @@ def test_prune_paths_exception_single_point() -> None: """Test exceptions raised when pruning leaves a single point and Skeleton object can not be created and returned.""" s = csr.Skeleton(skeleton0) - summary = summarize(s) - indices_to_remove = summary.loc[summary['branch-type'] == 1].index + summary = summarize(s, separator='_') + indices_to_remove = summary.loc[summary['branch_type'] == 1].index with pytest.raises(ValueError): s.prune_paths(indices_to_remove) @@ -229,7 +232,7 @@ def test_prune_paths_exception_invalid_path_index() -> None: """Test exceptions raised when trying to prune paths that do not exist in the summary. 
This can arise if skeletons are not updated correctly during iterative pruning.""" s = csr.Skeleton(skeleton0) - summary = summarize(s) + summary = summarize(s, separator='_') indices_to_remove = [6] with pytest.raises(ValueError): s.prune_paths(indices_to_remove) @@ -312,7 +315,15 @@ def test_skeleton_path_image_no_keep_image(): pli = s.path_label_image() assert np.max(pli) == s.n_paths + def test_skeletonlabel(): - stats = csr.summarize(csr.Skeleton(skeletonlabel)) - assert stats['mean-pixel-value'].max() == skeletonlabel.max() - assert stats['mean-pixel-value'].max() > 1 + stats = csr.summarize(csr.Skeleton(skeletonlabel), separator='_') + assert stats['mean_pixel_value'].max() == skeletonlabel.max() + assert stats['mean_pixel_value'].max() > 1 + + +def test_default_summarize_separator(): + with pytest.warns(np.VisibleDeprecationWarning, + match='separator in column name'): + stats = csr.summarize(csr.Skeleton(skeletonlabel)) + assert 'skeleton-id' in stats diff --git a/src/skan/test/test_draw.py b/src/skan/test/test_draw.py index 904bdbc6..12156e3e 100644 --- a/src/skan/test/test_draw.py +++ b/src/skan/test/test_draw.py @@ -35,7 +35,7 @@ def test_skeleton(test_thresholded): @pytest.fixture def test_stats(test_skeleton): - stats = csr.summarize(csr.Skeleton(test_skeleton)) + stats = csr.summarize(csr.Skeleton(test_skeleton), separator='_') return stats @@ -48,7 +48,7 @@ def test_overlay_skeleton(test_image, test_skeleton): def test_overlay_euclidean_skeleton(test_image, test_stats): draw.overlay_euclidean_skeleton_2d(test_image, test_stats) draw.overlay_euclidean_skeleton_2d( - test_image, test_stats, skeleton_color_source='branch-distance' + test_image, test_stats, skeleton_color_source='branch_distance' ) diff --git a/src/skan/test/test_napari_plugin.py b/src/skan/test/test_napari_plugin.py index 39a0129d..4582bdb9 100644 --- a/src/skan/test/test_napari_plugin.py +++ b/src/skan/test/test_napari_plugin.py @@ -22,7 +22,7 @@ def test_get_skeleton_simple(): labels_layer, skeleton_type ) - assert type(layer_kwargs["metadata"]["skeleton"]) is Skeleton + assert type(layer_kwargs['metadata']['skeleton']) is Skeleton np.testing.assert_array_equal( shapes_data[0], [[5, 1], [5, 2], [5, 3], [5, 4], [5, 5], [5, 6], [5, 7], [5, 8]] @@ -41,7 +41,7 @@ def test_get_skeleton_horse(): ) assert len(shapes_data) == 24 # 24 line segments in the horse skeleton assert 'features' in layer_kwargs - assert type(layer_kwargs["features"]) is pd.DataFrame + assert type(layer_kwargs['features']) is pd.DataFrame def test_gui(make_napari_viewer): @@ -56,7 +56,7 @@ def test_gui(make_napari_viewer): dw, widget = viewer.window.add_plugin_dock_widget( 'skan', 'Color Skeleton Widget' ) - widget.feature_name.value = "euclidean-distance" + widget.feature_name.value = 'euclidean_distance' widget() layer = viewer.layers[-1] assert layer.edge_colormap.name == 'viridis' diff --git a/src/skan/test/test_skeleton_class.py b/src/skan/test/test_skeleton_class.py index 99fd0561..0404c637 100644 --- a/src/skan/test/test_skeleton_class.py +++ b/src/skan/test/test_skeleton_class.py @@ -101,17 +101,17 @@ def test_skeleton_summarize(): image = np.zeros(skeleton2.shape, dtype=float) image[skeleton2] = 1 + np.random.random(np.sum(skeleton2)) skeleton = Skeleton(image) - summary = summarize(skeleton) - assert set(summary['skeleton-id']) == {0, 1} + summary = summarize(skeleton, separator='_') + assert set(summary['skeleton_id']) == {0, 1} assert ( - np.all(summary['mean-pixel-value'] < 2) - and np.all(summary['mean-pixel-value'] > 1) + 
np.all(summary['mean_pixel_value'] < 2) + and np.all(summary['mean_pixel_value'] > 1) ) def test_skeleton_label_image_strict(): """Test that the skeleton pixel labels match the branch IDs. - + This does pixel-wise pairing of the label image with the expected label image. There should be the same number of unique pairs as there are unique labels in the expected label image. This assumes that the branches are diff --git a/src/skan/test/test_summary_utils.py b/src/skan/test/test_summary_utils.py index a35d0642..059cfa72 100644 --- a/src/skan/test/test_summary_utils.py +++ b/src/skan/test/test_summary_utils.py @@ -6,7 +6,7 @@ def test_find_main(): skeleton = Skeleton(skeleton1) - summary_df = summarize(skeleton, find_main_branch=True) + summary_df = summarize(skeleton, find_main_branch=True, separator='_') non_main_edge_start = [2, 1] non_main_edge_finish = [3, 3] @@ -14,7 +14,7 @@ def test_find_main(): non_main_df = summary_df.loc[summary_df['main'] == False] assert non_main_df.shape[0] == 1 coords = non_main_df[[ - 'coord-src-0', 'coord-src-1', 'coord-dst-0', 'coord-dst-1' + 'coord_src_0', 'coord_src_1', 'coord_dst_0', 'coord_dst_1' ]].to_numpy() assert ( np.all(coords == non_main_edge_start + non_main_edge_finish)
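For reviewers, a minimal usage sketch of the behaviour introduced by this patch (not part of the diff itself; the plus-shaped test image and variable names below are invented purely for illustration):

```python
import numpy as np
from skan import Skeleton, summarize

# Toy input: a plus-shaped binary skeleton (illustrative only).
skeleton_image = np.zeros((7, 7), dtype=bool)
skeleton_image[3, :] = True
skeleton_image[:, 3] = True

# Opt in to the new column style: '-' is replaced by '_' in every column
# name, so the columns are valid identifiers and attribute access works.
summary = summarize(Skeleton(skeleton_image), separator='_')
print(summary.branch_distance.mean())         # attribute access, per the commit message
print(summary['branch_type'].value_counts())  # item access still works too

# Omitting `separator` keeps the old hyphenated names (e.g. 'branch-distance')
# but now emits np.VisibleDeprecationWarning, ahead of the default flip in 0.13.
legacy = summarize(Skeleton(skeleton_image))
assert 'branch-distance' in legacy.columns
```

With `separator='_'` the summary columns become valid Python identifiers, which is what enables the attribute access mentioned in the commit message; the hyphenated names remain the default until version 0.13, with the deprecation warning added in `csr.summarize` nudging callers to choose explicitly.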