From b6191fb2c713e7ee4321939f6ae984579940039a Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 11 Jul 2019 22:35:41 -0400 Subject: [PATCH 0001/1097] wip: refactor add_chunk_edges --- pychunkedgraph/backend/chunkedgraph.py | 291 +++++++++++++++++++++++++ 1 file changed, 291 insertions(+) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index c4662b125..acb0c10cb 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -1760,6 +1760,297 @@ def add_atomic_edges_in_chunks(self, edge_id_dict: dict, (k, np.sum(time_dict[k])*1000, len(time_dict[k]), np.mean(time_dict[k])*1000)) + def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, + edge_aff_dict: dict, edge_area_dict: dict, + isolated_node_ids: Sequence[np.uint64], + verbose: bool = True, + time_stamp: Optional[datetime.datetime] = None): + """ Creates atomic nodes in first abstraction layer for a SINGLE chunk + and all abstract nodes in the second for the same chunk + + Alle edges (edge_ids) need to be from one chunk and no nodes should + exist for this chunk prior to calling this function. 
All cross edges + (cross_edge_ids) have to point out the chunk (first entry is the id + within the chunk) + + :param edge_id_dict: dict + :param edge_aff_dict: dict + :param edge_area_dict: dict + :param isolated_node_ids: list of uint64s + ids of nodes that have no edge in the chunked graph + :param verbose: bool + :param time_stamp: datetime + """ + if time_stamp is None: + time_stamp = datetime.datetime.utcnow() + + if time_stamp.tzinfo is None: + time_stamp = UTC.localize(time_stamp) + + # Comply to resolution of BigTables TimeRange + time_stamp = get_google_compatible_time_stamp(time_stamp, + round_up=False) + + edge_aff_keys = [ + 'in_connected','in_disconnected','between_connected','between_disconnected'] + edge_id_keys = edge_aff_keys[:].insert(2, 'cross') + + # Check if keys exist and include an empty array if not + n_edge_ids = 0 + empty_edges_array = np.array([], dtype=np.uint64).reshape(0, 2) + + for key in edge_id_keys: + edge_id_dict[key] = np.concatenate( + edge_id_dict.get(key, empty_edges_array.copy(), + empty_edges_array.copy())) + n_edge_ids += len(edge_id_dict[edge_id_key]) + + for key in edge_aff_keys: + edge_aff_dict[key] = np.concatenate( + edge_aff_dict.get(key, empty_edges_array.copy(), + empty_edges_array.copy())) + + time_start = time.time() + + # Get connected component within the chunk + chunk_node_ids = np.concatenate([ + isolated_node_ids.astype(np.uint64), + np.unique(edge_id_dict["in_connected"]), + np.unique(edge_id_dict["in_disconnected"]), + np.unique(edge_id_dict["cross"][:, 0]), + np.unique(edge_id_dict["between_connected"][:, 0]), + np.unique(edge_id_dict["between_disconnected"][:, 0])]) + + + # nothing to do + if not len(chunk_node_ids): return 0 + + chunk_node_ids = np.unique(chunk_node_ids) + + node_chunk_ids = np.array([self.get_chunk_id(c) + for c in chunk_node_ids], + dtype=np.uint64) + + u_node_chunk_ids, c_node_chunk_ids = np.unique(node_chunk_ids, + return_counts=True) + if len(u_node_chunk_ids) > 1: + raise 
Exception("%d: %d chunk ids found in node id list. " + "Some edges might be in the wrong order. " + "Number of occurences:" % + (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) + + add_edge_ids = np.vstack([chunk_node_ids, chunk_node_ids]).T + edge_ids = np.concatenate([edge_id_dict["in_connected"].copy(), + add_edge_ids]) + + graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( + edge_ids, make_directed=True) + + ccs = flatgraph_utils.connected_components(graph) + + if verbose: + self.logger.debug("CC in chunk: %.3fs" % (time.time() - time_start)) + + # Add rows for nodes that are in this chunk + # a connected component at a time + node_c = 0 # Just a counter for the log / speed measurement + + n_ccs = len(ccs) + + # Make parent id creation easier + chunk_id = u_node_chunk_ids[0] + parent_chunk_id = self.get_chunk_id( + layer=2, *self.get_chunk_coordinates(chunk_id)) + + parent_ids = self.get_unique_node_id_range(parent_chunk_id, step=n_ccs) + + time_start = time.time() + time_dict = collections.defaultdict(list) + + time_start_1 = time.time() + sparse_indices = {} + remapping = {} + for k in edge_id_dict.keys(): + # Circumvent datatype issues + + u_ids, inv_ids = np.unique(edge_id_dict[k], return_inverse=True) + mapped_ids = np.arange(len(u_ids), dtype=np.int32) + remapped_arr = mapped_ids[inv_ids].reshape(edge_id_dict[k].shape) + + sparse_indices[k] = compute_indices_pandas(remapped_arr) + remapping[k] = dict(zip(u_ids, mapped_ids)) + + time_dict["sparse_indices"].append(time.time() - time_start_1) + + rows = [] + + for i_cc, cc in enumerate(ccs): + node_ids = unique_graph_ids[cc] + + u_chunk_ids = np.unique([self.get_chunk_id(n) for n in node_ids]) + + if len(u_chunk_ids) > 1: + self.logger.error(f"Found multiple chunk ids: {u_chunk_ids}") + raise Exception() + + # Create parent id + parent_id = parent_ids[i_cc] + + parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2) + + # Add rows for nodes that are in this chunk + for i_node_id, 
node_id in enumerate(node_ids): + # Extract edges relevant to this node + + # in chunk + connected + time_start_2 = time.time() + if node_id in remapping["in_connected"]: + row_ids, column_ids = sparse_indices["in_connected"][remapping["in_connected"][node_id]] + + inv_column_ids = (column_ids + 1) % 2 + + connected_ids = edge_id_dict["in_connected"][row_ids, inv_column_ids] + connected_affs = edge_aff_dict["in_connected"][row_ids] + connected_areas = edge_area_dict["in_connected"][row_ids] + time_dict["in_connected"].append(time.time() - time_start_2) + time_start_2 = time.time() + else: + connected_ids = np.array([], dtype=np.uint64) + connected_affs = np.array([], dtype=np.float32) + connected_areas = np.array([], dtype=np.uint64) + + # in chunk + disconnected + if node_id in remapping["in_disconnected"]: + row_ids, column_ids = sparse_indices["in_disconnected"][remapping["in_disconnected"][node_id]] + inv_column_ids = (column_ids + 1) % 2 + + disconnected_ids = edge_id_dict["in_disconnected"][row_ids, inv_column_ids] + disconnected_affs = edge_aff_dict["in_disconnected"][row_ids] + disconnected_areas = edge_area_dict["in_disconnected"][row_ids] + time_dict["in_disconnected"].append(time.time() - time_start_2) + time_start_2 = time.time() + else: + disconnected_ids = np.array([], dtype=np.uint64) + disconnected_affs = np.array([], dtype=np.float32) + disconnected_areas = np.array([], dtype=np.uint64) + + # out chunk + connected + if node_id in remapping["between_connected"]: + row_ids, column_ids = sparse_indices["between_connected"][remapping["between_connected"][node_id]] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["out_connected_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + connected_ids = np.concatenate([connected_ids, edge_id_dict["between_connected"][row_ids, inv_column_ids]]) + connected_affs = np.concatenate([connected_affs, 
edge_aff_dict["between_connected"][row_ids]]) + connected_areas = np.concatenate([connected_areas, edge_area_dict["between_connected"][row_ids]]) + + parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["between_connected"][row_ids]]) + + time_dict["out_connected"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # out chunk + disconnected + if node_id in remapping["between_disconnected"]: + row_ids, column_ids = sparse_indices["between_disconnected"][remapping["between_disconnected"][node_id]] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["out_disconnected_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + disconnected_ids = np.concatenate([disconnected_ids, edge_id_dict["between_disconnected"][row_ids, inv_column_ids]]) + disconnected_affs = np.concatenate([disconnected_affs, edge_aff_dict["between_disconnected"][row_ids]]) + disconnected_areas = np.concatenate([disconnected_areas, edge_area_dict["between_disconnected"][row_ids]]) + + time_dict["out_disconnected"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # cross + if node_id in remapping["cross"]: + row_ids, column_ids = sparse_indices["cross"][remapping["cross"][node_id]] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["cross_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + connected_ids = np.concatenate([connected_ids, edge_id_dict["cross"][row_ids, inv_column_ids]]) + connected_affs = np.concatenate([connected_affs, np.full((len(row_ids)), np.inf, dtype=np.float32)]) + connected_areas = np.concatenate([connected_areas, np.ones((len(row_ids)), dtype=np.uint64)]) + + parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["cross"][row_ids]]) + time_dict["cross"].append(time.time() - time_start_2) + time_start_2 = 
time.time() + + # Create node + partners = np.concatenate([connected_ids, disconnected_ids]) + affinities = np.concatenate([connected_affs, disconnected_affs]) + areas = np.concatenate([connected_areas, disconnected_areas]) + connected = np.arange(len(connected_ids), dtype=np.int) + + val_dict = {column_keys.Connectivity.Partner: partners, + column_keys.Connectivity.Affinity: affinities, + column_keys.Connectivity.Area: areas, + column_keys.Connectivity.Connected: connected, + column_keys.Hierarchy.Parent: parent_id} + + rows.append(self.mutate_row(serializers.serialize_uint64(node_id), + val_dict, time_stamp=time_stamp)) + node_c += 1 + time_dict["creating_lv1_row"].append(time.time() - time_start_2) + + time_start_1 = time.time() + # Create parent node + rows.append(self.mutate_row(serializers.serialize_uint64(parent_id), + {column_keys.Hierarchy.Child: node_ids}, + time_stamp=time_stamp)) + + time_dict["creating_lv2_row"].append(time.time() - time_start_1) + time_start_1 = time.time() + + cce_layers = self.get_cross_chunk_edges_layer(parent_cross_edges) + u_cce_layers = np.unique(cce_layers) + + val_dict = {} + for cc_layer in u_cce_layers: + layer_cross_edges = parent_cross_edges[cce_layers == cc_layer] + + if len(layer_cross_edges) > 0: + val_dict[column_keys.Connectivity.CrossChunkEdge[cc_layer]] = \ + layer_cross_edges + + if len(val_dict) > 0: + rows.append(self.mutate_row(serializers.serialize_uint64(parent_id), + val_dict, time_stamp=time_stamp)) + node_c += 1 + + time_dict["adding_cross_edges"].append(time.time() - time_start_1) + + if len(rows) > 100000: + time_start_1 = time.time() + self.bulk_write(rows) + time_dict["writing"].append(time.time() - time_start_1) + + if len(rows) > 0: + time_start_1 = time.time() + self.bulk_write(rows) + time_dict["writing"].append(time.time() - time_start_1) + + if verbose: + self.logger.debug("Time creating rows: %.3fs for %d ccs with %d nodes" % + (time.time() - time_start, len(ccs), node_c)) + + for k in 
time_dict.keys(): + self.logger.debug("%s -- %.3fms for %d instances -- avg = %.3fms" % + (k, np.sum(time_dict[k])*1000, len(time_dict[k]), + np.mean(time_dict[k])*1000)) + def add_layer(self, layer_id: int, child_chunk_coords: Sequence[Sequence[int]], time_stamp: Optional[datetime.datetime] = None, From adb31870c4e3b9c3102b259e05fae80ddaa1a4b9 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 12 Jul 2019 15:27:27 -0400 Subject: [PATCH 0002/1097] wip: remove redundant unique chunk id check --- pychunkedgraph/backend/chunkedgraph.py | 39 +++++++++----------------- 1 file changed, 14 insertions(+), 25 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index acb0c10cb..ddb023264 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -1810,38 +1810,37 @@ def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, edge_aff_dict.get(key, empty_edges_array.copy(), empty_edges_array.copy())) - time_start = time.time() - # Get connected component within the chunk chunk_node_ids = np.concatenate([ isolated_node_ids.astype(np.uint64), - np.unique(edge_id_dict["in_connected"]), - np.unique(edge_id_dict["in_disconnected"]), np.unique(edge_id_dict["cross"][:, 0]), np.unique(edge_id_dict["between_connected"][:, 0]), np.unique(edge_id_dict["between_disconnected"][:, 0])]) + add_edge_ids = np.vstack([chunk_node_ids.copy(), chunk_node_ids.copy()]).T + + chunk_node_ids = np.concatenate([ + chunk_node_ids, + np.unique(edge_id_dict["in_connected"]), + np.unique(edge_id_dict["in_disconnected"])]) # nothing to do if not len(chunk_node_ids): return 0 chunk_node_ids = np.unique(chunk_node_ids) + node_chunk_ids = np.array( + [self.get_chunk_id(c) for c in chunk_node_ids], dtype=np.uint64) - node_chunk_ids = np.array([self.get_chunk_id(c) - for c in chunk_node_ids], - dtype=np.uint64) - - u_node_chunk_ids, c_node_chunk_ids = np.unique(node_chunk_ids, - return_counts=True) + 
u_node_chunk_ids, c_node_chunk_ids = np.unique( + node_chunk_ids, return_counts=True) if len(u_node_chunk_ids) > 1: raise Exception("%d: %d chunk ids found in node id list. " "Some edges might be in the wrong order. " "Number of occurences:" % (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) - add_edge_ids = np.vstack([chunk_node_ids, chunk_node_ids]).T - edge_ids = np.concatenate([edge_id_dict["in_connected"].copy(), - add_edge_ids]) + edge_ids = np.concatenate( + [edge_id_dict["in_connected"].copy(), add_edge_ids]) graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( edge_ids, make_directed=True) @@ -1858,9 +1857,8 @@ def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, n_ccs = len(ccs) # Make parent id creation easier - chunk_id = u_node_chunk_ids[0] parent_chunk_id = self.get_chunk_id( - layer=2, *self.get_chunk_coordinates(chunk_id)) + layer=2, *self.get_chunk_coordinates(u_node_chunk_ids[0])) parent_ids = self.get_unique_node_id_range(parent_chunk_id, step=n_ccs) @@ -1885,20 +1883,11 @@ def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, rows = [] for i_cc, cc in enumerate(ccs): - node_ids = unique_graph_ids[cc] - - u_chunk_ids = np.unique([self.get_chunk_id(n) for n in node_ids]) - - if len(u_chunk_ids) > 1: - self.logger.error(f"Found multiple chunk ids: {u_chunk_ids}") - raise Exception() - - # Create parent id parent_id = parent_ids[i_cc] - parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2) # Add rows for nodes that are in this chunk + node_ids = unique_graph_ids[cc] for i_node_id, node_id in enumerate(node_ids): # Extract edges relevant to this node From 349117fe7799fa0f4edcd8c5360e0d24d9a460b2 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 12 Jul 2019 15:45:57 -0400 Subject: [PATCH 0003/1097] wip: revert mistake, add more comments --- pychunkedgraph/backend/chunkedgraph.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py 
b/pychunkedgraph/backend/chunkedgraph.py index ddb023264..4ae8d9c89 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -1813,18 +1813,12 @@ def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, # Get connected component within the chunk chunk_node_ids = np.concatenate([ isolated_node_ids.astype(np.uint64), + np.unique(edge_id_dict["in_connected"]), + np.unique(edge_id_dict["in_disconnected"]), np.unique(edge_id_dict["cross"][:, 0]), np.unique(edge_id_dict["between_connected"][:, 0]), np.unique(edge_id_dict["between_disconnected"][:, 0])]) - - add_edge_ids = np.vstack([chunk_node_ids.copy(), chunk_node_ids.copy()]).T - - chunk_node_ids = np.concatenate([ - chunk_node_ids, - np.unique(edge_id_dict["in_connected"]), - np.unique(edge_id_dict["in_disconnected"])]) - # nothing to do if not len(chunk_node_ids): return 0 chunk_node_ids = np.unique(chunk_node_ids) @@ -1839,12 +1833,16 @@ def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, "Number of occurences:" % (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) - edge_ids = np.concatenate( - [edge_id_dict["in_connected"].copy(), add_edge_ids]) + + # add self edge to all node_ids to make sure they're + # part of connected components because the graph is processed component wise + # if not, the node_ids won't be stored + edge_ids = np.concatenate([ + edge_id_dict["in_connected"].copy(), + np.vstack([chunk_node_ids, chunk_node_ids]).T]) graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( edge_ids, make_directed=True) - ccs = flatgraph_utils.connected_components(graph) if verbose: From f47028e8c441a0fc5744866c73d11d1d530befdd Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 19 Jul 2019 15:46:00 -0400 Subject: [PATCH 0004/1097] move add edges to separate module --- pychunkedgraph/backend/chunkedgraph.py | 278 ------------------- pychunkedgraph/backend/chunkedgraph_init.py | 283 ++++++++++++++++++++ pychunkedgraph/ingest/chunkEdges.proto | 
9 + 3 files changed, 292 insertions(+), 278 deletions(-) create mode 100644 pychunkedgraph/backend/chunkedgraph_init.py create mode 100644 pychunkedgraph/ingest/chunkEdges.proto diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 4ae8d9c89..c4662b125 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -1760,284 +1760,6 @@ def add_atomic_edges_in_chunks(self, edge_id_dict: dict, (k, np.sum(time_dict[k])*1000, len(time_dict[k]), np.mean(time_dict[k])*1000)) - def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, - edge_aff_dict: dict, edge_area_dict: dict, - isolated_node_ids: Sequence[np.uint64], - verbose: bool = True, - time_stamp: Optional[datetime.datetime] = None): - """ Creates atomic nodes in first abstraction layer for a SINGLE chunk - and all abstract nodes in the second for the same chunk - - Alle edges (edge_ids) need to be from one chunk and no nodes should - exist for this chunk prior to calling this function. 
All cross edges - (cross_edge_ids) have to point out the chunk (first entry is the id - within the chunk) - - :param edge_id_dict: dict - :param edge_aff_dict: dict - :param edge_area_dict: dict - :param isolated_node_ids: list of uint64s - ids of nodes that have no edge in the chunked graph - :param verbose: bool - :param time_stamp: datetime - """ - if time_stamp is None: - time_stamp = datetime.datetime.utcnow() - - if time_stamp.tzinfo is None: - time_stamp = UTC.localize(time_stamp) - - # Comply to resolution of BigTables TimeRange - time_stamp = get_google_compatible_time_stamp(time_stamp, - round_up=False) - - edge_aff_keys = [ - 'in_connected','in_disconnected','between_connected','between_disconnected'] - edge_id_keys = edge_aff_keys[:].insert(2, 'cross') - - # Check if keys exist and include an empty array if not - n_edge_ids = 0 - empty_edges_array = np.array([], dtype=np.uint64).reshape(0, 2) - - for key in edge_id_keys: - edge_id_dict[key] = np.concatenate( - edge_id_dict.get(key, empty_edges_array.copy(), - empty_edges_array.copy())) - n_edge_ids += len(edge_id_dict[edge_id_key]) - - for key in edge_aff_keys: - edge_aff_dict[key] = np.concatenate( - edge_aff_dict.get(key, empty_edges_array.copy(), - empty_edges_array.copy())) - - # Get connected component within the chunk - chunk_node_ids = np.concatenate([ - isolated_node_ids.astype(np.uint64), - np.unique(edge_id_dict["in_connected"]), - np.unique(edge_id_dict["in_disconnected"]), - np.unique(edge_id_dict["cross"][:, 0]), - np.unique(edge_id_dict["between_connected"][:, 0]), - np.unique(edge_id_dict["between_disconnected"][:, 0])]) - - if not len(chunk_node_ids): return 0 - - chunk_node_ids = np.unique(chunk_node_ids) - node_chunk_ids = np.array( - [self.get_chunk_id(c) for c in chunk_node_ids], dtype=np.uint64) - - u_node_chunk_ids, c_node_chunk_ids = np.unique( - node_chunk_ids, return_counts=True) - if len(u_node_chunk_ids) > 1: - raise Exception("%d: %d chunk ids found in node id list. 
" - "Some edges might be in the wrong order. " - "Number of occurences:" % - (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) - - - # add self edge to all node_ids to make sure they're - # part of connected components because the graph is processed component wise - # if not, the node_ids won't be stored - edge_ids = np.concatenate([ - edge_id_dict["in_connected"].copy(), - np.vstack([chunk_node_ids, chunk_node_ids]).T]) - - graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( - edge_ids, make_directed=True) - ccs = flatgraph_utils.connected_components(graph) - - if verbose: - self.logger.debug("CC in chunk: %.3fs" % (time.time() - time_start)) - - # Add rows for nodes that are in this chunk - # a connected component at a time - node_c = 0 # Just a counter for the log / speed measurement - - n_ccs = len(ccs) - - # Make parent id creation easier - parent_chunk_id = self.get_chunk_id( - layer=2, *self.get_chunk_coordinates(u_node_chunk_ids[0])) - - parent_ids = self.get_unique_node_id_range(parent_chunk_id, step=n_ccs) - - time_start = time.time() - time_dict = collections.defaultdict(list) - - time_start_1 = time.time() - sparse_indices = {} - remapping = {} - for k in edge_id_dict.keys(): - # Circumvent datatype issues - - u_ids, inv_ids = np.unique(edge_id_dict[k], return_inverse=True) - mapped_ids = np.arange(len(u_ids), dtype=np.int32) - remapped_arr = mapped_ids[inv_ids].reshape(edge_id_dict[k].shape) - - sparse_indices[k] = compute_indices_pandas(remapped_arr) - remapping[k] = dict(zip(u_ids, mapped_ids)) - - time_dict["sparse_indices"].append(time.time() - time_start_1) - - rows = [] - - for i_cc, cc in enumerate(ccs): - parent_id = parent_ids[i_cc] - parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2) - - # Add rows for nodes that are in this chunk - node_ids = unique_graph_ids[cc] - for i_node_id, node_id in enumerate(node_ids): - # Extract edges relevant to this node - - # in chunk + connected - time_start_2 = time.time() - if 
node_id in remapping["in_connected"]: - row_ids, column_ids = sparse_indices["in_connected"][remapping["in_connected"][node_id]] - - inv_column_ids = (column_ids + 1) % 2 - - connected_ids = edge_id_dict["in_connected"][row_ids, inv_column_ids] - connected_affs = edge_aff_dict["in_connected"][row_ids] - connected_areas = edge_area_dict["in_connected"][row_ids] - time_dict["in_connected"].append(time.time() - time_start_2) - time_start_2 = time.time() - else: - connected_ids = np.array([], dtype=np.uint64) - connected_affs = np.array([], dtype=np.float32) - connected_areas = np.array([], dtype=np.uint64) - - # in chunk + disconnected - if node_id in remapping["in_disconnected"]: - row_ids, column_ids = sparse_indices["in_disconnected"][remapping["in_disconnected"][node_id]] - inv_column_ids = (column_ids + 1) % 2 - - disconnected_ids = edge_id_dict["in_disconnected"][row_ids, inv_column_ids] - disconnected_affs = edge_aff_dict["in_disconnected"][row_ids] - disconnected_areas = edge_area_dict["in_disconnected"][row_ids] - time_dict["in_disconnected"].append(time.time() - time_start_2) - time_start_2 = time.time() - else: - disconnected_ids = np.array([], dtype=np.uint64) - disconnected_affs = np.array([], dtype=np.float32) - disconnected_areas = np.array([], dtype=np.uint64) - - # out chunk + connected - if node_id in remapping["between_connected"]: - row_ids, column_ids = sparse_indices["between_connected"][remapping["between_connected"][node_id]] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["out_connected_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - connected_ids = np.concatenate([connected_ids, edge_id_dict["between_connected"][row_ids, inv_column_ids]]) - connected_affs = np.concatenate([connected_affs, edge_aff_dict["between_connected"][row_ids]]) - connected_areas = np.concatenate([connected_areas, edge_area_dict["between_connected"][row_ids]]) 
- - parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["between_connected"][row_ids]]) - - time_dict["out_connected"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # out chunk + disconnected - if node_id in remapping["between_disconnected"]: - row_ids, column_ids = sparse_indices["between_disconnected"][remapping["between_disconnected"][node_id]] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["out_disconnected_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - disconnected_ids = np.concatenate([disconnected_ids, edge_id_dict["between_disconnected"][row_ids, inv_column_ids]]) - disconnected_affs = np.concatenate([disconnected_affs, edge_aff_dict["between_disconnected"][row_ids]]) - disconnected_areas = np.concatenate([disconnected_areas, edge_area_dict["between_disconnected"][row_ids]]) - - time_dict["out_disconnected"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # cross - if node_id in remapping["cross"]: - row_ids, column_ids = sparse_indices["cross"][remapping["cross"][node_id]] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["cross_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - connected_ids = np.concatenate([connected_ids, edge_id_dict["cross"][row_ids, inv_column_ids]]) - connected_affs = np.concatenate([connected_affs, np.full((len(row_ids)), np.inf, dtype=np.float32)]) - connected_areas = np.concatenate([connected_areas, np.ones((len(row_ids)), dtype=np.uint64)]) - - parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["cross"][row_ids]]) - time_dict["cross"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # Create node - partners = np.concatenate([connected_ids, disconnected_ids]) - affinities = np.concatenate([connected_affs, 
disconnected_affs]) - areas = np.concatenate([connected_areas, disconnected_areas]) - connected = np.arange(len(connected_ids), dtype=np.int) - - val_dict = {column_keys.Connectivity.Partner: partners, - column_keys.Connectivity.Affinity: affinities, - column_keys.Connectivity.Area: areas, - column_keys.Connectivity.Connected: connected, - column_keys.Hierarchy.Parent: parent_id} - - rows.append(self.mutate_row(serializers.serialize_uint64(node_id), - val_dict, time_stamp=time_stamp)) - node_c += 1 - time_dict["creating_lv1_row"].append(time.time() - time_start_2) - - time_start_1 = time.time() - # Create parent node - rows.append(self.mutate_row(serializers.serialize_uint64(parent_id), - {column_keys.Hierarchy.Child: node_ids}, - time_stamp=time_stamp)) - - time_dict["creating_lv2_row"].append(time.time() - time_start_1) - time_start_1 = time.time() - - cce_layers = self.get_cross_chunk_edges_layer(parent_cross_edges) - u_cce_layers = np.unique(cce_layers) - - val_dict = {} - for cc_layer in u_cce_layers: - layer_cross_edges = parent_cross_edges[cce_layers == cc_layer] - - if len(layer_cross_edges) > 0: - val_dict[column_keys.Connectivity.CrossChunkEdge[cc_layer]] = \ - layer_cross_edges - - if len(val_dict) > 0: - rows.append(self.mutate_row(serializers.serialize_uint64(parent_id), - val_dict, time_stamp=time_stamp)) - node_c += 1 - - time_dict["adding_cross_edges"].append(time.time() - time_start_1) - - if len(rows) > 100000: - time_start_1 = time.time() - self.bulk_write(rows) - time_dict["writing"].append(time.time() - time_start_1) - - if len(rows) > 0: - time_start_1 = time.time() - self.bulk_write(rows) - time_dict["writing"].append(time.time() - time_start_1) - - if verbose: - self.logger.debug("Time creating rows: %.3fs for %d ccs with %d nodes" % - (time.time() - time_start, len(ccs), node_c)) - - for k in time_dict.keys(): - self.logger.debug("%s -- %.3fms for %d instances -- avg = %.3fms" % - (k, np.sum(time_dict[k])*1000, len(time_dict[k]), - 
np.mean(time_dict[k])*1000)) - def add_layer(self, layer_id: int, child_chunk_coords: Sequence[Sequence[int]], time_stamp: Optional[datetime.datetime] = None, diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py new file mode 100644 index 000000000..1557ab14c --- /dev/null +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -0,0 +1,283 @@ +import time +import datetime +import os + +from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, NamedTuple + +import numpy as np + +from pychunkedgraph.backend import cutting, chunkedgraph_comp, flatgraph_utils + +def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, + edge_aff_dict: dict, edge_area_dict: dict, + isolated_node_ids: Sequence[np.uint64], + verbose: bool = True, + time_stamp: Optional[datetime.datetime] = None): + """ Creates atomic nodes in first abstraction layer for a SINGLE chunk + and all abstract nodes in the second for the same chunk + + Alle edges (edge_ids) need to be from one chunk and no nodes should + exist for this chunk prior to calling this function. 
All cross edges + (cross_edge_ids) have to point out the chunk (first entry is the id + within the chunk) + + :param edge_id_dict: dict + :param edge_aff_dict: dict + :param edge_area_dict: dict + :param isolated_node_ids: list of uint64s + ids of nodes that have no edge in the chunked graph + :param verbose: bool + :param time_stamp: datetime + """ + if time_stamp is None: + time_stamp = datetime.datetime.utcnow() + + if time_stamp.tzinfo is None: + time_stamp = UTC.localize(time_stamp) + + # Comply to resolution of BigTables TimeRange + time_stamp = get_google_compatible_time_stamp(time_stamp, + round_up=False) + + edge_aff_keys = [ + 'in_connected','in_disconnected','between_connected','between_disconnected'] + edge_id_keys = edge_aff_keys[:].insert(2, 'cross') + + # Check if keys exist and include an empty array if not + n_edge_ids = 0 + empty_edges_array = np.array([], dtype=np.uint64).reshape(0, 2) + + for key in edge_id_keys: + edge_id_dict[key] = np.concatenate( + edge_id_dict.get(key, empty_edges_array.copy(), + empty_edges_array.copy())) + n_edge_ids += len(edge_id_dict[edge_id_key]) + + for key in edge_aff_keys: + edge_aff_dict[key] = np.concatenate( + edge_aff_dict.get(key, empty_edges_array.copy(), + empty_edges_array.copy())) + + # Get connected component within the chunk + chunk_node_ids = np.concatenate([ + isolated_node_ids.astype(np.uint64), + np.unique(edge_id_dict["in_connected"]), + np.unique(edge_id_dict["in_disconnected"]), + np.unique(edge_id_dict["cross"][:, 0]), + np.unique(edge_id_dict["between_connected"][:, 0]), + np.unique(edge_id_dict["between_disconnected"][:, 0])]) + + if not len(chunk_node_ids): return 0 + + chunk_node_ids = np.unique(chunk_node_ids) + node_chunk_ids = np.array( + [cg.get_chunk_id(c) for c in chunk_node_ids], dtype=np.uint64) + + u_node_chunk_ids, c_node_chunk_ids = np.unique( + node_chunk_ids, return_counts=True) + if len(u_node_chunk_ids) > 1: + raise Exception("%d: %d chunk ids found in node id list. 
" + "Some edges might be in the wrong order. " + "Number of occurences:" % + (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) + + + # add self edge to all node_ids to make sure they're + # part of connected components because the graph is processed component wise + # if not, the node_ids won't be stored + edge_ids = np.concatenate([ + edge_id_dict["in_connected"].copy(), + np.vstack([chunk_node_ids, chunk_node_ids]).T]) + + graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( + edge_ids, make_directed=True) + ccs = flatgraph_utils.connected_components(graph) + + if verbose: + cg.logger.debug("CC in chunk: %.3fs" % (time.time() - time_start)) + + # Add rows for nodes that are in this chunk + # a connected component at a time + node_c = 0 # Just a counter for the log / speed measurement + + n_ccs = len(ccs) + + # Make parent id creation easier + parent_chunk_id = cg.get_chunk_id( + layer=2, *cg.get_chunk_coordinates(u_node_chunk_ids[0])) + + parent_ids = cg.get_unique_node_id_range(parent_chunk_id, step=n_ccs) + + time_start = time.time() + time_dict = collections.defaultdict(list) + + time_start_1 = time.time() + sparse_indices = {} + remapping = {} + for k in edge_id_dict.keys(): + # Circumvent datatype issues + + u_ids, inv_ids = np.unique(edge_id_dict[k], return_inverse=True) + mapped_ids = np.arange(len(u_ids), dtype=np.int32) + remapped_arr = mapped_ids[inv_ids].reshape(edge_id_dict[k].shape) + + sparse_indices[k] = compute_indices_pandas(remapped_arr) + remapping[k] = dict(zip(u_ids, mapped_ids)) + + time_dict["sparse_indices"].append(time.time() - time_start_1) + + rows = [] + + for i_cc, cc in enumerate(ccs): + parent_id = parent_ids[i_cc] + parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2) + + # Add rows for nodes that are in this chunk + node_ids = unique_graph_ids[cc] + for i_node_id, node_id in enumerate(node_ids): + # Extract edges relevant to this node + + # in chunk + connected + time_start_2 = time.time() + if node_id in 
remapping["in_connected"]: + row_ids, column_ids = sparse_indices["in_connected"][remapping["in_connected"][node_id]] + + inv_column_ids = (column_ids + 1) % 2 + + connected_ids = edge_id_dict["in_connected"][row_ids, inv_column_ids] + connected_affs = edge_aff_dict["in_connected"][row_ids] + connected_areas = edge_area_dict["in_connected"][row_ids] + time_dict["in_connected"].append(time.time() - time_start_2) + time_start_2 = time.time() + else: + connected_ids = np.array([], dtype=np.uint64) + connected_affs = np.array([], dtype=np.float32) + connected_areas = np.array([], dtype=np.uint64) + + # in chunk + disconnected + if node_id in remapping["in_disconnected"]: + row_ids, column_ids = sparse_indices["in_disconnected"][remapping["in_disconnected"][node_id]] + inv_column_ids = (column_ids + 1) % 2 + + disconnected_ids = edge_id_dict["in_disconnected"][row_ids, inv_column_ids] + disconnected_affs = edge_aff_dict["in_disconnected"][row_ids] + disconnected_areas = edge_area_dict["in_disconnected"][row_ids] + time_dict["in_disconnected"].append(time.time() - time_start_2) + time_start_2 = time.time() + else: + disconnected_ids = np.array([], dtype=np.uint64) + disconnected_affs = np.array([], dtype=np.float32) + disconnected_areas = np.array([], dtype=np.uint64) + + # out chunk + connected + if node_id in remapping["between_connected"]: + row_ids, column_ids = sparse_indices["between_connected"][remapping["between_connected"][node_id]] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["out_connected_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + connected_ids = np.concatenate([connected_ids, edge_id_dict["between_connected"][row_ids, inv_column_ids]]) + connected_affs = np.concatenate([connected_affs, edge_aff_dict["between_connected"][row_ids]]) + connected_areas = np.concatenate([connected_areas, edge_area_dict["between_connected"][row_ids]]) + + 
parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["between_connected"][row_ids]]) + + time_dict["out_connected"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # out chunk + disconnected + if node_id in remapping["between_disconnected"]: + row_ids, column_ids = sparse_indices["between_disconnected"][remapping["between_disconnected"][node_id]] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["out_disconnected_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + disconnected_ids = np.concatenate([disconnected_ids, edge_id_dict["between_disconnected"][row_ids, inv_column_ids]]) + disconnected_affs = np.concatenate([disconnected_affs, edge_aff_dict["between_disconnected"][row_ids]]) + disconnected_areas = np.concatenate([disconnected_areas, edge_area_dict["between_disconnected"][row_ids]]) + + time_dict["out_disconnected"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # cross + if node_id in remapping["cross"]: + row_ids, column_ids = sparse_indices["cross"][remapping["cross"][node_id]] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["cross_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + connected_ids = np.concatenate([connected_ids, edge_id_dict["cross"][row_ids, inv_column_ids]]) + connected_affs = np.concatenate([connected_affs, np.full((len(row_ids)), np.inf, dtype=np.float32)]) + connected_areas = np.concatenate([connected_areas, np.ones((len(row_ids)), dtype=np.uint64)]) + + parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["cross"][row_ids]]) + time_dict["cross"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # Create node + partners = np.concatenate([connected_ids, disconnected_ids]) + affinities = np.concatenate([connected_affs, 
disconnected_affs]) + areas = np.concatenate([connected_areas, disconnected_areas]) + connected = np.arange(len(connected_ids), dtype=np.int) + + val_dict = {column_keys.Hierarchy.Parent: parent_id} + + rows.append(cg.mutate_row(serializers.serialize_uint64(node_id), + val_dict, time_stamp=time_stamp)) + node_c += 1 + time_dict["creating_lv1_row"].append(time.time() - time_start_2) + + time_start_1 = time.time() + # Create parent node + rows.append(cg.mutate_row(serializers.serialize_uint64(parent_id), + {column_keys.Hierarchy.Child: node_ids}, + time_stamp=time_stamp)) + + time_dict["creating_lv2_row"].append(time.time() - time_start_1) + time_start_1 = time.time() + + cce_layers = cg.get_cross_chunk_edges_layer(parent_cross_edges) + u_cce_layers = np.unique(cce_layers) + + val_dict = {} + for cc_layer in u_cce_layers: + layer_cross_edges = parent_cross_edges[cce_layers == cc_layer] + + if len(layer_cross_edges) > 0: + val_dict[column_keys.Connectivity.CrossChunkEdge[cc_layer]] = \ + layer_cross_edges + + if len(val_dict) > 0: + rows.append(cg.mutate_row(serializers.serialize_uint64(parent_id), + val_dict, time_stamp=time_stamp)) + node_c += 1 + + time_dict["adding_cross_edges"].append(time.time() - time_start_1) + + if len(rows) > 100000: + time_start_1 = time.time() + cg.bulk_write(rows) + time_dict["writing"].append(time.time() - time_start_1) + + if len(rows) > 0: + time_start_1 = time.time() + cg.bulk_write(rows) + time_dict["writing"].append(time.time() - time_start_1) + + if verbose: + cg.logger.debug("Time creating rows: %.3fs for %d ccs with %d nodes" % + (time.time() - time_start, len(ccs), node_c)) + + for k in time_dict.keys(): + cg.logger.debug("%s -- %.3fms for %d instances -- avg = %.3fms" % + (k, np.sum(time_dict[k])*1000, len(time_dict[k]), + np.mean(time_dict[k])*1000)) \ No newline at end of file diff --git a/pychunkedgraph/ingest/chunkEdges.proto b/pychunkedgraph/ingest/chunkEdges.proto new file mode 100644 index 000000000..369fe9505 --- 
/dev/null +++ b/pychunkedgraph/ingest/chunkEdges.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package test; + +message Edges { + bytes edgeList = 1; + bytes affinities = 2; + bytes areas = 3; +} \ No newline at end of file From 369bd45c2f3c10770f904d5d1f6dfd52a7f0f57f Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 19 Jul 2019 16:10:34 -0400 Subject: [PATCH 0005/1097] remove edge stuff --- pychunkedgraph/backend/chunkedgraph_init.py | 112 +++----------------- 1 file changed, 15 insertions(+), 97 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index 1557ab14c..b6ea25196 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -1,12 +1,21 @@ import time import datetime import os - +import collections from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, NamedTuple +import pytz import numpy as np from pychunkedgraph.backend import cutting, chunkedgraph_comp, flatgraph_utils +from pychunkedgraph.backend.utils import serializers, column_keys, row_keys, basetypes +from pychunkedgraph.backend.chunkedgraph_utils import compute_indices_pandas, \ + compute_bitmasks, get_google_compatible_time_stamp, \ + get_time_range_filter, get_time_range_and_column_filter, get_max_time, \ + combine_cross_chunk_edge_dicts, get_min_time, partial_row_data_to_column_dict + + +UTC = pytz.UTC def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, edge_aff_dict: dict, edge_area_dict: dict, @@ -51,7 +60,7 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, edge_id_dict[key] = np.concatenate( edge_id_dict.get(key, empty_edges_array.copy(), empty_edges_array.copy())) - n_edge_ids += len(edge_id_dict[edge_id_key]) + n_edge_ids += len(edge_id_dict[key]) for key in edge_aff_keys: edge_aff_dict[key] = np.concatenate( @@ -79,7 +88,7 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, raise Exception("%d: %d chunk ids found in 
node id list. " "Some edges might be in the wrong order. " "Number of occurences:" % - (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) + (u_node_chunk_ids[0], len(u_node_chunk_ids)), c_node_chunk_ids) # add self edge to all node_ids to make sure they're @@ -91,6 +100,8 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( edge_ids, make_directed=True) + + time_start = time.time() ccs = flatgraph_utils.connected_components(graph) if verbose: @@ -134,103 +145,10 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, # Add rows for nodes that are in this chunk node_ids = unique_graph_ids[cc] - for i_node_id, node_id in enumerate(node_ids): - # Extract edges relevant to this node - - # in chunk + connected + for node_id in node_ids: time_start_2 = time.time() - if node_id in remapping["in_connected"]: - row_ids, column_ids = sparse_indices["in_connected"][remapping["in_connected"][node_id]] - - inv_column_ids = (column_ids + 1) % 2 - - connected_ids = edge_id_dict["in_connected"][row_ids, inv_column_ids] - connected_affs = edge_aff_dict["in_connected"][row_ids] - connected_areas = edge_area_dict["in_connected"][row_ids] - time_dict["in_connected"].append(time.time() - time_start_2) - time_start_2 = time.time() - else: - connected_ids = np.array([], dtype=np.uint64) - connected_affs = np.array([], dtype=np.float32) - connected_areas = np.array([], dtype=np.uint64) - - # in chunk + disconnected - if node_id in remapping["in_disconnected"]: - row_ids, column_ids = sparse_indices["in_disconnected"][remapping["in_disconnected"][node_id]] - inv_column_ids = (column_ids + 1) % 2 - - disconnected_ids = edge_id_dict["in_disconnected"][row_ids, inv_column_ids] - disconnected_affs = edge_aff_dict["in_disconnected"][row_ids] - disconnected_areas = edge_area_dict["in_disconnected"][row_ids] - time_dict["in_disconnected"].append(time.time() - time_start_2) - time_start_2 = time.time() - else: - 
disconnected_ids = np.array([], dtype=np.uint64) - disconnected_affs = np.array([], dtype=np.float32) - disconnected_areas = np.array([], dtype=np.uint64) - - # out chunk + connected - if node_id in remapping["between_connected"]: - row_ids, column_ids = sparse_indices["between_connected"][remapping["between_connected"][node_id]] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["out_connected_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - connected_ids = np.concatenate([connected_ids, edge_id_dict["between_connected"][row_ids, inv_column_ids]]) - connected_affs = np.concatenate([connected_affs, edge_aff_dict["between_connected"][row_ids]]) - connected_areas = np.concatenate([connected_areas, edge_area_dict["between_connected"][row_ids]]) - - parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["between_connected"][row_ids]]) - - time_dict["out_connected"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # out chunk + disconnected - if node_id in remapping["between_disconnected"]: - row_ids, column_ids = sparse_indices["between_disconnected"][remapping["between_disconnected"][node_id]] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["out_disconnected_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - disconnected_ids = np.concatenate([disconnected_ids, edge_id_dict["between_disconnected"][row_ids, inv_column_ids]]) - disconnected_affs = np.concatenate([disconnected_affs, edge_aff_dict["between_disconnected"][row_ids]]) - disconnected_areas = np.concatenate([disconnected_areas, edge_area_dict["between_disconnected"][row_ids]]) - - time_dict["out_disconnected"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # cross - if node_id in remapping["cross"]: - row_ids, column_ids = 
sparse_indices["cross"][remapping["cross"][node_id]] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["cross_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - connected_ids = np.concatenate([connected_ids, edge_id_dict["cross"][row_ids, inv_column_ids]]) - connected_affs = np.concatenate([connected_affs, np.full((len(row_ids)), np.inf, dtype=np.float32)]) - connected_areas = np.concatenate([connected_areas, np.ones((len(row_ids)), dtype=np.uint64)]) - - parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["cross"][row_ids]]) - time_dict["cross"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # Create node - partners = np.concatenate([connected_ids, disconnected_ids]) - affinities = np.concatenate([connected_affs, disconnected_affs]) - areas = np.concatenate([connected_areas, disconnected_areas]) - connected = np.arange(len(connected_ids), dtype=np.int) val_dict = {column_keys.Hierarchy.Parent: parent_id} - rows.append(cg.mutate_row(serializers.serialize_uint64(node_id), val_dict, time_stamp=time_stamp)) node_c += 1 From 48a5be548fcfe670ca6a556d851258c605e1d9b9 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 22 Jul 2019 11:30:15 -0400 Subject: [PATCH 0006/1097] remove unused stuff --- pychunkedgraph/backend/chunkedgraph.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index c4662b125..41c094222 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3044,14 +3044,6 @@ def _get_subgraph_layer2_edges(node_ids) -> \ time_start = time.time() - child_chunk_ids = self.get_chunk_ids_from_node_ids(child_ids) - u_ccids = np.unique(child_chunk_ids) - - child_blocks = [] - # Make blocks of child ids that are in the same chunk - for u_ccid in u_ccids: - 
child_blocks.append(child_ids[child_chunk_ids == u_ccid]) - n_child_ids = len(child_ids) this_n_threads = np.min([int(n_child_ids // 50000) + 1, mu.n_cpus]) From 3cdcd1d269d17dd94c47c01700d056e67abd7ffd Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 22 Jul 2019 14:38:00 -0400 Subject: [PATCH 0007/1097] wip: subpackage to segregate storage --- pychunkedgraph/backend/chunkedgraph.py | 24 ++++++++++++++++++------ pychunkedgraph/io/__init__.py | 0 pychunkedgraph/io/gcs.py | 12 ++++++++++++ 3 files changed, 30 insertions(+), 6 deletions(-) create mode 100644 pychunkedgraph/io/__init__.py create mode 100644 pychunkedgraph/io/gcs.py diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 41c094222..2fb98557d 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3023,11 +3023,17 @@ def get_subgraph_edges(self, agglomeration_id: np.uint64, :return: edge list """ + # def _get_subgraph_layer2_edges(node_ids) -> \ + # Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: + # return self.get_subgraph_chunk(node_ids, + # connected_edges=connected_edges, + # time_stamp=time_stamp) + def _get_subgraph_layer2_edges(node_ids) -> \ Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: - return self.get_subgraph_chunk(node_ids, + return self.get_subgraph_chunk_v2(node_ids, connected_edges=connected_edges, - time_stamp=time_stamp) + time_stamp=time_stamp) time_stamp = self.read_node_id_row(agglomeration_id, columns=column_keys.Hierarchy.Child)[0].timestamp @@ -3043,9 +3049,15 @@ def _get_subgraph_layer2_edges(node_ids) -> \ if verbose: time_start = time.time() + child_chunk_ids = self.get_chunk_ids_from_node_ids(child_ids) + u_ccids = np.unique(child_chunk_ids) - n_child_ids = len(child_ids) - this_n_threads = np.min([int(n_child_ids // 50000) + 1, mu.n_cpus]) + # Make blocks of child ids that are in the same chunk + child_blocks = [] + for u_ccid in u_ccids: + 
child_blocks.append(child_ids[child_chunk_ids == u_ccid]) + + this_n_threads = np.min([int(len(u_ccids) // 50000) + 1, mu.n_cpus]) edge_infos = mu.multithread_func( _get_subgraph_layer2_edges, @@ -3063,9 +3075,9 @@ def _get_subgraph_layer2_edges(node_ids) -> \ edges = np.concatenate([edges, _edges]) if verbose: - self.logger.debug("Layer %d: %.3fms for %d childs with %d threads" % + self.logger.debug("Layer %d: %.3fms for %d children with %d threads" % (2, (time.time() - time_start) * 1000, - n_child_ids, this_n_threads)) + len(child_ids), this_n_threads)) return edges, affinities, areas diff --git a/pychunkedgraph/io/__init__.py b/pychunkedgraph/io/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pychunkedgraph/io/gcs.py b/pychunkedgraph/io/gcs.py new file mode 100644 index 000000000..63ac7ece5 --- /dev/null +++ b/pychunkedgraph/io/gcs.py @@ -0,0 +1,12 @@ +''' +Functions to use when dealing with Google Cloud Storage +''' + +# TODO some funtions in ChunkedGraph +# should be class methods or util functions +# for now pass instance of ChunkedGraph + +def get_chunk_edges(cg, chunk_id): + chunk_coords = cg.get_chunk_coordinates(chunk_id) + chunk_str = repr(chunk)[1:-1].replace(', ','_') + fname = f'edges_{chunk_str}.data' From 2a5fe5517b6eab7c3544b5db31d7eb2510c08905 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 22 Jul 2019 15:16:04 -0400 Subject: [PATCH 0008/1097] wip: updates --- pychunkedgraph/io/gcs.py | 22 ++++- pychunkedgraph/io/protobuf/__init__.py | 0 .../{ingest => io/protobuf}/chunkEdges.proto | 0 pychunkedgraph/io/protobuf/chunkEdges_pb3.py | 84 +++++++++++++++++++ 4 files changed, 104 insertions(+), 2 deletions(-) create mode 100644 pychunkedgraph/io/protobuf/__init__.py rename pychunkedgraph/{ingest => io/protobuf}/chunkEdges.proto (100%) create mode 100644 pychunkedgraph/io/protobuf/chunkEdges_pb3.py diff --git a/pychunkedgraph/io/gcs.py b/pychunkedgraph/io/gcs.py index 63ac7ece5..c8261aa5e 100644 --- 
a/pychunkedgraph/io/gcs.py +++ b/pychunkedgraph/io/gcs.py @@ -2,11 +2,29 @@ Functions to use when dealing with Google Cloud Storage ''' +import numpy as np +import zstandard as zstd + +from cloudvolume import Storage +from .protobuf.chunkEdges_pb3 import Edges + # TODO some funtions in ChunkedGraph # should be class methods or util functions # for now pass instance of ChunkedGraph -def get_chunk_edges(cg, chunk_id): +def get_chunk_edges(cg, chunk_id: np.uint64): chunk_coords = cg.get_chunk_coordinates(chunk_id) - chunk_str = repr(chunk)[1:-1].replace(', ','_') + chunk_str = '_'.join(str(coord) for coord in chunk_coords) fname = f'edges_{chunk_str}.data' + + edgesMessage = Edges() + with Storage(cg._cv_path) as st: + file_content = st.get_file(fname) + + file_content = zstd.ZstdDecompressor().decompressobj().decompress(file_content) + edgesMessage.ParseFromString(file_content) + edges = np.frombuffer(edgesMessage.edgeList) + areas = np.frombuffer(edgesMessage.areas, dtype=' Date: Thu, 11 Jul 2019 22:35:41 -0400 Subject: [PATCH 0009/1097] wip: refactor add_chunk_edges --- pychunkedgraph/backend/chunkedgraph.py | 291 +++++++++++++++++++++++++ 1 file changed, 291 insertions(+) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 2fb98557d..7a3f2a770 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -1760,6 +1760,297 @@ def add_atomic_edges_in_chunks(self, edge_id_dict: dict, (k, np.sum(time_dict[k])*1000, len(time_dict[k]), np.mean(time_dict[k])*1000)) + def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, + edge_aff_dict: dict, edge_area_dict: dict, + isolated_node_ids: Sequence[np.uint64], + verbose: bool = True, + time_stamp: Optional[datetime.datetime] = None): + """ Creates atomic nodes in first abstraction layer for a SINGLE chunk + and all abstract nodes in the second for the same chunk + + Alle edges (edge_ids) need to be from one chunk and no nodes should + 
exist for this chunk prior to calling this function. All cross edges + (cross_edge_ids) have to point out the chunk (first entry is the id + within the chunk) + + :param edge_id_dict: dict + :param edge_aff_dict: dict + :param edge_area_dict: dict + :param isolated_node_ids: list of uint64s + ids of nodes that have no edge in the chunked graph + :param verbose: bool + :param time_stamp: datetime + """ + if time_stamp is None: + time_stamp = datetime.datetime.utcnow() + + if time_stamp.tzinfo is None: + time_stamp = UTC.localize(time_stamp) + + # Comply to resolution of BigTables TimeRange + time_stamp = get_google_compatible_time_stamp(time_stamp, + round_up=False) + + edge_aff_keys = [ + 'in_connected','in_disconnected','between_connected','between_disconnected'] + edge_id_keys = edge_aff_keys[:].insert(2, 'cross') + + # Check if keys exist and include an empty array if not + n_edge_ids = 0 + empty_edges_array = np.array([], dtype=np.uint64).reshape(0, 2) + + for key in edge_id_keys: + edge_id_dict[key] = np.concatenate( + edge_id_dict.get(key, empty_edges_array.copy(), + empty_edges_array.copy())) + n_edge_ids += len(edge_id_dict[edge_id_key]) + + for key in edge_aff_keys: + edge_aff_dict[key] = np.concatenate( + edge_aff_dict.get(key, empty_edges_array.copy(), + empty_edges_array.copy())) + + time_start = time.time() + + # Get connected component within the chunk + chunk_node_ids = np.concatenate([ + isolated_node_ids.astype(np.uint64), + np.unique(edge_id_dict["in_connected"]), + np.unique(edge_id_dict["in_disconnected"]), + np.unique(edge_id_dict["cross"][:, 0]), + np.unique(edge_id_dict["between_connected"][:, 0]), + np.unique(edge_id_dict["between_disconnected"][:, 0])]) + + + # nothing to do + if not len(chunk_node_ids): return 0 + + chunk_node_ids = np.unique(chunk_node_ids) + + node_chunk_ids = np.array([self.get_chunk_id(c) + for c in chunk_node_ids], + dtype=np.uint64) + + u_node_chunk_ids, c_node_chunk_ids = np.unique(node_chunk_ids, + 
return_counts=True) + if len(u_node_chunk_ids) > 1: + raise Exception("%d: %d chunk ids found in node id list. " + "Some edges might be in the wrong order. " + "Number of occurences:" % + (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) + + add_edge_ids = np.vstack([chunk_node_ids, chunk_node_ids]).T + edge_ids = np.concatenate([edge_id_dict["in_connected"].copy(), + add_edge_ids]) + + graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( + edge_ids, make_directed=True) + + ccs = flatgraph_utils.connected_components(graph) + + if verbose: + self.logger.debug("CC in chunk: %.3fs" % (time.time() - time_start)) + + # Add rows for nodes that are in this chunk + # a connected component at a time + node_c = 0 # Just a counter for the log / speed measurement + + n_ccs = len(ccs) + + # Make parent id creation easier + chunk_id = u_node_chunk_ids[0] + parent_chunk_id = self.get_chunk_id( + layer=2, *self.get_chunk_coordinates(chunk_id)) + + parent_ids = self.get_unique_node_id_range(parent_chunk_id, step=n_ccs) + + time_start = time.time() + time_dict = collections.defaultdict(list) + + time_start_1 = time.time() + sparse_indices = {} + remapping = {} + for k in edge_id_dict.keys(): + # Circumvent datatype issues + + u_ids, inv_ids = np.unique(edge_id_dict[k], return_inverse=True) + mapped_ids = np.arange(len(u_ids), dtype=np.int32) + remapped_arr = mapped_ids[inv_ids].reshape(edge_id_dict[k].shape) + + sparse_indices[k] = compute_indices_pandas(remapped_arr) + remapping[k] = dict(zip(u_ids, mapped_ids)) + + time_dict["sparse_indices"].append(time.time() - time_start_1) + + rows = [] + + for i_cc, cc in enumerate(ccs): + node_ids = unique_graph_ids[cc] + + u_chunk_ids = np.unique([self.get_chunk_id(n) for n in node_ids]) + + if len(u_chunk_ids) > 1: + self.logger.error(f"Found multiple chunk ids: {u_chunk_ids}") + raise Exception() + + # Create parent id + parent_id = parent_ids[i_cc] + + parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2) + + # 
Add rows for nodes that are in this chunk + for i_node_id, node_id in enumerate(node_ids): + # Extract edges relevant to this node + + # in chunk + connected + time_start_2 = time.time() + if node_id in remapping["in_connected"]: + row_ids, column_ids = sparse_indices["in_connected"][remapping["in_connected"][node_id]] + + inv_column_ids = (column_ids + 1) % 2 + + connected_ids = edge_id_dict["in_connected"][row_ids, inv_column_ids] + connected_affs = edge_aff_dict["in_connected"][row_ids] + connected_areas = edge_area_dict["in_connected"][row_ids] + time_dict["in_connected"].append(time.time() - time_start_2) + time_start_2 = time.time() + else: + connected_ids = np.array([], dtype=np.uint64) + connected_affs = np.array([], dtype=np.float32) + connected_areas = np.array([], dtype=np.uint64) + + # in chunk + disconnected + if node_id in remapping["in_disconnected"]: + row_ids, column_ids = sparse_indices["in_disconnected"][remapping["in_disconnected"][node_id]] + inv_column_ids = (column_ids + 1) % 2 + + disconnected_ids = edge_id_dict["in_disconnected"][row_ids, inv_column_ids] + disconnected_affs = edge_aff_dict["in_disconnected"][row_ids] + disconnected_areas = edge_area_dict["in_disconnected"][row_ids] + time_dict["in_disconnected"].append(time.time() - time_start_2) + time_start_2 = time.time() + else: + disconnected_ids = np.array([], dtype=np.uint64) + disconnected_affs = np.array([], dtype=np.float32) + disconnected_areas = np.array([], dtype=np.uint64) + + # out chunk + connected + if node_id in remapping["between_connected"]: + row_ids, column_ids = sparse_indices["between_connected"][remapping["between_connected"][node_id]] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["out_connected_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + connected_ids = np.concatenate([connected_ids, edge_id_dict["between_connected"][row_ids, inv_column_ids]]) + 
connected_affs = np.concatenate([connected_affs, edge_aff_dict["between_connected"][row_ids]]) + connected_areas = np.concatenate([connected_areas, edge_area_dict["between_connected"][row_ids]]) + + parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["between_connected"][row_ids]]) + + time_dict["out_connected"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # out chunk + disconnected + if node_id in remapping["between_disconnected"]: + row_ids, column_ids = sparse_indices["between_disconnected"][remapping["between_disconnected"][node_id]] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["out_disconnected_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + disconnected_ids = np.concatenate([disconnected_ids, edge_id_dict["between_disconnected"][row_ids, inv_column_ids]]) + disconnected_affs = np.concatenate([disconnected_affs, edge_aff_dict["between_disconnected"][row_ids]]) + disconnected_areas = np.concatenate([disconnected_areas, edge_area_dict["between_disconnected"][row_ids]]) + + time_dict["out_disconnected"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # cross + if node_id in remapping["cross"]: + row_ids, column_ids = sparse_indices["cross"][remapping["cross"][node_id]] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["cross_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + connected_ids = np.concatenate([connected_ids, edge_id_dict["cross"][row_ids, inv_column_ids]]) + connected_affs = np.concatenate([connected_affs, np.full((len(row_ids)), np.inf, dtype=np.float32)]) + connected_areas = np.concatenate([connected_areas, np.ones((len(row_ids)), dtype=np.uint64)]) + + parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["cross"][row_ids]]) + 
time_dict["cross"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # Create node + partners = np.concatenate([connected_ids, disconnected_ids]) + affinities = np.concatenate([connected_affs, disconnected_affs]) + areas = np.concatenate([connected_areas, disconnected_areas]) + connected = np.arange(len(connected_ids), dtype=np.int) + + val_dict = {column_keys.Connectivity.Partner: partners, + column_keys.Connectivity.Affinity: affinities, + column_keys.Connectivity.Area: areas, + column_keys.Connectivity.Connected: connected, + column_keys.Hierarchy.Parent: parent_id} + + rows.append(self.mutate_row(serializers.serialize_uint64(node_id), + val_dict, time_stamp=time_stamp)) + node_c += 1 + time_dict["creating_lv1_row"].append(time.time() - time_start_2) + + time_start_1 = time.time() + # Create parent node + rows.append(self.mutate_row(serializers.serialize_uint64(parent_id), + {column_keys.Hierarchy.Child: node_ids}, + time_stamp=time_stamp)) + + time_dict["creating_lv2_row"].append(time.time() - time_start_1) + time_start_1 = time.time() + + cce_layers = self.get_cross_chunk_edges_layer(parent_cross_edges) + u_cce_layers = np.unique(cce_layers) + + val_dict = {} + for cc_layer in u_cce_layers: + layer_cross_edges = parent_cross_edges[cce_layers == cc_layer] + + if len(layer_cross_edges) > 0: + val_dict[column_keys.Connectivity.CrossChunkEdge[cc_layer]] = \ + layer_cross_edges + + if len(val_dict) > 0: + rows.append(self.mutate_row(serializers.serialize_uint64(parent_id), + val_dict, time_stamp=time_stamp)) + node_c += 1 + + time_dict["adding_cross_edges"].append(time.time() - time_start_1) + + if len(rows) > 100000: + time_start_1 = time.time() + self.bulk_write(rows) + time_dict["writing"].append(time.time() - time_start_1) + + if len(rows) > 0: + time_start_1 = time.time() + self.bulk_write(rows) + time_dict["writing"].append(time.time() - time_start_1) + + if verbose: + self.logger.debug("Time creating rows: %.3fs for %d ccs with %d nodes" % 
+ (time.time() - time_start, len(ccs), node_c)) + + for k in time_dict.keys(): + self.logger.debug("%s -- %.3fms for %d instances -- avg = %.3fms" % + (k, np.sum(time_dict[k])*1000, len(time_dict[k]), + np.mean(time_dict[k])*1000)) + def add_layer(self, layer_id: int, child_chunk_coords: Sequence[Sequence[int]], time_stamp: Optional[datetime.datetime] = None, From 9c130a8d0a4fa0986d5a4b756562eb450cf55ff7 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 12 Jul 2019 15:27:27 -0400 Subject: [PATCH 0010/1097] wip: remove redundant unique chunk id check --- pychunkedgraph/backend/chunkedgraph.py | 39 +++++++++----------------- 1 file changed, 14 insertions(+), 25 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 7a3f2a770..deadb94fb 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -1810,38 +1810,37 @@ def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, edge_aff_dict.get(key, empty_edges_array.copy(), empty_edges_array.copy())) - time_start = time.time() - # Get connected component within the chunk chunk_node_ids = np.concatenate([ isolated_node_ids.astype(np.uint64), - np.unique(edge_id_dict["in_connected"]), - np.unique(edge_id_dict["in_disconnected"]), np.unique(edge_id_dict["cross"][:, 0]), np.unique(edge_id_dict["between_connected"][:, 0]), np.unique(edge_id_dict["between_disconnected"][:, 0])]) + add_edge_ids = np.vstack([chunk_node_ids.copy(), chunk_node_ids.copy()]).T + + chunk_node_ids = np.concatenate([ + chunk_node_ids, + np.unique(edge_id_dict["in_connected"]), + np.unique(edge_id_dict["in_disconnected"])]) # nothing to do if not len(chunk_node_ids): return 0 chunk_node_ids = np.unique(chunk_node_ids) + node_chunk_ids = np.array( + [self.get_chunk_id(c) for c in chunk_node_ids], dtype=np.uint64) - node_chunk_ids = np.array([self.get_chunk_id(c) - for c in chunk_node_ids], - dtype=np.uint64) - - u_node_chunk_ids, c_node_chunk_ids 
= np.unique(node_chunk_ids, - return_counts=True) + u_node_chunk_ids, c_node_chunk_ids = np.unique( + node_chunk_ids, return_counts=True) if len(u_node_chunk_ids) > 1: raise Exception("%d: %d chunk ids found in node id list. " "Some edges might be in the wrong order. " "Number of occurences:" % (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) - add_edge_ids = np.vstack([chunk_node_ids, chunk_node_ids]).T - edge_ids = np.concatenate([edge_id_dict["in_connected"].copy(), - add_edge_ids]) + edge_ids = np.concatenate( + [edge_id_dict["in_connected"].copy(), add_edge_ids]) graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( edge_ids, make_directed=True) @@ -1858,9 +1857,8 @@ def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, n_ccs = len(ccs) # Make parent id creation easier - chunk_id = u_node_chunk_ids[0] parent_chunk_id = self.get_chunk_id( - layer=2, *self.get_chunk_coordinates(chunk_id)) + layer=2, *self.get_chunk_coordinates(u_node_chunk_ids[0])) parent_ids = self.get_unique_node_id_range(parent_chunk_id, step=n_ccs) @@ -1885,20 +1883,11 @@ def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, rows = [] for i_cc, cc in enumerate(ccs): - node_ids = unique_graph_ids[cc] - - u_chunk_ids = np.unique([self.get_chunk_id(n) for n in node_ids]) - - if len(u_chunk_ids) > 1: - self.logger.error(f"Found multiple chunk ids: {u_chunk_ids}") - raise Exception() - - # Create parent id parent_id = parent_ids[i_cc] - parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2) # Add rows for nodes that are in this chunk + node_ids = unique_graph_ids[cc] for i_node_id, node_id in enumerate(node_ids): # Extract edges relevant to this node From c0e8cecc3d906ae1a15e0a2a436041438724f68f Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 12 Jul 2019 15:45:57 -0400 Subject: [PATCH 0011/1097] wip: revert mistake, add more comments --- pychunkedgraph/backend/chunkedgraph.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) 
diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index deadb94fb..9e33b4cc2 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -1813,18 +1813,12 @@ def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, # Get connected component within the chunk chunk_node_ids = np.concatenate([ isolated_node_ids.astype(np.uint64), + np.unique(edge_id_dict["in_connected"]), + np.unique(edge_id_dict["in_disconnected"]), np.unique(edge_id_dict["cross"][:, 0]), np.unique(edge_id_dict["between_connected"][:, 0]), np.unique(edge_id_dict["between_disconnected"][:, 0])]) - - add_edge_ids = np.vstack([chunk_node_ids.copy(), chunk_node_ids.copy()]).T - - chunk_node_ids = np.concatenate([ - chunk_node_ids, - np.unique(edge_id_dict["in_connected"]), - np.unique(edge_id_dict["in_disconnected"])]) - # nothing to do if not len(chunk_node_ids): return 0 chunk_node_ids = np.unique(chunk_node_ids) @@ -1839,12 +1833,16 @@ def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, "Number of occurences:" % (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) - edge_ids = np.concatenate( - [edge_id_dict["in_connected"].copy(), add_edge_ids]) + + # add self edge to all node_ids to make sure they're + # part of connected components because the graph is processed component wise + # if not, the node_ids won't be stored + edge_ids = np.concatenate([ + edge_id_dict["in_connected"].copy(), + np.vstack([chunk_node_ids, chunk_node_ids]).T]) graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( edge_ids, make_directed=True) - ccs = flatgraph_utils.connected_components(graph) if verbose: From 6886b03b75da391a2f0774f6bca7ac5bd6660928 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 19 Jul 2019 15:46:00 -0400 Subject: [PATCH 0012/1097] move add edges to separate module --- pychunkedgraph/backend/chunkedgraph.py | 278 -------------------- pychunkedgraph/backend/chunkedgraph_init.py | 112 ++++++-- 
pychunkedgraph/ingest/chunkEdges.proto | 9 + 3 files changed, 106 insertions(+), 293 deletions(-) create mode 100644 pychunkedgraph/ingest/chunkEdges.proto diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 9e33b4cc2..2fb98557d 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -1760,284 +1760,6 @@ def add_atomic_edges_in_chunks(self, edge_id_dict: dict, (k, np.sum(time_dict[k])*1000, len(time_dict[k]), np.mean(time_dict[k])*1000)) - def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, - edge_aff_dict: dict, edge_area_dict: dict, - isolated_node_ids: Sequence[np.uint64], - verbose: bool = True, - time_stamp: Optional[datetime.datetime] = None): - """ Creates atomic nodes in first abstraction layer for a SINGLE chunk - and all abstract nodes in the second for the same chunk - - Alle edges (edge_ids) need to be from one chunk and no nodes should - exist for this chunk prior to calling this function. 
All cross edges - (cross_edge_ids) have to point out the chunk (first entry is the id - within the chunk) - - :param edge_id_dict: dict - :param edge_aff_dict: dict - :param edge_area_dict: dict - :param isolated_node_ids: list of uint64s - ids of nodes that have no edge in the chunked graph - :param verbose: bool - :param time_stamp: datetime - """ - if time_stamp is None: - time_stamp = datetime.datetime.utcnow() - - if time_stamp.tzinfo is None: - time_stamp = UTC.localize(time_stamp) - - # Comply to resolution of BigTables TimeRange - time_stamp = get_google_compatible_time_stamp(time_stamp, - round_up=False) - - edge_aff_keys = [ - 'in_connected','in_disconnected','between_connected','between_disconnected'] - edge_id_keys = edge_aff_keys[:].insert(2, 'cross') - - # Check if keys exist and include an empty array if not - n_edge_ids = 0 - empty_edges_array = np.array([], dtype=np.uint64).reshape(0, 2) - - for key in edge_id_keys: - edge_id_dict[key] = np.concatenate( - edge_id_dict.get(key, empty_edges_array.copy(), - empty_edges_array.copy())) - n_edge_ids += len(edge_id_dict[edge_id_key]) - - for key in edge_aff_keys: - edge_aff_dict[key] = np.concatenate( - edge_aff_dict.get(key, empty_edges_array.copy(), - empty_edges_array.copy())) - - # Get connected component within the chunk - chunk_node_ids = np.concatenate([ - isolated_node_ids.astype(np.uint64), - np.unique(edge_id_dict["in_connected"]), - np.unique(edge_id_dict["in_disconnected"]), - np.unique(edge_id_dict["cross"][:, 0]), - np.unique(edge_id_dict["between_connected"][:, 0]), - np.unique(edge_id_dict["between_disconnected"][:, 0])]) - - if not len(chunk_node_ids): return 0 - - chunk_node_ids = np.unique(chunk_node_ids) - node_chunk_ids = np.array( - [self.get_chunk_id(c) for c in chunk_node_ids], dtype=np.uint64) - - u_node_chunk_ids, c_node_chunk_ids = np.unique( - node_chunk_ids, return_counts=True) - if len(u_node_chunk_ids) > 1: - raise Exception("%d: %d chunk ids found in node id list. 
" - "Some edges might be in the wrong order. " - "Number of occurences:" % - (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) - - - # add self edge to all node_ids to make sure they're - # part of connected components because the graph is processed component wise - # if not, the node_ids won't be stored - edge_ids = np.concatenate([ - edge_id_dict["in_connected"].copy(), - np.vstack([chunk_node_ids, chunk_node_ids]).T]) - - graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( - edge_ids, make_directed=True) - ccs = flatgraph_utils.connected_components(graph) - - if verbose: - self.logger.debug("CC in chunk: %.3fs" % (time.time() - time_start)) - - # Add rows for nodes that are in this chunk - # a connected component at a time - node_c = 0 # Just a counter for the log / speed measurement - - n_ccs = len(ccs) - - # Make parent id creation easier - parent_chunk_id = self.get_chunk_id( - layer=2, *self.get_chunk_coordinates(u_node_chunk_ids[0])) - - parent_ids = self.get_unique_node_id_range(parent_chunk_id, step=n_ccs) - - time_start = time.time() - time_dict = collections.defaultdict(list) - - time_start_1 = time.time() - sparse_indices = {} - remapping = {} - for k in edge_id_dict.keys(): - # Circumvent datatype issues - - u_ids, inv_ids = np.unique(edge_id_dict[k], return_inverse=True) - mapped_ids = np.arange(len(u_ids), dtype=np.int32) - remapped_arr = mapped_ids[inv_ids].reshape(edge_id_dict[k].shape) - - sparse_indices[k] = compute_indices_pandas(remapped_arr) - remapping[k] = dict(zip(u_ids, mapped_ids)) - - time_dict["sparse_indices"].append(time.time() - time_start_1) - - rows = [] - - for i_cc, cc in enumerate(ccs): - parent_id = parent_ids[i_cc] - parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2) - - # Add rows for nodes that are in this chunk - node_ids = unique_graph_ids[cc] - for i_node_id, node_id in enumerate(node_ids): - # Extract edges relevant to this node - - # in chunk + connected - time_start_2 = time.time() - if 
node_id in remapping["in_connected"]: - row_ids, column_ids = sparse_indices["in_connected"][remapping["in_connected"][node_id]] - - inv_column_ids = (column_ids + 1) % 2 - - connected_ids = edge_id_dict["in_connected"][row_ids, inv_column_ids] - connected_affs = edge_aff_dict["in_connected"][row_ids] - connected_areas = edge_area_dict["in_connected"][row_ids] - time_dict["in_connected"].append(time.time() - time_start_2) - time_start_2 = time.time() - else: - connected_ids = np.array([], dtype=np.uint64) - connected_affs = np.array([], dtype=np.float32) - connected_areas = np.array([], dtype=np.uint64) - - # in chunk + disconnected - if node_id in remapping["in_disconnected"]: - row_ids, column_ids = sparse_indices["in_disconnected"][remapping["in_disconnected"][node_id]] - inv_column_ids = (column_ids + 1) % 2 - - disconnected_ids = edge_id_dict["in_disconnected"][row_ids, inv_column_ids] - disconnected_affs = edge_aff_dict["in_disconnected"][row_ids] - disconnected_areas = edge_area_dict["in_disconnected"][row_ids] - time_dict["in_disconnected"].append(time.time() - time_start_2) - time_start_2 = time.time() - else: - disconnected_ids = np.array([], dtype=np.uint64) - disconnected_affs = np.array([], dtype=np.float32) - disconnected_areas = np.array([], dtype=np.uint64) - - # out chunk + connected - if node_id in remapping["between_connected"]: - row_ids, column_ids = sparse_indices["between_connected"][remapping["between_connected"][node_id]] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["out_connected_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - connected_ids = np.concatenate([connected_ids, edge_id_dict["between_connected"][row_ids, inv_column_ids]]) - connected_affs = np.concatenate([connected_affs, edge_aff_dict["between_connected"][row_ids]]) - connected_areas = np.concatenate([connected_areas, edge_area_dict["between_connected"][row_ids]]) 
- - parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["between_connected"][row_ids]]) - - time_dict["out_connected"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # out chunk + disconnected - if node_id in remapping["between_disconnected"]: - row_ids, column_ids = sparse_indices["between_disconnected"][remapping["between_disconnected"][node_id]] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["out_disconnected_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - disconnected_ids = np.concatenate([disconnected_ids, edge_id_dict["between_disconnected"][row_ids, inv_column_ids]]) - disconnected_affs = np.concatenate([disconnected_affs, edge_aff_dict["between_disconnected"][row_ids]]) - disconnected_areas = np.concatenate([disconnected_areas, edge_area_dict["between_disconnected"][row_ids]]) - - time_dict["out_disconnected"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # cross - if node_id in remapping["cross"]: - row_ids, column_ids = sparse_indices["cross"][remapping["cross"][node_id]] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["cross_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - connected_ids = np.concatenate([connected_ids, edge_id_dict["cross"][row_ids, inv_column_ids]]) - connected_affs = np.concatenate([connected_affs, np.full((len(row_ids)), np.inf, dtype=np.float32)]) - connected_areas = np.concatenate([connected_areas, np.ones((len(row_ids)), dtype=np.uint64)]) - - parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["cross"][row_ids]]) - time_dict["cross"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # Create node - partners = np.concatenate([connected_ids, disconnected_ids]) - affinities = np.concatenate([connected_affs, 
disconnected_affs]) - areas = np.concatenate([connected_areas, disconnected_areas]) - connected = np.arange(len(connected_ids), dtype=np.int) - - val_dict = {column_keys.Connectivity.Partner: partners, - column_keys.Connectivity.Affinity: affinities, - column_keys.Connectivity.Area: areas, - column_keys.Connectivity.Connected: connected, - column_keys.Hierarchy.Parent: parent_id} - - rows.append(self.mutate_row(serializers.serialize_uint64(node_id), - val_dict, time_stamp=time_stamp)) - node_c += 1 - time_dict["creating_lv1_row"].append(time.time() - time_start_2) - - time_start_1 = time.time() - # Create parent node - rows.append(self.mutate_row(serializers.serialize_uint64(parent_id), - {column_keys.Hierarchy.Child: node_ids}, - time_stamp=time_stamp)) - - time_dict["creating_lv2_row"].append(time.time() - time_start_1) - time_start_1 = time.time() - - cce_layers = self.get_cross_chunk_edges_layer(parent_cross_edges) - u_cce_layers = np.unique(cce_layers) - - val_dict = {} - for cc_layer in u_cce_layers: - layer_cross_edges = parent_cross_edges[cce_layers == cc_layer] - - if len(layer_cross_edges) > 0: - val_dict[column_keys.Connectivity.CrossChunkEdge[cc_layer]] = \ - layer_cross_edges - - if len(val_dict) > 0: - rows.append(self.mutate_row(serializers.serialize_uint64(parent_id), - val_dict, time_stamp=time_stamp)) - node_c += 1 - - time_dict["adding_cross_edges"].append(time.time() - time_start_1) - - if len(rows) > 100000: - time_start_1 = time.time() - self.bulk_write(rows) - time_dict["writing"].append(time.time() - time_start_1) - - if len(rows) > 0: - time_start_1 = time.time() - self.bulk_write(rows) - time_dict["writing"].append(time.time() - time_start_1) - - if verbose: - self.logger.debug("Time creating rows: %.3fs for %d ccs with %d nodes" % - (time.time() - time_start, len(ccs), node_c)) - - for k in time_dict.keys(): - self.logger.debug("%s -- %.3fms for %d instances -- avg = %.3fms" % - (k, np.sum(time_dict[k])*1000, len(time_dict[k]), - 
np.mean(time_dict[k])*1000)) - def add_layer(self, layer_id: int, child_chunk_coords: Sequence[Sequence[int]], time_stamp: Optional[datetime.datetime] = None, diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index b6ea25196..1557ab14c 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -1,21 +1,12 @@ import time import datetime import os -import collections + from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, NamedTuple -import pytz import numpy as np from pychunkedgraph.backend import cutting, chunkedgraph_comp, flatgraph_utils -from pychunkedgraph.backend.utils import serializers, column_keys, row_keys, basetypes -from pychunkedgraph.backend.chunkedgraph_utils import compute_indices_pandas, \ - compute_bitmasks, get_google_compatible_time_stamp, \ - get_time_range_filter, get_time_range_and_column_filter, get_max_time, \ - combine_cross_chunk_edge_dicts, get_min_time, partial_row_data_to_column_dict - - -UTC = pytz.UTC def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, edge_aff_dict: dict, edge_area_dict: dict, @@ -60,7 +51,7 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, edge_id_dict[key] = np.concatenate( edge_id_dict.get(key, empty_edges_array.copy(), empty_edges_array.copy())) - n_edge_ids += len(edge_id_dict[key]) + n_edge_ids += len(edge_id_dict[edge_id_key]) for key in edge_aff_keys: edge_aff_dict[key] = np.concatenate( @@ -88,7 +79,7 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, raise Exception("%d: %d chunk ids found in node id list. " "Some edges might be in the wrong order. 
" "Number of occurences:" % - (u_node_chunk_ids[0], len(u_node_chunk_ids)), c_node_chunk_ids) + (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) # add self edge to all node_ids to make sure they're @@ -100,8 +91,6 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( edge_ids, make_directed=True) - - time_start = time.time() ccs = flatgraph_utils.connected_components(graph) if verbose: @@ -145,10 +134,103 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, # Add rows for nodes that are in this chunk node_ids = unique_graph_ids[cc] - for node_id in node_ids: + for i_node_id, node_id in enumerate(node_ids): + # Extract edges relevant to this node + + # in chunk + connected time_start_2 = time.time() + if node_id in remapping["in_connected"]: + row_ids, column_ids = sparse_indices["in_connected"][remapping["in_connected"][node_id]] + + inv_column_ids = (column_ids + 1) % 2 + + connected_ids = edge_id_dict["in_connected"][row_ids, inv_column_ids] + connected_affs = edge_aff_dict["in_connected"][row_ids] + connected_areas = edge_area_dict["in_connected"][row_ids] + time_dict["in_connected"].append(time.time() - time_start_2) + time_start_2 = time.time() + else: + connected_ids = np.array([], dtype=np.uint64) + connected_affs = np.array([], dtype=np.float32) + connected_areas = np.array([], dtype=np.uint64) + + # in chunk + disconnected + if node_id in remapping["in_disconnected"]: + row_ids, column_ids = sparse_indices["in_disconnected"][remapping["in_disconnected"][node_id]] + inv_column_ids = (column_ids + 1) % 2 + + disconnected_ids = edge_id_dict["in_disconnected"][row_ids, inv_column_ids] + disconnected_affs = edge_aff_dict["in_disconnected"][row_ids] + disconnected_areas = edge_area_dict["in_disconnected"][row_ids] + time_dict["in_disconnected"].append(time.time() - time_start_2) + time_start_2 = time.time() + else: + disconnected_ids = np.array([], dtype=np.uint64) + 
disconnected_affs = np.array([], dtype=np.float32) + disconnected_areas = np.array([], dtype=np.uint64) + + # out chunk + connected + if node_id in remapping["between_connected"]: + row_ids, column_ids = sparse_indices["between_connected"][remapping["between_connected"][node_id]] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["out_connected_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + connected_ids = np.concatenate([connected_ids, edge_id_dict["between_connected"][row_ids, inv_column_ids]]) + connected_affs = np.concatenate([connected_affs, edge_aff_dict["between_connected"][row_ids]]) + connected_areas = np.concatenate([connected_areas, edge_area_dict["between_connected"][row_ids]]) + + parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["between_connected"][row_ids]]) + + time_dict["out_connected"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # out chunk + disconnected + if node_id in remapping["between_disconnected"]: + row_ids, column_ids = sparse_indices["between_disconnected"][remapping["between_disconnected"][node_id]] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["out_disconnected_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + disconnected_ids = np.concatenate([disconnected_ids, edge_id_dict["between_disconnected"][row_ids, inv_column_ids]]) + disconnected_affs = np.concatenate([disconnected_affs, edge_aff_dict["between_disconnected"][row_ids]]) + disconnected_areas = np.concatenate([disconnected_areas, edge_area_dict["between_disconnected"][row_ids]]) + + time_dict["out_disconnected"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # cross + if node_id in remapping["cross"]: + row_ids, column_ids = sparse_indices["cross"][remapping["cross"][node_id]] + + row_ids = 
row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["cross_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + connected_ids = np.concatenate([connected_ids, edge_id_dict["cross"][row_ids, inv_column_ids]]) + connected_affs = np.concatenate([connected_affs, np.full((len(row_ids)), np.inf, dtype=np.float32)]) + connected_areas = np.concatenate([connected_areas, np.ones((len(row_ids)), dtype=np.uint64)]) + + parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["cross"][row_ids]]) + time_dict["cross"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # Create node + partners = np.concatenate([connected_ids, disconnected_ids]) + affinities = np.concatenate([connected_affs, disconnected_affs]) + areas = np.concatenate([connected_areas, disconnected_areas]) + connected = np.arange(len(connected_ids), dtype=np.int) val_dict = {column_keys.Hierarchy.Parent: parent_id} + rows.append(cg.mutate_row(serializers.serialize_uint64(node_id), val_dict, time_stamp=time_stamp)) node_c += 1 diff --git a/pychunkedgraph/ingest/chunkEdges.proto b/pychunkedgraph/ingest/chunkEdges.proto new file mode 100644 index 000000000..369fe9505 --- /dev/null +++ b/pychunkedgraph/ingest/chunkEdges.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package test; + +message Edges { + bytes edgeList = 1; + bytes affinities = 2; + bytes areas = 3; +} \ No newline at end of file From 6f43d472608fadcfb890d508e59278a777781013 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 19 Jul 2019 16:10:34 -0400 Subject: [PATCH 0013/1097] remove edge stuff --- pychunkedgraph/backend/chunkedgraph_init.py | 112 +++----------------- 1 file changed, 15 insertions(+), 97 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index 1557ab14c..b6ea25196 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ 
b/pychunkedgraph/backend/chunkedgraph_init.py @@ -1,12 +1,21 @@ import time import datetime import os - +import collections from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, NamedTuple +import pytz import numpy as np from pychunkedgraph.backend import cutting, chunkedgraph_comp, flatgraph_utils +from pychunkedgraph.backend.utils import serializers, column_keys, row_keys, basetypes +from pychunkedgraph.backend.chunkedgraph_utils import compute_indices_pandas, \ + compute_bitmasks, get_google_compatible_time_stamp, \ + get_time_range_filter, get_time_range_and_column_filter, get_max_time, \ + combine_cross_chunk_edge_dicts, get_min_time, partial_row_data_to_column_dict + + +UTC = pytz.UTC def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, edge_aff_dict: dict, edge_area_dict: dict, @@ -51,7 +60,7 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, edge_id_dict[key] = np.concatenate( edge_id_dict.get(key, empty_edges_array.copy(), empty_edges_array.copy())) - n_edge_ids += len(edge_id_dict[edge_id_key]) + n_edge_ids += len(edge_id_dict[key]) for key in edge_aff_keys: edge_aff_dict[key] = np.concatenate( @@ -79,7 +88,7 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, raise Exception("%d: %d chunk ids found in node id list. " "Some edges might be in the wrong order. 
" "Number of occurences:" % - (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) + (u_node_chunk_ids[0], len(u_node_chunk_ids)), c_node_chunk_ids) # add self edge to all node_ids to make sure they're @@ -91,6 +100,8 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( edge_ids, make_directed=True) + + time_start = time.time() ccs = flatgraph_utils.connected_components(graph) if verbose: @@ -134,103 +145,10 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, # Add rows for nodes that are in this chunk node_ids = unique_graph_ids[cc] - for i_node_id, node_id in enumerate(node_ids): - # Extract edges relevant to this node - - # in chunk + connected + for node_id in node_ids: time_start_2 = time.time() - if node_id in remapping["in_connected"]: - row_ids, column_ids = sparse_indices["in_connected"][remapping["in_connected"][node_id]] - - inv_column_ids = (column_ids + 1) % 2 - - connected_ids = edge_id_dict["in_connected"][row_ids, inv_column_ids] - connected_affs = edge_aff_dict["in_connected"][row_ids] - connected_areas = edge_area_dict["in_connected"][row_ids] - time_dict["in_connected"].append(time.time() - time_start_2) - time_start_2 = time.time() - else: - connected_ids = np.array([], dtype=np.uint64) - connected_affs = np.array([], dtype=np.float32) - connected_areas = np.array([], dtype=np.uint64) - - # in chunk + disconnected - if node_id in remapping["in_disconnected"]: - row_ids, column_ids = sparse_indices["in_disconnected"][remapping["in_disconnected"][node_id]] - inv_column_ids = (column_ids + 1) % 2 - - disconnected_ids = edge_id_dict["in_disconnected"][row_ids, inv_column_ids] - disconnected_affs = edge_aff_dict["in_disconnected"][row_ids] - disconnected_areas = edge_area_dict["in_disconnected"][row_ids] - time_dict["in_disconnected"].append(time.time() - time_start_2) - time_start_2 = time.time() - else: - disconnected_ids = np.array([], dtype=np.uint64) - 
disconnected_affs = np.array([], dtype=np.float32) - disconnected_areas = np.array([], dtype=np.uint64) - - # out chunk + connected - if node_id in remapping["between_connected"]: - row_ids, column_ids = sparse_indices["between_connected"][remapping["between_connected"][node_id]] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["out_connected_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - connected_ids = np.concatenate([connected_ids, edge_id_dict["between_connected"][row_ids, inv_column_ids]]) - connected_affs = np.concatenate([connected_affs, edge_aff_dict["between_connected"][row_ids]]) - connected_areas = np.concatenate([connected_areas, edge_area_dict["between_connected"][row_ids]]) - - parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["between_connected"][row_ids]]) - - time_dict["out_connected"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # out chunk + disconnected - if node_id in remapping["between_disconnected"]: - row_ids, column_ids = sparse_indices["between_disconnected"][remapping["between_disconnected"][node_id]] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["out_disconnected_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - disconnected_ids = np.concatenate([disconnected_ids, edge_id_dict["between_disconnected"][row_ids, inv_column_ids]]) - disconnected_affs = np.concatenate([disconnected_affs, edge_aff_dict["between_disconnected"][row_ids]]) - disconnected_areas = np.concatenate([disconnected_areas, edge_area_dict["between_disconnected"][row_ids]]) - - time_dict["out_disconnected"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # cross - if node_id in remapping["cross"]: - row_ids, column_ids = sparse_indices["cross"][remapping["cross"][node_id]] - - row_ids = 
row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["cross_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - connected_ids = np.concatenate([connected_ids, edge_id_dict["cross"][row_ids, inv_column_ids]]) - connected_affs = np.concatenate([connected_affs, np.full((len(row_ids)), np.inf, dtype=np.float32)]) - connected_areas = np.concatenate([connected_areas, np.ones((len(row_ids)), dtype=np.uint64)]) - - parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["cross"][row_ids]]) - time_dict["cross"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # Create node - partners = np.concatenate([connected_ids, disconnected_ids]) - affinities = np.concatenate([connected_affs, disconnected_affs]) - areas = np.concatenate([connected_areas, disconnected_areas]) - connected = np.arange(len(connected_ids), dtype=np.int) val_dict = {column_keys.Hierarchy.Parent: parent_id} - rows.append(cg.mutate_row(serializers.serialize_uint64(node_id), val_dict, time_stamp=time_stamp)) node_c += 1 From 30eb3d487e8348d85858123fa9932722a605b86d Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 22 Jul 2019 11:30:15 -0400 Subject: [PATCH 0014/1097] remove unused stuff --- pychunkedgraph/backend/chunkedgraph.py | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 2fb98557d..673a93c5c 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3049,15 +3049,9 @@ def _get_subgraph_layer2_edges(node_ids) -> \ if verbose: time_start = time.time() - child_chunk_ids = self.get_chunk_ids_from_node_ids(child_ids) - u_ccids = np.unique(child_chunk_ids) - - # Make blocks of child ids that are in the same chunk - child_blocks = [] - for u_ccid in u_ccids: - child_blocks.append(child_ids[child_chunk_ids == u_ccid]) - - 
this_n_threads = np.min([int(len(u_ccids) // 50000) + 1, mu.n_cpus]) + + n_child_ids = len(child_ids) + this_n_threads = np.min([int(n_child_ids // 50000) + 1, mu.n_cpus]) edge_infos = mu.multithread_func( _get_subgraph_layer2_edges, From 3bdebe5523bb03d53f32b7a42172ed547dddc196 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 22 Jul 2019 14:38:00 -0400 Subject: [PATCH 0015/1097] wip: subpackage to segregate storage --- pychunkedgraph/backend/chunkedgraph.py | 12 +++++++++--- pychunkedgraph/io/gcs.py | 22 ++-------------------- 2 files changed, 11 insertions(+), 23 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 673a93c5c..2fb98557d 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3049,9 +3049,15 @@ def _get_subgraph_layer2_edges(node_ids) -> \ if verbose: time_start = time.time() - - n_child_ids = len(child_ids) - this_n_threads = np.min([int(n_child_ids // 50000) + 1, mu.n_cpus]) + child_chunk_ids = self.get_chunk_ids_from_node_ids(child_ids) + u_ccids = np.unique(child_chunk_ids) + + # Make blocks of child ids that are in the same chunk + child_blocks = [] + for u_ccid in u_ccids: + child_blocks.append(child_ids[child_chunk_ids == u_ccid]) + + this_n_threads = np.min([int(len(u_ccids) // 50000) + 1, mu.n_cpus]) edge_infos = mu.multithread_func( _get_subgraph_layer2_edges, diff --git a/pychunkedgraph/io/gcs.py b/pychunkedgraph/io/gcs.py index c8261aa5e..63ac7ece5 100644 --- a/pychunkedgraph/io/gcs.py +++ b/pychunkedgraph/io/gcs.py @@ -2,29 +2,11 @@ Functions to use when dealing with Google Cloud Storage ''' -import numpy as np -import zstandard as zstd - -from cloudvolume import Storage -from .protobuf.chunkEdges_pb3 import Edges - # TODO some funtions in ChunkedGraph # should be class methods or util functions # for now pass instance of ChunkedGraph -def get_chunk_edges(cg, chunk_id: np.uint64): +def get_chunk_edges(cg, chunk_id): 
chunk_coords = cg.get_chunk_coordinates(chunk_id) - chunk_str = '_'.join(str(coord) for coord in chunk_coords) + chunk_str = repr(chunk)[1:-1].replace(', ','_') fname = f'edges_{chunk_str}.data' - - edgesMessage = Edges() - with Storage(cg._cv_path) as st: - file_content = st.get_file(fname) - - file_content = zstd.ZstdDecompressor().decompressobj().decompress(file_content) - edgesMessage.ParseFromString(file_content) - edges = np.frombuffer(edgesMessage.edgeList) - areas = np.frombuffer(edgesMessage.areas, dtype=' Date: Mon, 22 Jul 2019 15:16:04 -0400 Subject: [PATCH 0016/1097] wip: updates --- pychunkedgraph/ingest/chunkEdges.proto | 9 --------- pychunkedgraph/io/gcs.py | 22 ++++++++++++++++++++-- 2 files changed, 20 insertions(+), 11 deletions(-) delete mode 100644 pychunkedgraph/ingest/chunkEdges.proto diff --git a/pychunkedgraph/ingest/chunkEdges.proto b/pychunkedgraph/ingest/chunkEdges.proto deleted file mode 100644 index 369fe9505..000000000 --- a/pychunkedgraph/ingest/chunkEdges.proto +++ /dev/null @@ -1,9 +0,0 @@ -syntax = "proto3"; - -package test; - -message Edges { - bytes edgeList = 1; - bytes affinities = 2; - bytes areas = 3; -} \ No newline at end of file diff --git a/pychunkedgraph/io/gcs.py b/pychunkedgraph/io/gcs.py index 63ac7ece5..c8261aa5e 100644 --- a/pychunkedgraph/io/gcs.py +++ b/pychunkedgraph/io/gcs.py @@ -2,11 +2,29 @@ Functions to use when dealing with Google Cloud Storage ''' +import numpy as np +import zstandard as zstd + +from cloudvolume import Storage +from .protobuf.chunkEdges_pb3 import Edges + # TODO some funtions in ChunkedGraph # should be class methods or util functions # for now pass instance of ChunkedGraph -def get_chunk_edges(cg, chunk_id): +def get_chunk_edges(cg, chunk_id: np.uint64): chunk_coords = cg.get_chunk_coordinates(chunk_id) - chunk_str = repr(chunk)[1:-1].replace(', ','_') + chunk_str = '_'.join(str(coord) for coord in chunk_coords) fname = f'edges_{chunk_str}.data' + + edgesMessage = Edges() + with 
Storage(cg._cv_path) as st: + file_content = st.get_file(fname) + + file_content = zstd.ZstdDecompressor().decompressobj().decompress(file_content) + edgesMessage.ParseFromString(file_content) + edges = np.frombuffer(edgesMessage.edgeList) + areas = np.frombuffer(edgesMessage.areas, dtype=' Date: Mon, 22 Jul 2019 16:08:53 -0400 Subject: [PATCH 0017/1097] wip --- pychunkedgraph/io/{gcs.py => storage.py} | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) rename pychunkedgraph/io/{gcs.py => storage.py} (76%) diff --git a/pychunkedgraph/io/gcs.py b/pychunkedgraph/io/storage.py similarity index 76% rename from pychunkedgraph/io/gcs.py rename to pychunkedgraph/io/storage.py index c8261aa5e..9cc7ae7a8 100644 --- a/pychunkedgraph/io/gcs.py +++ b/pychunkedgraph/io/storage.py @@ -1,7 +1,9 @@ ''' -Functions to use when dealing with Google Cloud Storage +Functions to use when dealing with any cloud storage via CloudVolume ''' +from typing import List + import numpy as np import zstandard as zstd @@ -12,14 +14,17 @@ # should be class methods or util functions # for now pass instance of ChunkedGraph -def get_chunk_edges(cg, chunk_id: np.uint64): +def get_chunk_edges(cg, chunk_ids: List[np.uint64]): + fnames = [] chunk_coords = cg.get_chunk_coordinates(chunk_id) chunk_str = '_'.join(str(coord) for coord in chunk_coords) fname = f'edges_{chunk_str}.data' edgesMessage = Edges() with Storage(cg._cv_path) as st: - file_content = st.get_file(fname) + file_content = st.get_files(fnames) + + # TODO move decompression to a generator file_content = zstd.ZstdDecompressor().decompressobj().decompress(file_content) edgesMessage.ParseFromString(file_content) From db5b6c2686f846ea424bd113a1f53b1b1365489c Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 22 Jul 2019 16:26:02 -0400 Subject: [PATCH 0018/1097] wip: add generator to decompress files --- pychunkedgraph/io/storage.py | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff 
--git a/pychunkedgraph/io/storage.py b/pychunkedgraph/io/storage.py index 9cc7ae7a8..47034a5e1 100644 --- a/pychunkedgraph/io/storage.py +++ b/pychunkedgraph/io/storage.py @@ -2,7 +2,7 @@ Functions to use when dealing with any cloud storage via CloudVolume ''' -from typing import List +from typing import List, Dict import numpy as np import zstandard as zstd @@ -14,22 +14,32 @@ # should be class methods or util functions # for now pass instance of ChunkedGraph +def _decompress_edges(files: List[Dict]): + """ + :param files: list of dicts (from CloudVolume.Storage.get_files) + :return: Tuple[edges:np.array[np.uint64, np.uint64], + areas:np.array[np.uint64] + affinities: np.array[np.float64]] + """ + edgesMessage = Edges() + + for _file in files: + file_content = zstd.ZstdDecompressor().decompressobj().decompress(_file['content']) + edgesMessage.ParseFromString(file_content) + edges = np.frombuffer(edgesMessage.edgeList) + areas = np.frombuffer(edgesMessage.areas, dtype=' Date: Mon, 22 Jul 2019 16:29:04 -0400 Subject: [PATCH 0019/1097] wip: updates --- pychunkedgraph/io/storage.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/pychunkedgraph/io/storage.py b/pychunkedgraph/io/storage.py index 47034a5e1..2cd1f6926 100644 --- a/pychunkedgraph/io/storage.py +++ b/pychunkedgraph/io/storage.py @@ -15,12 +15,12 @@ # for now pass instance of ChunkedGraph def _decompress_edges(files: List[Dict]): - """ + ''' :param files: list of dicts (from CloudVolume.Storage.get_files) :return: Tuple[edges:np.array[np.uint64, np.uint64], areas:np.array[np.uint64] affinities: np.array[np.float64]] - """ + ''' edgesMessage = Edges() for _file in files: @@ -33,10 +33,15 @@ def _decompress_edges(files: List[Dict]): def get_chunk_edges(cg, chunk_ids: List[np.uint64]): + ''' + :param cg: ChunkedGraph instance + :return: a generator that yields decompressed file content + ''' fnames = [] - chunk_coords = cg.get_chunk_coordinates(chunk_id) - chunk_str = 
'_'.join(str(coord) for coord in chunk_coords) - fname = f'edges_{chunk_str}.data' + for chunk_id in chunk_ids: + chunk_coords = cg.get_chunk_coordinates(chunk_id) + chunk_str = '_'.join(str(coord) for coord in chunk_coords) + fnames.append(f'edges_{chunk_str}.data') files = [] with Storage(cg._cv_path) as st: From b2ecfc5893d3f23ee1a637b27990d2834dea9b92 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 22 Jul 2019 16:34:12 -0400 Subject: [PATCH 0020/1097] wip: fix path --- pychunkedgraph/io/storage.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pychunkedgraph/io/storage.py b/pychunkedgraph/io/storage.py index 2cd1f6926..ae3d0a19c 100644 --- a/pychunkedgraph/io/storage.py +++ b/pychunkedgraph/io/storage.py @@ -10,9 +10,6 @@ from cloudvolume import Storage from .protobuf.chunkEdges_pb3 import Edges -# TODO some funtions in ChunkedGraph -# should be class methods or util functions -# for now pass instance of ChunkedGraph def _decompress_edges(files: List[Dict]): ''' @@ -44,7 +41,7 @@ def get_chunk_edges(cg, chunk_ids: List[np.uint64]): fnames.append(f'edges_{chunk_str}.data') files = [] - with Storage(cg._cv_path) as st: + with Storage(f'{cg._cv_path}/edges_dir') as st: files = st.get_files(fnames) return _decompress_edges(files) \ No newline at end of file From c368bc10427b548f53ecd9d3e30101e8c8a3c4a5 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 22 Jul 2019 16:48:34 -0400 Subject: [PATCH 0021/1097] wip --- pychunkedgraph/backend/chunkedgraph.py | 18 ++++-------------- pychunkedgraph/io/storage.py | 6 +++--- 2 files changed, 7 insertions(+), 17 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 2fb98557d..c442b7165 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -27,6 +27,7 @@ MulticutOperation, SplitOperation, ) +from pychunkedgraph.io.storage import get_chunk_edges # from pychunkedgraph.meshing import 
meshgen from google.api_core.retry import Retry, if_exception_type @@ -3029,14 +3030,9 @@ def get_subgraph_edges(self, agglomeration_id: np.uint64, # connected_edges=connected_edges, # time_stamp=time_stamp) - def _get_subgraph_layer2_edges(node_ids) -> \ + def _get_subgraph_layer2_edges(chunk_ids) -> \ Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: - return self.get_subgraph_chunk_v2(node_ids, - connected_edges=connected_edges, - time_stamp=time_stamp) - - time_stamp = self.read_node_id_row(agglomeration_id, - columns=column_keys.Hierarchy.Child)[0].timestamp + return get_chunk_edges(self, chunk_ids) bounding_box = self.normalize_bounding_box(bounding_box, bb_is_coordinate) @@ -3051,17 +3047,11 @@ def _get_subgraph_layer2_edges(node_ids) -> \ child_chunk_ids = self.get_chunk_ids_from_node_ids(child_ids) u_ccids = np.unique(child_chunk_ids) - - # Make blocks of child ids that are in the same chunk - child_blocks = [] - for u_ccid in u_ccids: - child_blocks.append(child_ids[child_chunk_ids == u_ccid]) - this_n_threads = np.min([int(len(u_ccids) // 50000) + 1, mu.n_cpus]) edge_infos = mu.multithread_func( _get_subgraph_layer2_edges, - np.array_split(child_ids, this_n_threads), + np.array_split(u_ccids, this_n_threads), n_threads=this_n_threads, debug=this_n_threads == 1) affinities = np.array([], dtype=np.float32) diff --git a/pychunkedgraph/io/storage.py b/pychunkedgraph/io/storage.py index ae3d0a19c..b340fe799 100644 --- a/pychunkedgraph/io/storage.py +++ b/pychunkedgraph/io/storage.py @@ -29,9 +29,9 @@ def _decompress_edges(files: List[Dict]): yield edges, areas, affinities -def get_chunk_edges(cg, chunk_ids: List[np.uint64]): +def get_chunk_edges(edges_dir:str, chunk_ids: List[np.uint64]): ''' - :param cg: ChunkedGraph instance + :param chunk_ids :return: a generator that yields decompressed file content ''' fnames = [] @@ -41,7 +41,7 @@ def get_chunk_edges(cg, chunk_ids: List[np.uint64]): fnames.append(f'edges_{chunk_str}.data') files = [] - with 
Storage(f'{cg._cv_path}/edges_dir') as st: + with Storage(edges_dir) as st: files = st.get_files(fnames) return _decompress_edges(files) \ No newline at end of file From 15c056eac688b5d5ce58b194caebd74e3697d0e4 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 22 Jul 2019 16:55:18 -0400 Subject: [PATCH 0022/1097] wip --- pychunkedgraph/io/storage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pychunkedgraph/io/storage.py b/pychunkedgraph/io/storage.py index b340fe799..502a791f1 100644 --- a/pychunkedgraph/io/storage.py +++ b/pychunkedgraph/io/storage.py @@ -29,7 +29,7 @@ def _decompress_edges(files: List[Dict]): yield edges, areas, affinities -def get_chunk_edges(edges_dir:str, chunk_ids: List[np.uint64]): +def get_chunk_edges(edges_dir:str, chunk_coords: np.array([x, y, z])): ''' :param chunk_ids :return: a generator that yields decompressed file content From bbadca3629c41f0d105da2dad2cdd8699bd6c462 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 23 Jul 2019 16:20:07 -0400 Subject: [PATCH 0023/1097] wip: udpates --- pychunkedgraph/backend/chunkedgraph.py | 4 +++- .../io/protobuf/{chunkEdges_pb3.py => chunkEdges_pb2.py} | 0 pychunkedgraph/io/storage.py | 9 ++++----- 3 files changed, 7 insertions(+), 6 deletions(-) rename pychunkedgraph/io/protobuf/{chunkEdges_pb3.py => chunkEdges_pb2.py} (100%) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index c442b7165..def034633 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3032,7 +3032,9 @@ def get_subgraph_edges(self, agglomeration_id: np.uint64, def _get_subgraph_layer2_edges(chunk_ids) -> \ Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: - return get_chunk_edges(self, chunk_ids) + return get_chunk_edges( + 'edges', + [self.get_chunk_coordinates(chunk_id) for chunk_id in chunk_ids]) bounding_box = self.normalize_bounding_box(bounding_box, bb_is_coordinate) diff --git 
a/pychunkedgraph/io/protobuf/chunkEdges_pb3.py b/pychunkedgraph/io/protobuf/chunkEdges_pb2.py similarity index 100% rename from pychunkedgraph/io/protobuf/chunkEdges_pb3.py rename to pychunkedgraph/io/protobuf/chunkEdges_pb2.py diff --git a/pychunkedgraph/io/storage.py b/pychunkedgraph/io/storage.py index 502a791f1..da0d22f31 100644 --- a/pychunkedgraph/io/storage.py +++ b/pychunkedgraph/io/storage.py @@ -8,7 +8,7 @@ import zstandard as zstd from cloudvolume import Storage -from .protobuf.chunkEdges_pb3 import Edges +from .protobuf.chunkEdges_pb2 import Edges def _decompress_edges(files: List[Dict]): @@ -29,14 +29,13 @@ def _decompress_edges(files: List[Dict]): yield edges, areas, affinities -def get_chunk_edges(edges_dir:str, chunk_coords: np.array([x, y, z])): +def get_chunk_edges(edges_dir:str, chunks_coordinates: List[np.ndarray]): ''' - :param chunk_ids + :param: chunks_coordinates np.array of chunk coordinates :return: a generator that yields decompressed file content ''' fnames = [] - for chunk_id in chunk_ids: - chunk_coords = cg.get_chunk_coordinates(chunk_id) + for chunk_coords in chunks_coordinates: chunk_str = '_'.join(str(coord) for coord in chunk_coords) fnames.append(f'edges_{chunk_str}.data') From 8cb731a2d0adc974b70a815d69bc1d939e128a51 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 24 Jul 2019 11:15:17 -0400 Subject: [PATCH 0024/1097] wip: read edges from 256 chunks --- pychunkedgraph/backend/chunkedgraph.py | 75 ++++++++++++++++++++++---- pychunkedgraph/io/storage.py | 25 +++++++-- 2 files changed, 86 insertions(+), 14 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index def034633..e1f0ed42c 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3024,17 +3024,11 @@ def get_subgraph_edges(self, agglomeration_id: np.uint64, :return: edge list """ - # def _get_subgraph_layer2_edges(node_ids) -> \ - # Tuple[List[np.ndarray], 
List[np.float32], List[np.uint64]]: - # return self.get_subgraph_chunk(node_ids, - # connected_edges=connected_edges, - # time_stamp=time_stamp) - - def _get_subgraph_layer2_edges(chunk_ids) -> \ + def _get_subgraph_layer2_edges(node_ids) -> \ Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: - return get_chunk_edges( - 'edges', - [self.get_chunk_coordinates(chunk_id) for chunk_id in chunk_ids]) + return self.get_subgraph_chunk(node_ids, + connected_edges=connected_edges, + time_stamp=time_stamp) bounding_box = self.normalize_bounding_box(bounding_box, bb_is_coordinate) @@ -3073,6 +3067,67 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ return edges, affinities, areas + + def get_subgraph_edges_v2(self, agglomeration_id: np.uint64, + bounding_box: Optional[Sequence[Sequence[int]]] = None, + bb_is_coordinate: bool = False, + connected_edges=True, + verbose: bool = True + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + + def _get_subgraph_layer2_edges(chunk_ids) -> \ + Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: + return get_chunk_edges( + 'testing_ignore_this', + [self.get_chunk_coordinates(chunk_id) for chunk_id in chunk_ids]) + + # test assumes agglomeration id spans 256 chunks + # just reads edges from all 256 chunks + offset = np.array([102, 51, 5]) + + x_start, y_start, z_start = offset + x_end, y_end, z_end = map( + int, np.ceil( + np.array(cg.dataset_info['scales'][0]['size']) / cg.chunk_size) - offset) + + chunks = [] + + for x in range(x_start,x_end): + for y in range(y_start, y_end): + for z in range(z_start, z_end): + chunks.append((x, y, z)) + + chunk_ids = [cg.get_chunk_id(None, 1, *chunk) for chunk in chunks] + this_n_threads = np.min([int(len(chunk_ids) // 50000) + 1, mu.n_cpus]) + + if verbose: + time_start = time.time() + + edge_infos = mu.multithread_func( + _get_subgraph_layer2_edges, + np.array_split(chunk_ids, this_n_threads), + n_threads=this_n_threads, debug=this_n_threads == 1) + + edges = np.array([], 
dtype=np.uint64).reshape(0, 2) + affinities = np.array([], dtype=np.float32) + areas = np.array([], dtype=np.uint64) + + for edge_info in edge_infos: + _edges, _affinities, _areas = edge_info + edges = np.concatenate([edges, _edges]) + affinities = np.concatenate([affinities, _affinities]) + areas = np.concatenate([areas, _areas]) + + if verbose: + print(f'time: {time.time() - time_start)}') + print(f'chunks: {len(chunks)}') + print(f'threads: {len(this_n_threads)}') + print(f'edges: {len(edges)}') + print(f'affinities: {len(affinities)}') + print(f'areas: {len(areas)}') + + return edges, affinities, areas + def get_subgraph_nodes(self, agglomeration_id: np.uint64, bounding_box: Optional[Sequence[Sequence[int]]] = None, bb_is_coordinate: bool = False, diff --git a/pychunkedgraph/io/storage.py b/pychunkedgraph/io/storage.py index da0d22f31..53afe6c21 100644 --- a/pychunkedgraph/io/storage.py +++ b/pychunkedgraph/io/storage.py @@ -2,6 +2,7 @@ Functions to use when dealing with any cloud storage via CloudVolume ''' +import os from typing import List, Dict import numpy as np @@ -23,17 +24,21 @@ def _decompress_edges(files: List[Dict]): for _file in files: file_content = zstd.ZstdDecompressor().decompressobj().decompress(_file['content']) edgesMessage.ParseFromString(file_content) + edges = np.frombuffer(edgesMessage.edgeList) - areas = np.frombuffer(edgesMessage.areas, dtype=' Date: Wed, 24 Jul 2019 14:53:58 -0400 Subject: [PATCH 0025/1097] wip --- pychunkedgraph/backend/chunkedgraph.py | 19 +++++------ pychunkedgraph/io/storage.py | 46 +++++++++++++------------- 2 files changed, 32 insertions(+), 33 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index e1f0ed42c..43d08c235 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3067,8 +3067,8 @@ def _get_subgraph_layer2_edges(node_ids) -> \ return edges, affinities, areas - def get_subgraph_edges_v2(self, 
agglomeration_id: np.uint64, + offset = np.array([104, 53, 6]), bounding_box: Optional[Sequence[Sequence[int]]] = None, bb_is_coordinate: bool = False, connected_edges=True, @@ -3081,14 +3081,13 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ 'testing_ignore_this', [self.get_chunk_coordinates(chunk_id) for chunk_id in chunk_ids]) - # test assumes agglomeration id spans 256 chunks - # just reads edges from all 256 chunks - offset = np.array([102, 51, 5]) + # test assumes agglomeration id spans 32 chunks + # just reads edges from all 32 chunks x_start, y_start, z_start = offset x_end, y_end, z_end = map( int, np.ceil( - np.array(cg.dataset_info['scales'][0]['size']) / cg.chunk_size) - offset) + np.array(self.dataset_info['scales'][0]['size']) / self.chunk_size) - offset) chunks = [] @@ -3097,7 +3096,7 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ for z in range(z_start, z_end): chunks.append((x, y, z)) - chunk_ids = [cg.get_chunk_id(None, 1, *chunk) for chunk in chunks] + chunk_ids = np.array([self.get_chunk_id(None, 1, *chunk) for chunk in chunks]) this_n_threads = np.min([int(len(chunk_ids) // 50000) + 1, mu.n_cpus]) if verbose: @@ -3106,7 +3105,7 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ edge_infos = mu.multithread_func( _get_subgraph_layer2_edges, np.array_split(chunk_ids, this_n_threads), - n_threads=this_n_threads, debug=this_n_threads == 1) + n_threads=this_n_threads, debug=this_n_threads == 1) edges = np.array([], dtype=np.uint64).reshape(0, 2) affinities = np.array([], dtype=np.float32) @@ -3119,14 +3118,14 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ areas = np.concatenate([areas, _areas]) if verbose: - print(f'time: {time.time() - time_start)}') + print(f'time: {time.time() - time_start}') print(f'chunks: {len(chunks)}') - print(f'threads: {len(this_n_threads)}') + print(f'threads: {this_n_threads}') print(f'edges: {len(edges)}') print(f'affinities: {len(affinities)}') print(f'areas: {len(areas)}') - return edges, affinities, areas + # 
return edges, affinities, areas def get_subgraph_nodes(self, agglomeration_id: np.uint64, bounding_box: Optional[Sequence[Sequence[int]]] = None, diff --git a/pychunkedgraph/io/storage.py b/pychunkedgraph/io/storage.py index 53afe6c21..321fa2971 100644 --- a/pychunkedgraph/io/storage.py +++ b/pychunkedgraph/io/storage.py @@ -11,24 +11,26 @@ from cloudvolume import Storage from .protobuf.chunkEdges_pb2 import Edges +# Creating these everytime is costly +edgesMessage = Edges() +zstdDecompressor = zstd.ZstdDecompressor() -def _decompress_edges(files: List[Dict]): + +def _decompress_edges(content): ''' - :param files: list of dicts (from CloudVolume.Storage.get_files) + :param content: zstd compressed bytes :return: Tuple[edges:np.array[np.uint64, np.uint64], areas:np.array[np.uint64] affinities: np.array[np.float64]] ''' - edgesMessage = Edges() + zstdDecompressor = zstd.ZstdDecompressor() + file_content = zstdDecompressor.decompressobj().decompress(content) + edgesMessage.ParseFromString(file_content) - for _file in files: - file_content = zstd.ZstdDecompressor().decompressobj().decompress(_file['content']) - edgesMessage.ParseFromString(file_content) - - edges = np.frombuffer(edgesMessage.edgeList) - affinities = np.frombuffer(edgesMessage.affinities, dtype=' Date: Thu, 25 Jul 2019 12:25:52 -0400 Subject: [PATCH 0026/1097] wip --- pychunkedgraph/backend/chunkedgraph.py | 10 ++++++---- pychunkedgraph/io/{storage.py => edge_storage.py} | 9 +++------ 2 files changed, 9 insertions(+), 10 deletions(-) rename pychunkedgraph/io/{storage.py => edge_storage.py} (89%) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 43d08c235..5027a8daa 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3067,8 +3067,7 @@ def _get_subgraph_layer2_edges(node_ids) -> \ return edges, affinities, areas - def get_subgraph_edges_v2(self, agglomeration_id: np.uint64, - offset = np.array([104, 53, 6]), 
+ def get_subgraph_edges_v2(self, offset = np.array([105, 54, 6]), bounding_box: Optional[Sequence[Sequence[int]]] = None, bb_is_coordinate: bool = False, connected_edges=True, @@ -3088,6 +3087,8 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ x_end, y_end, z_end = map( int, np.ceil( np.array(self.dataset_info['scales'][0]['size']) / self.chunk_size) - offset) + print(x_start, y_start, z_start) + print(x_end, y_end, z_end) chunks = [] @@ -3098,6 +3099,9 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ chunk_ids = np.array([self.get_chunk_id(None, 1, *chunk) for chunk in chunks]) this_n_threads = np.min([int(len(chunk_ids) // 50000) + 1, mu.n_cpus]) + + print(f'chunks: {len(chunks)}') + print(f'threads: {this_n_threads}') if verbose: time_start = time.time() @@ -3119,8 +3123,6 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ if verbose: print(f'time: {time.time() - time_start}') - print(f'chunks: {len(chunks)}') - print(f'threads: {this_n_threads}') print(f'edges: {len(edges)}') print(f'affinities: {len(affinities)}') print(f'areas: {len(areas)}') diff --git a/pychunkedgraph/io/storage.py b/pychunkedgraph/io/edge_storage.py similarity index 89% rename from pychunkedgraph/io/storage.py rename to pychunkedgraph/io/edge_storage.py index 321fa2971..2063b7302 100644 --- a/pychunkedgraph/io/storage.py +++ b/pychunkedgraph/io/edge_storage.py @@ -11,10 +11,6 @@ from cloudvolume import Storage from .protobuf.chunkEdges_pb2 import Edges -# Creating these everytime is costly -edgesMessage = Edges() -zstdDecompressor = zstd.ZstdDecompressor() - def _decompress_edges(content): ''' @@ -23,8 +19,9 @@ def _decompress_edges(content): areas:np.array[np.uint64] affinities: np.array[np.float64]] ''' - zstdDecompressor = zstd.ZstdDecompressor() - file_content = zstdDecompressor.decompressobj().decompress(content) + edgesMessage = Edges() + zstdDecompressorObj = zstd.ZstdDecompressor().decompressobj() + file_content = zstdDecompressorObj.decompress(content) 
edgesMessage.ParseFromString(file_content) edges = np.frombuffer(edgesMessage.edgeList).reshape(-1, 2) From 1490817b01a659946a3f1c4162e8a23fcc74765b Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 25 Jul 2019 12:27:53 -0400 Subject: [PATCH 0027/1097] wip: number of threads as argument --- pychunkedgraph/backend/chunkedgraph.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 5027a8daa..2d5e99fce 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3068,6 +3068,7 @@ def _get_subgraph_layer2_edges(node_ids) -> \ return edges, affinities, areas def get_subgraph_edges_v2(self, offset = np.array([105, 54, 6]), + this_n_threads = 4, bounding_box: Optional[Sequence[Sequence[int]]] = None, bb_is_coordinate: bool = False, connected_edges=True, @@ -3098,7 +3099,7 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ chunks.append((x, y, z)) chunk_ids = np.array([self.get_chunk_id(None, 1, *chunk) for chunk in chunks]) - this_n_threads = np.min([int(len(chunk_ids) // 50000) + 1, mu.n_cpus]) + # this_n_threads = np.min([int(len(chunk_ids) // 50000) + 1, mu.n_cpus]) print(f'chunks: {len(chunks)}') print(f'threads: {this_n_threads}') From b05a80849fc0776cffb714599289bef68017b7dd Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 25 Jul 2019 17:22:35 -0400 Subject: [PATCH 0028/1097] wip: pass cv thread count --- pychunkedgraph/backend/chunkedgraph.py | 41 +++++++++++++++----------- pychunkedgraph/io/edge_storage.py | 4 +-- 2 files changed, 25 insertions(+), 20 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 2d5e99fce..f648e0ede 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -10,6 +10,7 @@ import re import itertools import logging +import json from itertools import chain from multiwrapper import 
multiprocessing_utils as mu @@ -28,6 +29,7 @@ SplitOperation, ) from pychunkedgraph.io.storage import get_chunk_edges +from pychunkedgraph.io.edge_storage import get_chunk_edges # from pychunkedgraph.meshing import meshgen from google.api_core.retry import Retry, if_exception_type @@ -3069,6 +3071,7 @@ def _get_subgraph_layer2_edges(node_ids) -> \ def get_subgraph_edges_v2(self, offset = np.array([105, 54, 6]), this_n_threads = 4, + cv_threads = 20, bounding_box: Optional[Sequence[Sequence[int]]] = None, bb_is_coordinate: bool = False, connected_edges=True, @@ -3079,39 +3082,39 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: return get_chunk_edges( 'testing_ignore_this', - [self.get_chunk_coordinates(chunk_id) for chunk_id in chunk_ids]) + [self.get_chunk_coordinates(chunk_id) for chunk_id in chunk_ids], + cv_threads) - # test assumes agglomeration id spans 32 chunks - # just reads edges from all 32 chunks + timings = {} + timings['total'] = time.time() + timings['determine_chunks_ids'] = time.time() x_start, y_start, z_start = offset x_end, y_end, z_end = map( int, np.ceil( np.array(self.dataset_info['scales'][0]['size']) / self.chunk_size) - offset) - print(x_start, y_start, z_start) - print(x_end, y_end, z_end) chunks = [] for x in range(x_start,x_end): for y in range(y_start, y_end): for z in range(z_start, z_end): - chunks.append((x, y, z)) + chunks.append((x, y, z)) chunk_ids = np.array([self.get_chunk_id(None, 1, *chunk) for chunk in chunks]) - # this_n_threads = np.min([int(len(chunk_ids) // 50000) + 1, mu.n_cpus]) + timings['determine_chunks_ids'] = time.time() - timings['determine_chunks_ids'] - print(f'chunks: {len(chunks)}') - print(f'threads: {this_n_threads}') - - if verbose: - time_start = time.time() +# print(f'chunks: {len(chunks)}') +# print(f'threads: {this_n_threads}') + timings['reading_edges'] = time.time() edge_infos = mu.multithread_func( _get_subgraph_layer2_edges, 
np.array_split(chunk_ids, this_n_threads), - n_threads=this_n_threads, debug=this_n_threads == 1) + n_threads=this_n_threads, debug=this_n_threads == 1) + timings['reading_edges'] = time.time() - timings['reading_edges'] + timings['collecting_edges'] = time.time() edges = np.array([], dtype=np.uint64).reshape(0, 2) affinities = np.array([], dtype=np.float32) areas = np.array([], dtype=np.uint64) @@ -3121,12 +3124,14 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ edges = np.concatenate([edges, _edges]) affinities = np.concatenate([affinities, _affinities]) areas = np.concatenate([areas, _areas]) + timings['collecting_edges'] = time.time() - timings['collecting_edges'] + + timings['total'] = time.time() - timings['total'] - if verbose: - print(f'time: {time.time() - time_start}') - print(f'edges: {len(edges)}') - print(f'affinities: {len(affinities)}') - print(f'areas: {len(areas)}') + print(json.dumps(timings, default=str, indent=4)) + # print(f'edges: {len(edges)}') + # print(f'affinities: {len(affinities)}') + # print(f'areas: {len(areas)}') # return edges, affinities, areas diff --git a/pychunkedgraph/io/edge_storage.py b/pychunkedgraph/io/edge_storage.py index 2063b7302..33327a48b 100644 --- a/pychunkedgraph/io/edge_storage.py +++ b/pychunkedgraph/io/edge_storage.py @@ -30,7 +30,7 @@ def _decompress_edges(content): return edges, affinities, areas -def get_chunk_edges(edges_dir:str, chunks_coordinates: List[np.ndarray]): +def get_chunk_edges(edges_dir:str, chunks_coordinates: List[np.ndarray], cv_threads): ''' :param: chunks_coordinates np.array of chunk coordinates :return: a generator that yields decompressed file content @@ -48,7 +48,7 @@ def get_chunk_edges(edges_dir:str, chunks_coordinates: List[np.ndarray]): areas = np.array([], dtype=np.uint64) files = [] - with Storage(edges_dir) as st: + with Storage(edges_dir, n_threads = cv_threads) as st: files = st.get_files(fnames) for _file in files: if not _file['content']: continue From 
75d9f4e479106aa7f16ee5f1faf07089628279ef Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 26 Jul 2019 13:32:54 -0400 Subject: [PATCH 0029/1097] wip: more performance testing --- pychunkedgraph/backend/chunkedgraph.py | 3 ++ pychunkedgraph/io/edge_storage.py | 54 +++++++++++++++----------- 2 files changed, 35 insertions(+), 22 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index f648e0ede..6013fa393 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3032,6 +3032,9 @@ def _get_subgraph_layer2_edges(node_ids) -> \ connected_edges=connected_edges, time_stamp=time_stamp) + time_stamp = self.read_node_id_row(agglomeration_id, + columns=column_keys.Hierarchy.Child)[0].timestamp + bounding_box = self.normalize_bounding_box(bounding_box, bb_is_coordinate) # Layer 3+ diff --git a/pychunkedgraph/io/edge_storage.py b/pychunkedgraph/io/edge_storage.py index 33327a48b..dbbc57486 100644 --- a/pychunkedgraph/io/edge_storage.py +++ b/pychunkedgraph/io/edge_storage.py @@ -1,60 +1,70 @@ -''' +""" Functions to use when dealing with any cloud storage via CloudVolume -''' +""" import os -from typing import List, Dict +from typing import List, Dict, Tuple import numpy as np import zstandard as zstd from cloudvolume import Storage +from cloudvolume.storage import SimpleStorage from .protobuf.chunkEdges_pb2 import Edges def _decompress_edges(content): - ''' + """ :param content: zstd compressed bytes :return: Tuple[edges:np.array[np.uint64, np.uint64], areas:np.array[np.uint64] affinities: np.array[np.float64]] - ''' - edgesMessage = Edges() + """ + edgesMessage = Edges() zstdDecompressorObj = zstd.ZstdDecompressor().decompressobj() file_content = zstdDecompressorObj.decompress(content) edgesMessage.ParseFromString(file_content) - + edges = np.frombuffer(edgesMessage.edgeList).reshape(-1, 2) - affinities = np.frombuffer(edgesMessage.affinities, dtype=' Tuple[np.ndarray, 
np.ndarray, np.ndarray]: + """ :param: chunks_coordinates np.array of chunk coordinates - :return: a generator that yields decompressed file content - ''' + :return: tuple of edge infos (edges, affinities, areas) + """ edges_dir = os.environ.get( - 'EDIR', - 'gs://akhilesh-test/edges/fly_playground/bbox-102_51_5-110_59_9') + "EDIR", "gs://akhilesh-test/edges/fly_playground/bbox-102_51_5-110_59_9" + ) fnames = [] for chunk_coords in chunks_coordinates: - chunk_str = '_'.join(str(coord) for coord in chunk_coords) - fnames.append(f'chunk_{chunk_str}_zstd_level_17_proto.data') + chunk_str = "_".join(str(coord) for coord in chunk_coords) + fnames.append(f"chunk_{chunk_str}_zstd_level_17_proto.data") edges = np.array([], dtype=np.uint64).reshape(0, 2) affinities = np.array([], dtype=np.float32) areas = np.array([], dtype=np.uint64) + st = SimpleStorage(edges_dir) + if cv_threads > 1: + st = Storage(edges_dir, n_threads=cv_threads) + files = [] - with Storage(edges_dir, n_threads = cv_threads) as st: + with st: files = st.get_files(fnames) for _file in files: - if not _file['content']: continue - _edges, _affinities, _areas = _decompress_edges(_file['content']) + if not _file["content"]: + continue + _edges, _affinities, _areas = _decompress_edges(_file["content"]) edges = np.concatenate([edges, _edges]) affinities = np.concatenate([affinities, _affinities]) - areas = np.concatenate([areas, _areas]) + areas = np.concatenate([areas, _areas]) - return edges, affinities, areas \ No newline at end of file + return edges, affinities, areas From f7947b81747ae275bd2f1ee6f361dad28e22ff46 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 26 Jul 2019 13:34:29 -0400 Subject: [PATCH 0030/1097] wip: updates --- pychunkedgraph/backend/chunkedgraph.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 6013fa393..3c315c089 100644 --- 
a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3107,8 +3107,8 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ chunk_ids = np.array([self.get_chunk_id(None, 1, *chunk) for chunk in chunks]) timings['determine_chunks_ids'] = time.time() - timings['determine_chunks_ids'] -# print(f'chunks: {len(chunks)}') -# print(f'threads: {this_n_threads}') + # print(f'chunks: {len(chunks)}') + # print(f'threads: {this_n_threads}') timings['reading_edges'] = time.time() edge_infos = mu.multithread_func( @@ -3131,7 +3131,9 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ timings['total'] = time.time() - timings['total'] - print(json.dumps(timings, default=str, indent=4)) + return timings + + # print(json.dumps(timings, default=str, indent=4)) # print(f'edges: {len(edges)}') # print(f'affinities: {len(affinities)}') # print(f'areas: {len(areas)}') From 291ee15674cf97ccad5fe64e838adcb63f032cce Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 11 Jul 2019 22:35:41 -0400 Subject: [PATCH 0031/1097] wip: refactor add_chunk_edges --- pychunkedgraph/backend/chunkedgraph.py | 291 +++++++++++++++++++++++++ 1 file changed, 291 insertions(+) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 3c315c089..607dfab04 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -1763,6 +1763,297 @@ def add_atomic_edges_in_chunks(self, edge_id_dict: dict, (k, np.sum(time_dict[k])*1000, len(time_dict[k]), np.mean(time_dict[k])*1000)) + def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, + edge_aff_dict: dict, edge_area_dict: dict, + isolated_node_ids: Sequence[np.uint64], + verbose: bool = True, + time_stamp: Optional[datetime.datetime] = None): + """ Creates atomic nodes in first abstraction layer for a SINGLE chunk + and all abstract nodes in the second for the same chunk + + Alle edges (edge_ids) need to be from one chunk and no nodes should + exist 
for this chunk prior to calling this function. All cross edges + (cross_edge_ids) have to point out the chunk (first entry is the id + within the chunk) + + :param edge_id_dict: dict + :param edge_aff_dict: dict + :param edge_area_dict: dict + :param isolated_node_ids: list of uint64s + ids of nodes that have no edge in the chunked graph + :param verbose: bool + :param time_stamp: datetime + """ + if time_stamp is None: + time_stamp = datetime.datetime.utcnow() + + if time_stamp.tzinfo is None: + time_stamp = UTC.localize(time_stamp) + + # Comply to resolution of BigTables TimeRange + time_stamp = get_google_compatible_time_stamp(time_stamp, + round_up=False) + + edge_aff_keys = [ + 'in_connected','in_disconnected','between_connected','between_disconnected'] + edge_id_keys = edge_aff_keys[:].insert(2, 'cross') + + # Check if keys exist and include an empty array if not + n_edge_ids = 0 + empty_edges_array = np.array([], dtype=np.uint64).reshape(0, 2) + + for key in edge_id_keys: + edge_id_dict[key] = np.concatenate( + edge_id_dict.get(key, empty_edges_array.copy(), + empty_edges_array.copy())) + n_edge_ids += len(edge_id_dict[edge_id_key]) + + for key in edge_aff_keys: + edge_aff_dict[key] = np.concatenate( + edge_aff_dict.get(key, empty_edges_array.copy(), + empty_edges_array.copy())) + + time_start = time.time() + + # Get connected component within the chunk + chunk_node_ids = np.concatenate([ + isolated_node_ids.astype(np.uint64), + np.unique(edge_id_dict["in_connected"]), + np.unique(edge_id_dict["in_disconnected"]), + np.unique(edge_id_dict["cross"][:, 0]), + np.unique(edge_id_dict["between_connected"][:, 0]), + np.unique(edge_id_dict["between_disconnected"][:, 0])]) + + + # nothing to do + if not len(chunk_node_ids): return 0 + + chunk_node_ids = np.unique(chunk_node_ids) + + node_chunk_ids = np.array([self.get_chunk_id(c) + for c in chunk_node_ids], + dtype=np.uint64) + + u_node_chunk_ids, c_node_chunk_ids = np.unique(node_chunk_ids, + return_counts=True) + 
if len(u_node_chunk_ids) > 1: + raise Exception("%d: %d chunk ids found in node id list. " + "Some edges might be in the wrong order. " + "Number of occurences:" % + (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) + + add_edge_ids = np.vstack([chunk_node_ids, chunk_node_ids]).T + edge_ids = np.concatenate([edge_id_dict["in_connected"].copy(), + add_edge_ids]) + + graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( + edge_ids, make_directed=True) + + ccs = flatgraph_utils.connected_components(graph) + + if verbose: + self.logger.debug("CC in chunk: %.3fs" % (time.time() - time_start)) + + # Add rows for nodes that are in this chunk + # a connected component at a time + node_c = 0 # Just a counter for the log / speed measurement + + n_ccs = len(ccs) + + # Make parent id creation easier + chunk_id = u_node_chunk_ids[0] + parent_chunk_id = self.get_chunk_id( + layer=2, *self.get_chunk_coordinates(chunk_id)) + + parent_ids = self.get_unique_node_id_range(parent_chunk_id, step=n_ccs) + + time_start = time.time() + time_dict = collections.defaultdict(list) + + time_start_1 = time.time() + sparse_indices = {} + remapping = {} + for k in edge_id_dict.keys(): + # Circumvent datatype issues + + u_ids, inv_ids = np.unique(edge_id_dict[k], return_inverse=True) + mapped_ids = np.arange(len(u_ids), dtype=np.int32) + remapped_arr = mapped_ids[inv_ids].reshape(edge_id_dict[k].shape) + + sparse_indices[k] = compute_indices_pandas(remapped_arr) + remapping[k] = dict(zip(u_ids, mapped_ids)) + + time_dict["sparse_indices"].append(time.time() - time_start_1) + + rows = [] + + for i_cc, cc in enumerate(ccs): + node_ids = unique_graph_ids[cc] + + u_chunk_ids = np.unique([self.get_chunk_id(n) for n in node_ids]) + + if len(u_chunk_ids) > 1: + self.logger.error(f"Found multiple chunk ids: {u_chunk_ids}") + raise Exception() + + # Create parent id + parent_id = parent_ids[i_cc] + + parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2) + + # Add rows for nodes that 
are in this chunk + for i_node_id, node_id in enumerate(node_ids): + # Extract edges relevant to this node + + # in chunk + connected + time_start_2 = time.time() + if node_id in remapping["in_connected"]: + row_ids, column_ids = sparse_indices["in_connected"][remapping["in_connected"][node_id]] + + inv_column_ids = (column_ids + 1) % 2 + + connected_ids = edge_id_dict["in_connected"][row_ids, inv_column_ids] + connected_affs = edge_aff_dict["in_connected"][row_ids] + connected_areas = edge_area_dict["in_connected"][row_ids] + time_dict["in_connected"].append(time.time() - time_start_2) + time_start_2 = time.time() + else: + connected_ids = np.array([], dtype=np.uint64) + connected_affs = np.array([], dtype=np.float32) + connected_areas = np.array([], dtype=np.uint64) + + # in chunk + disconnected + if node_id in remapping["in_disconnected"]: + row_ids, column_ids = sparse_indices["in_disconnected"][remapping["in_disconnected"][node_id]] + inv_column_ids = (column_ids + 1) % 2 + + disconnected_ids = edge_id_dict["in_disconnected"][row_ids, inv_column_ids] + disconnected_affs = edge_aff_dict["in_disconnected"][row_ids] + disconnected_areas = edge_area_dict["in_disconnected"][row_ids] + time_dict["in_disconnected"].append(time.time() - time_start_2) + time_start_2 = time.time() + else: + disconnected_ids = np.array([], dtype=np.uint64) + disconnected_affs = np.array([], dtype=np.float32) + disconnected_areas = np.array([], dtype=np.uint64) + + # out chunk + connected + if node_id in remapping["between_connected"]: + row_ids, column_ids = sparse_indices["between_connected"][remapping["between_connected"][node_id]] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["out_connected_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + connected_ids = np.concatenate([connected_ids, edge_id_dict["between_connected"][row_ids, inv_column_ids]]) + connected_affs = 
np.concatenate([connected_affs, edge_aff_dict["between_connected"][row_ids]]) + connected_areas = np.concatenate([connected_areas, edge_area_dict["between_connected"][row_ids]]) + + parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["between_connected"][row_ids]]) + + time_dict["out_connected"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # out chunk + disconnected + if node_id in remapping["between_disconnected"]: + row_ids, column_ids = sparse_indices["between_disconnected"][remapping["between_disconnected"][node_id]] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["out_disconnected_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + disconnected_ids = np.concatenate([disconnected_ids, edge_id_dict["between_disconnected"][row_ids, inv_column_ids]]) + disconnected_affs = np.concatenate([disconnected_affs, edge_aff_dict["between_disconnected"][row_ids]]) + disconnected_areas = np.concatenate([disconnected_areas, edge_area_dict["between_disconnected"][row_ids]]) + + time_dict["out_disconnected"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # cross + if node_id in remapping["cross"]: + row_ids, column_ids = sparse_indices["cross"][remapping["cross"][node_id]] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["cross_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + connected_ids = np.concatenate([connected_ids, edge_id_dict["cross"][row_ids, inv_column_ids]]) + connected_affs = np.concatenate([connected_affs, np.full((len(row_ids)), np.inf, dtype=np.float32)]) + connected_areas = np.concatenate([connected_areas, np.ones((len(row_ids)), dtype=np.uint64)]) + + parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["cross"][row_ids]]) + time_dict["cross"].append(time.time() - 
time_start_2) + time_start_2 = time.time() + + # Create node + partners = np.concatenate([connected_ids, disconnected_ids]) + affinities = np.concatenate([connected_affs, disconnected_affs]) + areas = np.concatenate([connected_areas, disconnected_areas]) + connected = np.arange(len(connected_ids), dtype=np.int) + + val_dict = {column_keys.Connectivity.Partner: partners, + column_keys.Connectivity.Affinity: affinities, + column_keys.Connectivity.Area: areas, + column_keys.Connectivity.Connected: connected, + column_keys.Hierarchy.Parent: parent_id} + + rows.append(self.mutate_row(serializers.serialize_uint64(node_id), + val_dict, time_stamp=time_stamp)) + node_c += 1 + time_dict["creating_lv1_row"].append(time.time() - time_start_2) + + time_start_1 = time.time() + # Create parent node + rows.append(self.mutate_row(serializers.serialize_uint64(parent_id), + {column_keys.Hierarchy.Child: node_ids}, + time_stamp=time_stamp)) + + time_dict["creating_lv2_row"].append(time.time() - time_start_1) + time_start_1 = time.time() + + cce_layers = self.get_cross_chunk_edges_layer(parent_cross_edges) + u_cce_layers = np.unique(cce_layers) + + val_dict = {} + for cc_layer in u_cce_layers: + layer_cross_edges = parent_cross_edges[cce_layers == cc_layer] + + if len(layer_cross_edges) > 0: + val_dict[column_keys.Connectivity.CrossChunkEdge[cc_layer]] = \ + layer_cross_edges + + if len(val_dict) > 0: + rows.append(self.mutate_row(serializers.serialize_uint64(parent_id), + val_dict, time_stamp=time_stamp)) + node_c += 1 + + time_dict["adding_cross_edges"].append(time.time() - time_start_1) + + if len(rows) > 100000: + time_start_1 = time.time() + self.bulk_write(rows) + time_dict["writing"].append(time.time() - time_start_1) + + if len(rows) > 0: + time_start_1 = time.time() + self.bulk_write(rows) + time_dict["writing"].append(time.time() - time_start_1) + + if verbose: + self.logger.debug("Time creating rows: %.3fs for %d ccs with %d nodes" % + (time.time() - time_start, len(ccs), 
node_c)) + + for k in time_dict.keys(): + self.logger.debug("%s -- %.3fms for %d instances -- avg = %.3fms" % + (k, np.sum(time_dict[k])*1000, len(time_dict[k]), + np.mean(time_dict[k])*1000)) + def add_layer(self, layer_id: int, child_chunk_coords: Sequence[Sequence[int]], time_stamp: Optional[datetime.datetime] = None, From f5b7bd7c2b89c3e3347ce8eab37b58d405778acd Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 12 Jul 2019 15:27:27 -0400 Subject: [PATCH 0032/1097] wip: remove redundant unique chunk id check --- pychunkedgraph/backend/chunkedgraph.py | 39 +++++++++----------------- 1 file changed, 14 insertions(+), 25 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 607dfab04..f3bf0a296 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -1813,38 +1813,37 @@ def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, edge_aff_dict.get(key, empty_edges_array.copy(), empty_edges_array.copy())) - time_start = time.time() - # Get connected component within the chunk chunk_node_ids = np.concatenate([ isolated_node_ids.astype(np.uint64), - np.unique(edge_id_dict["in_connected"]), - np.unique(edge_id_dict["in_disconnected"]), np.unique(edge_id_dict["cross"][:, 0]), np.unique(edge_id_dict["between_connected"][:, 0]), np.unique(edge_id_dict["between_disconnected"][:, 0])]) + add_edge_ids = np.vstack([chunk_node_ids.copy(), chunk_node_ids.copy()]).T + + chunk_node_ids = np.concatenate([ + chunk_node_ids, + np.unique(edge_id_dict["in_connected"]), + np.unique(edge_id_dict["in_disconnected"])]) # nothing to do if not len(chunk_node_ids): return 0 chunk_node_ids = np.unique(chunk_node_ids) + node_chunk_ids = np.array( + [self.get_chunk_id(c) for c in chunk_node_ids], dtype=np.uint64) - node_chunk_ids = np.array([self.get_chunk_id(c) - for c in chunk_node_ids], - dtype=np.uint64) - - u_node_chunk_ids, c_node_chunk_ids = np.unique(node_chunk_ids, - 
return_counts=True) + u_node_chunk_ids, c_node_chunk_ids = np.unique( + node_chunk_ids, return_counts=True) if len(u_node_chunk_ids) > 1: raise Exception("%d: %d chunk ids found in node id list. " "Some edges might be in the wrong order. " "Number of occurences:" % (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) - add_edge_ids = np.vstack([chunk_node_ids, chunk_node_ids]).T - edge_ids = np.concatenate([edge_id_dict["in_connected"].copy(), - add_edge_ids]) + edge_ids = np.concatenate( + [edge_id_dict["in_connected"].copy(), add_edge_ids]) graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( edge_ids, make_directed=True) @@ -1861,9 +1860,8 @@ def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, n_ccs = len(ccs) # Make parent id creation easier - chunk_id = u_node_chunk_ids[0] parent_chunk_id = self.get_chunk_id( - layer=2, *self.get_chunk_coordinates(chunk_id)) + layer=2, *self.get_chunk_coordinates(u_node_chunk_ids[0])) parent_ids = self.get_unique_node_id_range(parent_chunk_id, step=n_ccs) @@ -1888,20 +1886,11 @@ def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, rows = [] for i_cc, cc in enumerate(ccs): - node_ids = unique_graph_ids[cc] - - u_chunk_ids = np.unique([self.get_chunk_id(n) for n in node_ids]) - - if len(u_chunk_ids) > 1: - self.logger.error(f"Found multiple chunk ids: {u_chunk_ids}") - raise Exception() - - # Create parent id parent_id = parent_ids[i_cc] - parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2) # Add rows for nodes that are in this chunk + node_ids = unique_graph_ids[cc] for i_node_id, node_id in enumerate(node_ids): # Extract edges relevant to this node From 40dc4019d1a16b57709a53d10a30cb3eccf91394 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 12 Jul 2019 15:45:57 -0400 Subject: [PATCH 0033/1097] wip: revert mistake, add more comments --- pychunkedgraph/backend/chunkedgraph.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git 
a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index f3bf0a296..d461e9754 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -1816,18 +1816,12 @@ def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, # Get connected component within the chunk chunk_node_ids = np.concatenate([ isolated_node_ids.astype(np.uint64), + np.unique(edge_id_dict["in_connected"]), + np.unique(edge_id_dict["in_disconnected"]), np.unique(edge_id_dict["cross"][:, 0]), np.unique(edge_id_dict["between_connected"][:, 0]), np.unique(edge_id_dict["between_disconnected"][:, 0])]) - - add_edge_ids = np.vstack([chunk_node_ids.copy(), chunk_node_ids.copy()]).T - - chunk_node_ids = np.concatenate([ - chunk_node_ids, - np.unique(edge_id_dict["in_connected"]), - np.unique(edge_id_dict["in_disconnected"])]) - # nothing to do if not len(chunk_node_ids): return 0 chunk_node_ids = np.unique(chunk_node_ids) @@ -1842,12 +1836,16 @@ def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, "Number of occurences:" % (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) - edge_ids = np.concatenate( - [edge_id_dict["in_connected"].copy(), add_edge_ids]) + + # add self edge to all node_ids to make sure they're + # part of connected components because the graph is processed component wise + # if not, the node_ids won't be stored + edge_ids = np.concatenate([ + edge_id_dict["in_connected"].copy(), + np.vstack([chunk_node_ids, chunk_node_ids]).T]) graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( edge_ids, make_directed=True) - ccs = flatgraph_utils.connected_components(graph) if verbose: From 19ed9c8f39288dd5aa6530c2d526d71d21e5734d Mon Sep 17 00:00:00 2001 From: Forrest Collman Date: Tue, 16 Jul 2019 14:45:06 -0700 Subject: [PATCH 0034/1097] making password optional (#151) --- rq_workers/mesh_worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rq_workers/mesh_worker.py 
b/rq_workers/mesh_worker.py index 2a0ececda..686d3aef8 100644 --- a/rq_workers/mesh_worker.py +++ b/rq_workers/mesh_worker.py @@ -7,7 +7,7 @@ REDIS_PORT = os.environ.get('REDIS_SERVICE_PORT') REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD') if REDIS_PASSWORD is None: - REDIS_URL = f'redis://@{REDIS_HOST}:{REDIS_PORT}/0' + REDIS_URL = f'redis://:{REDIS_HOST}:{REDIS_PORT}/0' else: REDIS_URL = f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0' From 52f891301312432fdf0d9807670e7f274277c599 Mon Sep 17 00:00:00 2001 From: Sven Dorkenwald Date: Tue, 16 Jul 2019 14:47:40 -0700 Subject: [PATCH 0035/1097] Version bump --- pychunkedgraph/__init__.py | 2 +- pychunkedgraph/app/cg_app_blueprint.py | 2 +- pychunkedgraph/app/meshing_app_blueprint.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pychunkedgraph/__init__.py b/pychunkedgraph/__init__.py index f221d99f1..af7928b2c 100644 --- a/pychunkedgraph/__init__.py +++ b/pychunkedgraph/__init__.py @@ -1 +1 @@ -__version__ = 'fafb.1.21' \ No newline at end of file +__version__ = 'fafb.1.16' diff --git a/pychunkedgraph/app/cg_app_blueprint.py b/pychunkedgraph/app/cg_app_blueprint.py index 96bff589d..ae47bbfa4 100644 --- a/pychunkedgraph/app/cg_app_blueprint.py +++ b/pychunkedgraph/app/cg_app_blueprint.py @@ -16,7 +16,7 @@ chunkedgraph_comp as cg_comp from middle_auth_client import auth_required, auth_requires_roles -__version__ = 'fafb.1.21' +__version__ = 'fafb.1.16' bp = Blueprint('pychunkedgraph', __name__, url_prefix="/segmentation") # ------------------------------- diff --git a/pychunkedgraph/app/meshing_app_blueprint.py b/pychunkedgraph/app/meshing_app_blueprint.py index 0cabcdc32..48bc29e7e 100644 --- a/pychunkedgraph/app/meshing_app_blueprint.py +++ b/pychunkedgraph/app/meshing_app_blueprint.py @@ -8,7 +8,7 @@ from pychunkedgraph.app import app_utils from pychunkedgraph.backend import chunkedgraph -__version__ = 'fafb.1.21' +__version__ = 'fafb.1.16' bp = Blueprint('pychunkedgraph_meshing', 
__name__, url_prefix="/meshing") # ------------------------------- From 417655c51128191a2e2326af644698ea966e83b7 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 19 Jul 2019 15:46:00 -0400 Subject: [PATCH 0036/1097] move add edges to separate module --- pychunkedgraph/backend/chunkedgraph.py | 278 -------------------- pychunkedgraph/backend/chunkedgraph_init.py | 17 +- pychunkedgraph/ingest/chunkEdges.proto | 9 + 3 files changed, 12 insertions(+), 292 deletions(-) create mode 100644 pychunkedgraph/ingest/chunkEdges.proto diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index d461e9754..3c315c089 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -1763,284 +1763,6 @@ def add_atomic_edges_in_chunks(self, edge_id_dict: dict, (k, np.sum(time_dict[k])*1000, len(time_dict[k]), np.mean(time_dict[k])*1000)) - def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, - edge_aff_dict: dict, edge_area_dict: dict, - isolated_node_ids: Sequence[np.uint64], - verbose: bool = True, - time_stamp: Optional[datetime.datetime] = None): - """ Creates atomic nodes in first abstraction layer for a SINGLE chunk - and all abstract nodes in the second for the same chunk - - Alle edges (edge_ids) need to be from one chunk and no nodes should - exist for this chunk prior to calling this function. 
All cross edges - (cross_edge_ids) have to point out the chunk (first entry is the id - within the chunk) - - :param edge_id_dict: dict - :param edge_aff_dict: dict - :param edge_area_dict: dict - :param isolated_node_ids: list of uint64s - ids of nodes that have no edge in the chunked graph - :param verbose: bool - :param time_stamp: datetime - """ - if time_stamp is None: - time_stamp = datetime.datetime.utcnow() - - if time_stamp.tzinfo is None: - time_stamp = UTC.localize(time_stamp) - - # Comply to resolution of BigTables TimeRange - time_stamp = get_google_compatible_time_stamp(time_stamp, - round_up=False) - - edge_aff_keys = [ - 'in_connected','in_disconnected','between_connected','between_disconnected'] - edge_id_keys = edge_aff_keys[:].insert(2, 'cross') - - # Check if keys exist and include an empty array if not - n_edge_ids = 0 - empty_edges_array = np.array([], dtype=np.uint64).reshape(0, 2) - - for key in edge_id_keys: - edge_id_dict[key] = np.concatenate( - edge_id_dict.get(key, empty_edges_array.copy(), - empty_edges_array.copy())) - n_edge_ids += len(edge_id_dict[edge_id_key]) - - for key in edge_aff_keys: - edge_aff_dict[key] = np.concatenate( - edge_aff_dict.get(key, empty_edges_array.copy(), - empty_edges_array.copy())) - - # Get connected component within the chunk - chunk_node_ids = np.concatenate([ - isolated_node_ids.astype(np.uint64), - np.unique(edge_id_dict["in_connected"]), - np.unique(edge_id_dict["in_disconnected"]), - np.unique(edge_id_dict["cross"][:, 0]), - np.unique(edge_id_dict["between_connected"][:, 0]), - np.unique(edge_id_dict["between_disconnected"][:, 0])]) - - if not len(chunk_node_ids): return 0 - - chunk_node_ids = np.unique(chunk_node_ids) - node_chunk_ids = np.array( - [self.get_chunk_id(c) for c in chunk_node_ids], dtype=np.uint64) - - u_node_chunk_ids, c_node_chunk_ids = np.unique( - node_chunk_ids, return_counts=True) - if len(u_node_chunk_ids) > 1: - raise Exception("%d: %d chunk ids found in node id list. 
" - "Some edges might be in the wrong order. " - "Number of occurences:" % - (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) - - - # add self edge to all node_ids to make sure they're - # part of connected components because the graph is processed component wise - # if not, the node_ids won't be stored - edge_ids = np.concatenate([ - edge_id_dict["in_connected"].copy(), - np.vstack([chunk_node_ids, chunk_node_ids]).T]) - - graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( - edge_ids, make_directed=True) - ccs = flatgraph_utils.connected_components(graph) - - if verbose: - self.logger.debug("CC in chunk: %.3fs" % (time.time() - time_start)) - - # Add rows for nodes that are in this chunk - # a connected component at a time - node_c = 0 # Just a counter for the log / speed measurement - - n_ccs = len(ccs) - - # Make parent id creation easier - parent_chunk_id = self.get_chunk_id( - layer=2, *self.get_chunk_coordinates(u_node_chunk_ids[0])) - - parent_ids = self.get_unique_node_id_range(parent_chunk_id, step=n_ccs) - - time_start = time.time() - time_dict = collections.defaultdict(list) - - time_start_1 = time.time() - sparse_indices = {} - remapping = {} - for k in edge_id_dict.keys(): - # Circumvent datatype issues - - u_ids, inv_ids = np.unique(edge_id_dict[k], return_inverse=True) - mapped_ids = np.arange(len(u_ids), dtype=np.int32) - remapped_arr = mapped_ids[inv_ids].reshape(edge_id_dict[k].shape) - - sparse_indices[k] = compute_indices_pandas(remapped_arr) - remapping[k] = dict(zip(u_ids, mapped_ids)) - - time_dict["sparse_indices"].append(time.time() - time_start_1) - - rows = [] - - for i_cc, cc in enumerate(ccs): - parent_id = parent_ids[i_cc] - parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2) - - # Add rows for nodes that are in this chunk - node_ids = unique_graph_ids[cc] - for i_node_id, node_id in enumerate(node_ids): - # Extract edges relevant to this node - - # in chunk + connected - time_start_2 = time.time() - if 
node_id in remapping["in_connected"]: - row_ids, column_ids = sparse_indices["in_connected"][remapping["in_connected"][node_id]] - - inv_column_ids = (column_ids + 1) % 2 - - connected_ids = edge_id_dict["in_connected"][row_ids, inv_column_ids] - connected_affs = edge_aff_dict["in_connected"][row_ids] - connected_areas = edge_area_dict["in_connected"][row_ids] - time_dict["in_connected"].append(time.time() - time_start_2) - time_start_2 = time.time() - else: - connected_ids = np.array([], dtype=np.uint64) - connected_affs = np.array([], dtype=np.float32) - connected_areas = np.array([], dtype=np.uint64) - - # in chunk + disconnected - if node_id in remapping["in_disconnected"]: - row_ids, column_ids = sparse_indices["in_disconnected"][remapping["in_disconnected"][node_id]] - inv_column_ids = (column_ids + 1) % 2 - - disconnected_ids = edge_id_dict["in_disconnected"][row_ids, inv_column_ids] - disconnected_affs = edge_aff_dict["in_disconnected"][row_ids] - disconnected_areas = edge_area_dict["in_disconnected"][row_ids] - time_dict["in_disconnected"].append(time.time() - time_start_2) - time_start_2 = time.time() - else: - disconnected_ids = np.array([], dtype=np.uint64) - disconnected_affs = np.array([], dtype=np.float32) - disconnected_areas = np.array([], dtype=np.uint64) - - # out chunk + connected - if node_id in remapping["between_connected"]: - row_ids, column_ids = sparse_indices["between_connected"][remapping["between_connected"][node_id]] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["out_connected_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - connected_ids = np.concatenate([connected_ids, edge_id_dict["between_connected"][row_ids, inv_column_ids]]) - connected_affs = np.concatenate([connected_affs, edge_aff_dict["between_connected"][row_ids]]) - connected_areas = np.concatenate([connected_areas, edge_area_dict["between_connected"][row_ids]]) 
- - parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["between_connected"][row_ids]]) - - time_dict["out_connected"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # out chunk + disconnected - if node_id in remapping["between_disconnected"]: - row_ids, column_ids = sparse_indices["between_disconnected"][remapping["between_disconnected"][node_id]] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["out_disconnected_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - disconnected_ids = np.concatenate([disconnected_ids, edge_id_dict["between_disconnected"][row_ids, inv_column_ids]]) - disconnected_affs = np.concatenate([disconnected_affs, edge_aff_dict["between_disconnected"][row_ids]]) - disconnected_areas = np.concatenate([disconnected_areas, edge_area_dict["between_disconnected"][row_ids]]) - - time_dict["out_disconnected"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # cross - if node_id in remapping["cross"]: - row_ids, column_ids = sparse_indices["cross"][remapping["cross"][node_id]] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["cross_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - connected_ids = np.concatenate([connected_ids, edge_id_dict["cross"][row_ids, inv_column_ids]]) - connected_affs = np.concatenate([connected_affs, np.full((len(row_ids)), np.inf, dtype=np.float32)]) - connected_areas = np.concatenate([connected_areas, np.ones((len(row_ids)), dtype=np.uint64)]) - - parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["cross"][row_ids]]) - time_dict["cross"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # Create node - partners = np.concatenate([connected_ids, disconnected_ids]) - affinities = np.concatenate([connected_affs, 
disconnected_affs]) - areas = np.concatenate([connected_areas, disconnected_areas]) - connected = np.arange(len(connected_ids), dtype=np.int) - - val_dict = {column_keys.Connectivity.Partner: partners, - column_keys.Connectivity.Affinity: affinities, - column_keys.Connectivity.Area: areas, - column_keys.Connectivity.Connected: connected, - column_keys.Hierarchy.Parent: parent_id} - - rows.append(self.mutate_row(serializers.serialize_uint64(node_id), - val_dict, time_stamp=time_stamp)) - node_c += 1 - time_dict["creating_lv1_row"].append(time.time() - time_start_2) - - time_start_1 = time.time() - # Create parent node - rows.append(self.mutate_row(serializers.serialize_uint64(parent_id), - {column_keys.Hierarchy.Child: node_ids}, - time_stamp=time_stamp)) - - time_dict["creating_lv2_row"].append(time.time() - time_start_1) - time_start_1 = time.time() - - cce_layers = self.get_cross_chunk_edges_layer(parent_cross_edges) - u_cce_layers = np.unique(cce_layers) - - val_dict = {} - for cc_layer in u_cce_layers: - layer_cross_edges = parent_cross_edges[cce_layers == cc_layer] - - if len(layer_cross_edges) > 0: - val_dict[column_keys.Connectivity.CrossChunkEdge[cc_layer]] = \ - layer_cross_edges - - if len(val_dict) > 0: - rows.append(self.mutate_row(serializers.serialize_uint64(parent_id), - val_dict, time_stamp=time_stamp)) - node_c += 1 - - time_dict["adding_cross_edges"].append(time.time() - time_start_1) - - if len(rows) > 100000: - time_start_1 = time.time() - self.bulk_write(rows) - time_dict["writing"].append(time.time() - time_start_1) - - if len(rows) > 0: - time_start_1 = time.time() - self.bulk_write(rows) - time_dict["writing"].append(time.time() - time_start_1) - - if verbose: - self.logger.debug("Time creating rows: %.3fs for %d ccs with %d nodes" % - (time.time() - time_start, len(ccs), node_c)) - - for k in time_dict.keys(): - self.logger.debug("%s -- %.3fms for %d instances -- avg = %.3fms" % - (k, np.sum(time_dict[k])*1000, len(time_dict[k]), - 
np.mean(time_dict[k])*1000)) - def add_layer(self, layer_id: int, child_chunk_coords: Sequence[Sequence[int]], time_stamp: Optional[datetime.datetime] = None, diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index b6ea25196..a785f8fda 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -1,21 +1,12 @@ import time import datetime import os -import collections + from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, NamedTuple -import pytz import numpy as np from pychunkedgraph.backend import cutting, chunkedgraph_comp, flatgraph_utils -from pychunkedgraph.backend.utils import serializers, column_keys, row_keys, basetypes -from pychunkedgraph.backend.chunkedgraph_utils import compute_indices_pandas, \ - compute_bitmasks, get_google_compatible_time_stamp, \ - get_time_range_filter, get_time_range_and_column_filter, get_max_time, \ - combine_cross_chunk_edge_dicts, get_min_time, partial_row_data_to_column_dict - - -UTC = pytz.UTC def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, edge_aff_dict: dict, edge_area_dict: dict, @@ -60,7 +51,7 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, edge_id_dict[key] = np.concatenate( edge_id_dict.get(key, empty_edges_array.copy(), empty_edges_array.copy())) - n_edge_ids += len(edge_id_dict[key]) + n_edge_ids += len(edge_id_dict[edge_id_key]) for key in edge_aff_keys: edge_aff_dict[key] = np.concatenate( @@ -88,7 +79,7 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, raise Exception("%d: %d chunk ids found in node id list. " "Some edges might be in the wrong order. 
" "Number of occurences:" % - (u_node_chunk_ids[0], len(u_node_chunk_ids)), c_node_chunk_ids) + (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) # add self edge to all node_ids to make sure they're @@ -100,8 +91,6 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( edge_ids, make_directed=True) - - time_start = time.time() ccs = flatgraph_utils.connected_components(graph) if verbose: diff --git a/pychunkedgraph/ingest/chunkEdges.proto b/pychunkedgraph/ingest/chunkEdges.proto new file mode 100644 index 000000000..369fe9505 --- /dev/null +++ b/pychunkedgraph/ingest/chunkEdges.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package test; + +message Edges { + bytes edgeList = 1; + bytes affinities = 2; + bytes areas = 3; +} \ No newline at end of file From 5f3639de7fa2a4cdafadd1238d8588d4bc4ca27a Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 19 Jul 2019 16:10:34 -0400 Subject: [PATCH 0037/1097] remove edge stuff --- pychunkedgraph/backend/chunkedgraph_init.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index a785f8fda..b6ea25196 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -1,12 +1,21 @@ import time import datetime import os - +import collections from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, NamedTuple +import pytz import numpy as np from pychunkedgraph.backend import cutting, chunkedgraph_comp, flatgraph_utils +from pychunkedgraph.backend.utils import serializers, column_keys, row_keys, basetypes +from pychunkedgraph.backend.chunkedgraph_utils import compute_indices_pandas, \ + compute_bitmasks, get_google_compatible_time_stamp, \ + get_time_range_filter, get_time_range_and_column_filter, get_max_time, \ + combine_cross_chunk_edge_dicts, get_min_time, 
partial_row_data_to_column_dict + + +UTC = pytz.UTC def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, edge_aff_dict: dict, edge_area_dict: dict, @@ -51,7 +60,7 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, edge_id_dict[key] = np.concatenate( edge_id_dict.get(key, empty_edges_array.copy(), empty_edges_array.copy())) - n_edge_ids += len(edge_id_dict[edge_id_key]) + n_edge_ids += len(edge_id_dict[key]) for key in edge_aff_keys: edge_aff_dict[key] = np.concatenate( @@ -79,7 +88,7 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, raise Exception("%d: %d chunk ids found in node id list. " "Some edges might be in the wrong order. " "Number of occurences:" % - (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) + (u_node_chunk_ids[0], len(u_node_chunk_ids)), c_node_chunk_ids) # add self edge to all node_ids to make sure they're @@ -91,6 +100,8 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( edge_ids, make_directed=True) + + time_start = time.time() ccs = flatgraph_utils.connected_components(graph) if verbose: From 71793ed434150a47c91754ce1bd73187faa30f5f Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 22 Jul 2019 11:30:15 -0400 Subject: [PATCH 0038/1097] remove unused stuff --- pychunkedgraph/backend/chunkedgraph.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 3c315c089..cd4cc0e73 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3046,9 +3046,9 @@ def _get_subgraph_layer2_edges(node_ids) -> \ if verbose: time_start = time.time() - child_chunk_ids = self.get_chunk_ids_from_node_ids(child_ids) - u_ccids = np.unique(child_chunk_ids) - this_n_threads = np.min([int(len(u_ccids) // 50000) + 1, mu.n_cpus]) + + n_child_ids = len(child_ids) + this_n_threads = np.min([int(n_child_ids // 50000) + 1, 
mu.n_cpus]) edge_infos = mu.multithread_func( _get_subgraph_layer2_edges, From b94e167c823be5a933855a3598cde40e24298ca4 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 22 Jul 2019 14:38:00 -0400 Subject: [PATCH 0039/1097] wip: subpackage to segregate storage --- pychunkedgraph/backend/chunkedgraph.py | 20 ++++++++++++++++---- pychunkedgraph/io/gcs.py | 12 ++++++++++++ 2 files changed, 28 insertions(+), 4 deletions(-) create mode 100644 pychunkedgraph/io/gcs.py diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index cd4cc0e73..a5a9a9a5d 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3026,11 +3026,17 @@ def get_subgraph_edges(self, agglomeration_id: np.uint64, :return: edge list """ + # def _get_subgraph_layer2_edges(node_ids) -> \ + # Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: + # return self.get_subgraph_chunk(node_ids, + # connected_edges=connected_edges, + # time_stamp=time_stamp) + def _get_subgraph_layer2_edges(node_ids) -> \ Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: - return self.get_subgraph_chunk(node_ids, + return self.get_subgraph_chunk_v2(node_ids, connected_edges=connected_edges, - time_stamp=time_stamp) + time_stamp=time_stamp) time_stamp = self.read_node_id_row(agglomeration_id, columns=column_keys.Hierarchy.Child)[0].timestamp @@ -3046,9 +3052,15 @@ def _get_subgraph_layer2_edges(node_ids) -> \ if verbose: time_start = time.time() + child_chunk_ids = self.get_chunk_ids_from_node_ids(child_ids) + u_ccids = np.unique(child_chunk_ids) - n_child_ids = len(child_ids) - this_n_threads = np.min([int(n_child_ids // 50000) + 1, mu.n_cpus]) + # Make blocks of child ids that are in the same chunk + child_blocks = [] + for u_ccid in u_ccids: + child_blocks.append(child_ids[child_chunk_ids == u_ccid]) + + this_n_threads = np.min([int(len(u_ccids) // 50000) + 1, mu.n_cpus]) edge_infos = mu.multithread_func( 
_get_subgraph_layer2_edges, diff --git a/pychunkedgraph/io/gcs.py b/pychunkedgraph/io/gcs.py new file mode 100644 index 000000000..63ac7ece5 --- /dev/null +++ b/pychunkedgraph/io/gcs.py @@ -0,0 +1,12 @@ +''' +Functions to use when dealing with Google Cloud Storage +''' + +# TODO some funtions in ChunkedGraph +# should be class methods or util functions +# for now pass instance of ChunkedGraph + +def get_chunk_edges(cg, chunk_id): + chunk_coords = cg.get_chunk_coordinates(chunk_id) + chunk_str = repr(chunk)[1:-1].replace(', ','_') + fname = f'edges_{chunk_str}.data' From d07ecc0629f435ca61391ea41121490ed4d5b69a Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 22 Jul 2019 15:16:04 -0400 Subject: [PATCH 0040/1097] wip: updates --- pychunkedgraph/ingest/chunkEdges.proto | 9 --- pychunkedgraph/io/gcs.py | 22 ++++- pychunkedgraph/io/protobuf/chunkEdges_pb3.py | 84 ++++++++++++++++++++ 3 files changed, 104 insertions(+), 11 deletions(-) delete mode 100644 pychunkedgraph/ingest/chunkEdges.proto create mode 100644 pychunkedgraph/io/protobuf/chunkEdges_pb3.py diff --git a/pychunkedgraph/ingest/chunkEdges.proto b/pychunkedgraph/ingest/chunkEdges.proto deleted file mode 100644 index 369fe9505..000000000 --- a/pychunkedgraph/ingest/chunkEdges.proto +++ /dev/null @@ -1,9 +0,0 @@ -syntax = "proto3"; - -package test; - -message Edges { - bytes edgeList = 1; - bytes affinities = 2; - bytes areas = 3; -} \ No newline at end of file diff --git a/pychunkedgraph/io/gcs.py b/pychunkedgraph/io/gcs.py index 63ac7ece5..c8261aa5e 100644 --- a/pychunkedgraph/io/gcs.py +++ b/pychunkedgraph/io/gcs.py @@ -2,11 +2,29 @@ Functions to use when dealing with Google Cloud Storage ''' +import numpy as np +import zstandard as zstd + +from cloudvolume import Storage +from .protobuf.chunkEdges_pb3 import Edges + # TODO some funtions in ChunkedGraph # should be class methods or util functions # for now pass instance of ChunkedGraph -def get_chunk_edges(cg, chunk_id): +def get_chunk_edges(cg, 
chunk_id: np.uint64): chunk_coords = cg.get_chunk_coordinates(chunk_id) - chunk_str = repr(chunk)[1:-1].replace(', ','_') + chunk_str = '_'.join(str(coord) for coord in chunk_coords) fname = f'edges_{chunk_str}.data' + + edgesMessage = Edges() + with Storage(cg._cv_path) as st: + file_content = st.get_file(fname) + + file_content = zstd.ZstdDecompressor().decompressobj().decompress(file_content) + edgesMessage.ParseFromString(file_content) + edges = np.frombuffer(edgesMessage.edgeList) + areas = np.frombuffer(edgesMessage.areas, dtype=' Date: Thu, 11 Jul 2019 22:35:41 -0400 Subject: [PATCH 0041/1097] wip: refactor add_chunk_edges --- pychunkedgraph/backend/chunkedgraph.py | 291 +++++++++++++++++++++++++ 1 file changed, 291 insertions(+) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index a5a9a9a5d..22fbc8567 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -1763,6 +1763,297 @@ def add_atomic_edges_in_chunks(self, edge_id_dict: dict, (k, np.sum(time_dict[k])*1000, len(time_dict[k]), np.mean(time_dict[k])*1000)) + def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, + edge_aff_dict: dict, edge_area_dict: dict, + isolated_node_ids: Sequence[np.uint64], + verbose: bool = True, + time_stamp: Optional[datetime.datetime] = None): + """ Creates atomic nodes in first abstraction layer for a SINGLE chunk + and all abstract nodes in the second for the same chunk + + Alle edges (edge_ids) need to be from one chunk and no nodes should + exist for this chunk prior to calling this function. 
All cross edges + (cross_edge_ids) have to point out the chunk (first entry is the id + within the chunk) + + :param edge_id_dict: dict + :param edge_aff_dict: dict + :param edge_area_dict: dict + :param isolated_node_ids: list of uint64s + ids of nodes that have no edge in the chunked graph + :param verbose: bool + :param time_stamp: datetime + """ + if time_stamp is None: + time_stamp = datetime.datetime.utcnow() + + if time_stamp.tzinfo is None: + time_stamp = UTC.localize(time_stamp) + + # Comply to resolution of BigTables TimeRange + time_stamp = get_google_compatible_time_stamp(time_stamp, + round_up=False) + + edge_aff_keys = [ + 'in_connected','in_disconnected','between_connected','between_disconnected'] + edge_id_keys = edge_aff_keys[:].insert(2, 'cross') + + # Check if keys exist and include an empty array if not + n_edge_ids = 0 + empty_edges_array = np.array([], dtype=np.uint64).reshape(0, 2) + + for key in edge_id_keys: + edge_id_dict[key] = np.concatenate( + edge_id_dict.get(key, empty_edges_array.copy(), + empty_edges_array.copy())) + n_edge_ids += len(edge_id_dict[edge_id_key]) + + for key in edge_aff_keys: + edge_aff_dict[key] = np.concatenate( + edge_aff_dict.get(key, empty_edges_array.copy(), + empty_edges_array.copy())) + + time_start = time.time() + + # Get connected component within the chunk + chunk_node_ids = np.concatenate([ + isolated_node_ids.astype(np.uint64), + np.unique(edge_id_dict["in_connected"]), + np.unique(edge_id_dict["in_disconnected"]), + np.unique(edge_id_dict["cross"][:, 0]), + np.unique(edge_id_dict["between_connected"][:, 0]), + np.unique(edge_id_dict["between_disconnected"][:, 0])]) + + + # nothing to do + if not len(chunk_node_ids): return 0 + + chunk_node_ids = np.unique(chunk_node_ids) + + node_chunk_ids = np.array([self.get_chunk_id(c) + for c in chunk_node_ids], + dtype=np.uint64) + + u_node_chunk_ids, c_node_chunk_ids = np.unique(node_chunk_ids, + return_counts=True) + if len(u_node_chunk_ids) > 1: + raise 
Exception("%d: %d chunk ids found in node id list. " + "Some edges might be in the wrong order. " + "Number of occurences:" % + (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) + + add_edge_ids = np.vstack([chunk_node_ids, chunk_node_ids]).T + edge_ids = np.concatenate([edge_id_dict["in_connected"].copy(), + add_edge_ids]) + + graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( + edge_ids, make_directed=True) + + ccs = flatgraph_utils.connected_components(graph) + + if verbose: + self.logger.debug("CC in chunk: %.3fs" % (time.time() - time_start)) + + # Add rows for nodes that are in this chunk + # a connected component at a time + node_c = 0 # Just a counter for the log / speed measurement + + n_ccs = len(ccs) + + # Make parent id creation easier + chunk_id = u_node_chunk_ids[0] + parent_chunk_id = self.get_chunk_id( + layer=2, *self.get_chunk_coordinates(chunk_id)) + + parent_ids = self.get_unique_node_id_range(parent_chunk_id, step=n_ccs) + + time_start = time.time() + time_dict = collections.defaultdict(list) + + time_start_1 = time.time() + sparse_indices = {} + remapping = {} + for k in edge_id_dict.keys(): + # Circumvent datatype issues + + u_ids, inv_ids = np.unique(edge_id_dict[k], return_inverse=True) + mapped_ids = np.arange(len(u_ids), dtype=np.int32) + remapped_arr = mapped_ids[inv_ids].reshape(edge_id_dict[k].shape) + + sparse_indices[k] = compute_indices_pandas(remapped_arr) + remapping[k] = dict(zip(u_ids, mapped_ids)) + + time_dict["sparse_indices"].append(time.time() - time_start_1) + + rows = [] + + for i_cc, cc in enumerate(ccs): + node_ids = unique_graph_ids[cc] + + u_chunk_ids = np.unique([self.get_chunk_id(n) for n in node_ids]) + + if len(u_chunk_ids) > 1: + self.logger.error(f"Found multiple chunk ids: {u_chunk_ids}") + raise Exception() + + # Create parent id + parent_id = parent_ids[i_cc] + + parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2) + + # Add rows for nodes that are in this chunk + for i_node_id, 
node_id in enumerate(node_ids): + # Extract edges relevant to this node + + # in chunk + connected + time_start_2 = time.time() + if node_id in remapping["in_connected"]: + row_ids, column_ids = sparse_indices["in_connected"][remapping["in_connected"][node_id]] + + inv_column_ids = (column_ids + 1) % 2 + + connected_ids = edge_id_dict["in_connected"][row_ids, inv_column_ids] + connected_affs = edge_aff_dict["in_connected"][row_ids] + connected_areas = edge_area_dict["in_connected"][row_ids] + time_dict["in_connected"].append(time.time() - time_start_2) + time_start_2 = time.time() + else: + connected_ids = np.array([], dtype=np.uint64) + connected_affs = np.array([], dtype=np.float32) + connected_areas = np.array([], dtype=np.uint64) + + # in chunk + disconnected + if node_id in remapping["in_disconnected"]: + row_ids, column_ids = sparse_indices["in_disconnected"][remapping["in_disconnected"][node_id]] + inv_column_ids = (column_ids + 1) % 2 + + disconnected_ids = edge_id_dict["in_disconnected"][row_ids, inv_column_ids] + disconnected_affs = edge_aff_dict["in_disconnected"][row_ids] + disconnected_areas = edge_area_dict["in_disconnected"][row_ids] + time_dict["in_disconnected"].append(time.time() - time_start_2) + time_start_2 = time.time() + else: + disconnected_ids = np.array([], dtype=np.uint64) + disconnected_affs = np.array([], dtype=np.float32) + disconnected_areas = np.array([], dtype=np.uint64) + + # out chunk + connected + if node_id in remapping["between_connected"]: + row_ids, column_ids = sparse_indices["between_connected"][remapping["between_connected"][node_id]] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["out_connected_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + connected_ids = np.concatenate([connected_ids, edge_id_dict["between_connected"][row_ids, inv_column_ids]]) + connected_affs = np.concatenate([connected_affs, 
edge_aff_dict["between_connected"][row_ids]]) + connected_areas = np.concatenate([connected_areas, edge_area_dict["between_connected"][row_ids]]) + + parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["between_connected"][row_ids]]) + + time_dict["out_connected"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # out chunk + disconnected + if node_id in remapping["between_disconnected"]: + row_ids, column_ids = sparse_indices["between_disconnected"][remapping["between_disconnected"][node_id]] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["out_disconnected_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + disconnected_ids = np.concatenate([disconnected_ids, edge_id_dict["between_disconnected"][row_ids, inv_column_ids]]) + disconnected_affs = np.concatenate([disconnected_affs, edge_aff_dict["between_disconnected"][row_ids]]) + disconnected_areas = np.concatenate([disconnected_areas, edge_area_dict["between_disconnected"][row_ids]]) + + time_dict["out_disconnected"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # cross + if node_id in remapping["cross"]: + row_ids, column_ids = sparse_indices["cross"][remapping["cross"][node_id]] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["cross_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + connected_ids = np.concatenate([connected_ids, edge_id_dict["cross"][row_ids, inv_column_ids]]) + connected_affs = np.concatenate([connected_affs, np.full((len(row_ids)), np.inf, dtype=np.float32)]) + connected_areas = np.concatenate([connected_areas, np.ones((len(row_ids)), dtype=np.uint64)]) + + parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["cross"][row_ids]]) + time_dict["cross"].append(time.time() - time_start_2) + time_start_2 = 
time.time() + + # Create node + partners = np.concatenate([connected_ids, disconnected_ids]) + affinities = np.concatenate([connected_affs, disconnected_affs]) + areas = np.concatenate([connected_areas, disconnected_areas]) + connected = np.arange(len(connected_ids), dtype=np.int) + + val_dict = {column_keys.Connectivity.Partner: partners, + column_keys.Connectivity.Affinity: affinities, + column_keys.Connectivity.Area: areas, + column_keys.Connectivity.Connected: connected, + column_keys.Hierarchy.Parent: parent_id} + + rows.append(self.mutate_row(serializers.serialize_uint64(node_id), + val_dict, time_stamp=time_stamp)) + node_c += 1 + time_dict["creating_lv1_row"].append(time.time() - time_start_2) + + time_start_1 = time.time() + # Create parent node + rows.append(self.mutate_row(serializers.serialize_uint64(parent_id), + {column_keys.Hierarchy.Child: node_ids}, + time_stamp=time_stamp)) + + time_dict["creating_lv2_row"].append(time.time() - time_start_1) + time_start_1 = time.time() + + cce_layers = self.get_cross_chunk_edges_layer(parent_cross_edges) + u_cce_layers = np.unique(cce_layers) + + val_dict = {} + for cc_layer in u_cce_layers: + layer_cross_edges = parent_cross_edges[cce_layers == cc_layer] + + if len(layer_cross_edges) > 0: + val_dict[column_keys.Connectivity.CrossChunkEdge[cc_layer]] = \ + layer_cross_edges + + if len(val_dict) > 0: + rows.append(self.mutate_row(serializers.serialize_uint64(parent_id), + val_dict, time_stamp=time_stamp)) + node_c += 1 + + time_dict["adding_cross_edges"].append(time.time() - time_start_1) + + if len(rows) > 100000: + time_start_1 = time.time() + self.bulk_write(rows) + time_dict["writing"].append(time.time() - time_start_1) + + if len(rows) > 0: + time_start_1 = time.time() + self.bulk_write(rows) + time_dict["writing"].append(time.time() - time_start_1) + + if verbose: + self.logger.debug("Time creating rows: %.3fs for %d ccs with %d nodes" % + (time.time() - time_start, len(ccs), node_c)) + + for k in 
time_dict.keys(): + self.logger.debug("%s -- %.3fms for %d instances -- avg = %.3fms" % + (k, np.sum(time_dict[k])*1000, len(time_dict[k]), + np.mean(time_dict[k])*1000)) + def add_layer(self, layer_id: int, child_chunk_coords: Sequence[Sequence[int]], time_stamp: Optional[datetime.datetime] = None, From 181e6b5fe6e571e9b64468bd0814a23e4428e00a Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 12 Jul 2019 15:27:27 -0400 Subject: [PATCH 0042/1097] wip: remove redundant unique chunk id check --- pychunkedgraph/backend/chunkedgraph.py | 39 +++++++++----------------- 1 file changed, 14 insertions(+), 25 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 22fbc8567..c72e02b5e 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -1813,38 +1813,37 @@ def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, edge_aff_dict.get(key, empty_edges_array.copy(), empty_edges_array.copy())) - time_start = time.time() - # Get connected component within the chunk chunk_node_ids = np.concatenate([ isolated_node_ids.astype(np.uint64), - np.unique(edge_id_dict["in_connected"]), - np.unique(edge_id_dict["in_disconnected"]), np.unique(edge_id_dict["cross"][:, 0]), np.unique(edge_id_dict["between_connected"][:, 0]), np.unique(edge_id_dict["between_disconnected"][:, 0])]) + add_edge_ids = np.vstack([chunk_node_ids.copy(), chunk_node_ids.copy()]).T + + chunk_node_ids = np.concatenate([ + chunk_node_ids, + np.unique(edge_id_dict["in_connected"]), + np.unique(edge_id_dict["in_disconnected"])]) # nothing to do if not len(chunk_node_ids): return 0 chunk_node_ids = np.unique(chunk_node_ids) + node_chunk_ids = np.array( + [self.get_chunk_id(c) for c in chunk_node_ids], dtype=np.uint64) - node_chunk_ids = np.array([self.get_chunk_id(c) - for c in chunk_node_ids], - dtype=np.uint64) - - u_node_chunk_ids, c_node_chunk_ids = np.unique(node_chunk_ids, - return_counts=True) + 
u_node_chunk_ids, c_node_chunk_ids = np.unique( + node_chunk_ids, return_counts=True) if len(u_node_chunk_ids) > 1: raise Exception("%d: %d chunk ids found in node id list. " "Some edges might be in the wrong order. " "Number of occurences:" % (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) - add_edge_ids = np.vstack([chunk_node_ids, chunk_node_ids]).T - edge_ids = np.concatenate([edge_id_dict["in_connected"].copy(), - add_edge_ids]) + edge_ids = np.concatenate( + [edge_id_dict["in_connected"].copy(), add_edge_ids]) graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( edge_ids, make_directed=True) @@ -1861,9 +1860,8 @@ def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, n_ccs = len(ccs) # Make parent id creation easier - chunk_id = u_node_chunk_ids[0] parent_chunk_id = self.get_chunk_id( - layer=2, *self.get_chunk_coordinates(chunk_id)) + layer=2, *self.get_chunk_coordinates(u_node_chunk_ids[0])) parent_ids = self.get_unique_node_id_range(parent_chunk_id, step=n_ccs) @@ -1888,20 +1886,11 @@ def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, rows = [] for i_cc, cc in enumerate(ccs): - node_ids = unique_graph_ids[cc] - - u_chunk_ids = np.unique([self.get_chunk_id(n) for n in node_ids]) - - if len(u_chunk_ids) > 1: - self.logger.error(f"Found multiple chunk ids: {u_chunk_ids}") - raise Exception() - - # Create parent id parent_id = parent_ids[i_cc] - parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2) # Add rows for nodes that are in this chunk + node_ids = unique_graph_ids[cc] for i_node_id, node_id in enumerate(node_ids): # Extract edges relevant to this node From 29b394de9cd23124af2623e4040067a8a5ff67a5 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 12 Jul 2019 15:45:57 -0400 Subject: [PATCH 0043/1097] wip: revert mistake, add more comments --- pychunkedgraph/backend/chunkedgraph.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py 
b/pychunkedgraph/backend/chunkedgraph.py index c72e02b5e..5df6156b2 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -1816,18 +1816,12 @@ def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, # Get connected component within the chunk chunk_node_ids = np.concatenate([ isolated_node_ids.astype(np.uint64), + np.unique(edge_id_dict["in_connected"]), + np.unique(edge_id_dict["in_disconnected"]), np.unique(edge_id_dict["cross"][:, 0]), np.unique(edge_id_dict["between_connected"][:, 0]), np.unique(edge_id_dict["between_disconnected"][:, 0])]) - - add_edge_ids = np.vstack([chunk_node_ids.copy(), chunk_node_ids.copy()]).T - - chunk_node_ids = np.concatenate([ - chunk_node_ids, - np.unique(edge_id_dict["in_connected"]), - np.unique(edge_id_dict["in_disconnected"])]) - # nothing to do if not len(chunk_node_ids): return 0 chunk_node_ids = np.unique(chunk_node_ids) @@ -1842,12 +1836,16 @@ def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, "Number of occurences:" % (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) - edge_ids = np.concatenate( - [edge_id_dict["in_connected"].copy(), add_edge_ids]) + + # add self edge to all node_ids to make sure they're + # part of connected components because the graph is processed component wise + # if not, the node_ids won't be stored + edge_ids = np.concatenate([ + edge_id_dict["in_connected"].copy(), + np.vstack([chunk_node_ids, chunk_node_ids]).T]) graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( edge_ids, make_directed=True) - ccs = flatgraph_utils.connected_components(graph) if verbose: From 866baabd7ab851cde56a2d9ff87d259df4a626d5 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 19 Jul 2019 15:46:00 -0400 Subject: [PATCH 0044/1097] move add edges to separate module --- pychunkedgraph/backend/chunkedgraph.py | 278 -------------------- pychunkedgraph/backend/chunkedgraph_init.py | 112 ++++++-- pychunkedgraph/ingest/chunkEdges.proto | 9 + 3 files 
changed, 106 insertions(+), 293 deletions(-) create mode 100644 pychunkedgraph/ingest/chunkEdges.proto diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 5df6156b2..a5a9a9a5d 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -1763,284 +1763,6 @@ def add_atomic_edges_in_chunks(self, edge_id_dict: dict, (k, np.sum(time_dict[k])*1000, len(time_dict[k]), np.mean(time_dict[k])*1000)) - def add_atomic_edges_in_chunks_v2(self, edge_id_dict: dict, - edge_aff_dict: dict, edge_area_dict: dict, - isolated_node_ids: Sequence[np.uint64], - verbose: bool = True, - time_stamp: Optional[datetime.datetime] = None): - """ Creates atomic nodes in first abstraction layer for a SINGLE chunk - and all abstract nodes in the second for the same chunk - - Alle edges (edge_ids) need to be from one chunk and no nodes should - exist for this chunk prior to calling this function. All cross edges - (cross_edge_ids) have to point out the chunk (first entry is the id - within the chunk) - - :param edge_id_dict: dict - :param edge_aff_dict: dict - :param edge_area_dict: dict - :param isolated_node_ids: list of uint64s - ids of nodes that have no edge in the chunked graph - :param verbose: bool - :param time_stamp: datetime - """ - if time_stamp is None: - time_stamp = datetime.datetime.utcnow() - - if time_stamp.tzinfo is None: - time_stamp = UTC.localize(time_stamp) - - # Comply to resolution of BigTables TimeRange - time_stamp = get_google_compatible_time_stamp(time_stamp, - round_up=False) - - edge_aff_keys = [ - 'in_connected','in_disconnected','between_connected','between_disconnected'] - edge_id_keys = edge_aff_keys[:].insert(2, 'cross') - - # Check if keys exist and include an empty array if not - n_edge_ids = 0 - empty_edges_array = np.array([], dtype=np.uint64).reshape(0, 2) - - for key in edge_id_keys: - edge_id_dict[key] = np.concatenate( - edge_id_dict.get(key, empty_edges_array.copy(), - 
empty_edges_array.copy())) - n_edge_ids += len(edge_id_dict[edge_id_key]) - - for key in edge_aff_keys: - edge_aff_dict[key] = np.concatenate( - edge_aff_dict.get(key, empty_edges_array.copy(), - empty_edges_array.copy())) - - # Get connected component within the chunk - chunk_node_ids = np.concatenate([ - isolated_node_ids.astype(np.uint64), - np.unique(edge_id_dict["in_connected"]), - np.unique(edge_id_dict["in_disconnected"]), - np.unique(edge_id_dict["cross"][:, 0]), - np.unique(edge_id_dict["between_connected"][:, 0]), - np.unique(edge_id_dict["between_disconnected"][:, 0])]) - - if not len(chunk_node_ids): return 0 - - chunk_node_ids = np.unique(chunk_node_ids) - node_chunk_ids = np.array( - [self.get_chunk_id(c) for c in chunk_node_ids], dtype=np.uint64) - - u_node_chunk_ids, c_node_chunk_ids = np.unique( - node_chunk_ids, return_counts=True) - if len(u_node_chunk_ids) > 1: - raise Exception("%d: %d chunk ids found in node id list. " - "Some edges might be in the wrong order. " - "Number of occurences:" % - (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) - - - # add self edge to all node_ids to make sure they're - # part of connected components because the graph is processed component wise - # if not, the node_ids won't be stored - edge_ids = np.concatenate([ - edge_id_dict["in_connected"].copy(), - np.vstack([chunk_node_ids, chunk_node_ids]).T]) - - graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( - edge_ids, make_directed=True) - ccs = flatgraph_utils.connected_components(graph) - - if verbose: - self.logger.debug("CC in chunk: %.3fs" % (time.time() - time_start)) - - # Add rows for nodes that are in this chunk - # a connected component at a time - node_c = 0 # Just a counter for the log / speed measurement - - n_ccs = len(ccs) - - # Make parent id creation easier - parent_chunk_id = self.get_chunk_id( - layer=2, *self.get_chunk_coordinates(u_node_chunk_ids[0])) - - parent_ids = self.get_unique_node_id_range(parent_chunk_id, step=n_ccs) 
- - time_start = time.time() - time_dict = collections.defaultdict(list) - - time_start_1 = time.time() - sparse_indices = {} - remapping = {} - for k in edge_id_dict.keys(): - # Circumvent datatype issues - - u_ids, inv_ids = np.unique(edge_id_dict[k], return_inverse=True) - mapped_ids = np.arange(len(u_ids), dtype=np.int32) - remapped_arr = mapped_ids[inv_ids].reshape(edge_id_dict[k].shape) - - sparse_indices[k] = compute_indices_pandas(remapped_arr) - remapping[k] = dict(zip(u_ids, mapped_ids)) - - time_dict["sparse_indices"].append(time.time() - time_start_1) - - rows = [] - - for i_cc, cc in enumerate(ccs): - parent_id = parent_ids[i_cc] - parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2) - - # Add rows for nodes that are in this chunk - node_ids = unique_graph_ids[cc] - for i_node_id, node_id in enumerate(node_ids): - # Extract edges relevant to this node - - # in chunk + connected - time_start_2 = time.time() - if node_id in remapping["in_connected"]: - row_ids, column_ids = sparse_indices["in_connected"][remapping["in_connected"][node_id]] - - inv_column_ids = (column_ids + 1) % 2 - - connected_ids = edge_id_dict["in_connected"][row_ids, inv_column_ids] - connected_affs = edge_aff_dict["in_connected"][row_ids] - connected_areas = edge_area_dict["in_connected"][row_ids] - time_dict["in_connected"].append(time.time() - time_start_2) - time_start_2 = time.time() - else: - connected_ids = np.array([], dtype=np.uint64) - connected_affs = np.array([], dtype=np.float32) - connected_areas = np.array([], dtype=np.uint64) - - # in chunk + disconnected - if node_id in remapping["in_disconnected"]: - row_ids, column_ids = sparse_indices["in_disconnected"][remapping["in_disconnected"][node_id]] - inv_column_ids = (column_ids + 1) % 2 - - disconnected_ids = edge_id_dict["in_disconnected"][row_ids, inv_column_ids] - disconnected_affs = edge_aff_dict["in_disconnected"][row_ids] - disconnected_areas = edge_area_dict["in_disconnected"][row_ids] - 
time_dict["in_disconnected"].append(time.time() - time_start_2) - time_start_2 = time.time() - else: - disconnected_ids = np.array([], dtype=np.uint64) - disconnected_affs = np.array([], dtype=np.float32) - disconnected_areas = np.array([], dtype=np.uint64) - - # out chunk + connected - if node_id in remapping["between_connected"]: - row_ids, column_ids = sparse_indices["between_connected"][remapping["between_connected"][node_id]] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["out_connected_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - connected_ids = np.concatenate([connected_ids, edge_id_dict["between_connected"][row_ids, inv_column_ids]]) - connected_affs = np.concatenate([connected_affs, edge_aff_dict["between_connected"][row_ids]]) - connected_areas = np.concatenate([connected_areas, edge_area_dict["between_connected"][row_ids]]) - - parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["between_connected"][row_ids]]) - - time_dict["out_connected"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # out chunk + disconnected - if node_id in remapping["between_disconnected"]: - row_ids, column_ids = sparse_indices["between_disconnected"][remapping["between_disconnected"][node_id]] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["out_disconnected_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - disconnected_ids = np.concatenate([disconnected_ids, edge_id_dict["between_disconnected"][row_ids, inv_column_ids]]) - disconnected_affs = np.concatenate([disconnected_affs, edge_aff_dict["between_disconnected"][row_ids]]) - disconnected_areas = np.concatenate([disconnected_areas, edge_area_dict["between_disconnected"][row_ids]]) - - time_dict["out_disconnected"].append(time.time() - time_start_2) - time_start_2 = 
time.time() - - # cross - if node_id in remapping["cross"]: - row_ids, column_ids = sparse_indices["cross"][remapping["cross"][node_id]] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["cross_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - connected_ids = np.concatenate([connected_ids, edge_id_dict["cross"][row_ids, inv_column_ids]]) - connected_affs = np.concatenate([connected_affs, np.full((len(row_ids)), np.inf, dtype=np.float32)]) - connected_areas = np.concatenate([connected_areas, np.ones((len(row_ids)), dtype=np.uint64)]) - - parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["cross"][row_ids]]) - time_dict["cross"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # Create node - partners = np.concatenate([connected_ids, disconnected_ids]) - affinities = np.concatenate([connected_affs, disconnected_affs]) - areas = np.concatenate([connected_areas, disconnected_areas]) - connected = np.arange(len(connected_ids), dtype=np.int) - - val_dict = {column_keys.Connectivity.Partner: partners, - column_keys.Connectivity.Affinity: affinities, - column_keys.Connectivity.Area: areas, - column_keys.Connectivity.Connected: connected, - column_keys.Hierarchy.Parent: parent_id} - - rows.append(self.mutate_row(serializers.serialize_uint64(node_id), - val_dict, time_stamp=time_stamp)) - node_c += 1 - time_dict["creating_lv1_row"].append(time.time() - time_start_2) - - time_start_1 = time.time() - # Create parent node - rows.append(self.mutate_row(serializers.serialize_uint64(parent_id), - {column_keys.Hierarchy.Child: node_ids}, - time_stamp=time_stamp)) - - time_dict["creating_lv2_row"].append(time.time() - time_start_1) - time_start_1 = time.time() - - cce_layers = self.get_cross_chunk_edges_layer(parent_cross_edges) - u_cce_layers = np.unique(cce_layers) - - val_dict = {} - for cc_layer in u_cce_layers: - layer_cross_edges = 
parent_cross_edges[cce_layers == cc_layer] - - if len(layer_cross_edges) > 0: - val_dict[column_keys.Connectivity.CrossChunkEdge[cc_layer]] = \ - layer_cross_edges - - if len(val_dict) > 0: - rows.append(self.mutate_row(serializers.serialize_uint64(parent_id), - val_dict, time_stamp=time_stamp)) - node_c += 1 - - time_dict["adding_cross_edges"].append(time.time() - time_start_1) - - if len(rows) > 100000: - time_start_1 = time.time() - self.bulk_write(rows) - time_dict["writing"].append(time.time() - time_start_1) - - if len(rows) > 0: - time_start_1 = time.time() - self.bulk_write(rows) - time_dict["writing"].append(time.time() - time_start_1) - - if verbose: - self.logger.debug("Time creating rows: %.3fs for %d ccs with %d nodes" % - (time.time() - time_start, len(ccs), node_c)) - - for k in time_dict.keys(): - self.logger.debug("%s -- %.3fms for %d instances -- avg = %.3fms" % - (k, np.sum(time_dict[k])*1000, len(time_dict[k]), - np.mean(time_dict[k])*1000)) - def add_layer(self, layer_id: int, child_chunk_coords: Sequence[Sequence[int]], time_stamp: Optional[datetime.datetime] = None, diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index b6ea25196..1557ab14c 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -1,21 +1,12 @@ import time import datetime import os -import collections + from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, NamedTuple -import pytz import numpy as np from pychunkedgraph.backend import cutting, chunkedgraph_comp, flatgraph_utils -from pychunkedgraph.backend.utils import serializers, column_keys, row_keys, basetypes -from pychunkedgraph.backend.chunkedgraph_utils import compute_indices_pandas, \ - compute_bitmasks, get_google_compatible_time_stamp, \ - get_time_range_filter, get_time_range_and_column_filter, get_max_time, \ - combine_cross_chunk_edge_dicts, get_min_time, 
partial_row_data_to_column_dict - - -UTC = pytz.UTC def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, edge_aff_dict: dict, edge_area_dict: dict, @@ -60,7 +51,7 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, edge_id_dict[key] = np.concatenate( edge_id_dict.get(key, empty_edges_array.copy(), empty_edges_array.copy())) - n_edge_ids += len(edge_id_dict[key]) + n_edge_ids += len(edge_id_dict[edge_id_key]) for key in edge_aff_keys: edge_aff_dict[key] = np.concatenate( @@ -88,7 +79,7 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, raise Exception("%d: %d chunk ids found in node id list. " "Some edges might be in the wrong order. " "Number of occurences:" % - (u_node_chunk_ids[0], len(u_node_chunk_ids)), c_node_chunk_ids) + (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) # add self edge to all node_ids to make sure they're @@ -100,8 +91,6 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( edge_ids, make_directed=True) - - time_start = time.time() ccs = flatgraph_utils.connected_components(graph) if verbose: @@ -145,10 +134,103 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, # Add rows for nodes that are in this chunk node_ids = unique_graph_ids[cc] - for node_id in node_ids: + for i_node_id, node_id in enumerate(node_ids): + # Extract edges relevant to this node + + # in chunk + connected time_start_2 = time.time() + if node_id in remapping["in_connected"]: + row_ids, column_ids = sparse_indices["in_connected"][remapping["in_connected"][node_id]] + + inv_column_ids = (column_ids + 1) % 2 + + connected_ids = edge_id_dict["in_connected"][row_ids, inv_column_ids] + connected_affs = edge_aff_dict["in_connected"][row_ids] + connected_areas = edge_area_dict["in_connected"][row_ids] + time_dict["in_connected"].append(time.time() - time_start_2) + time_start_2 = time.time() + else: + connected_ids = np.array([], dtype=np.uint64) + connected_affs = 
np.array([], dtype=np.float32) + connected_areas = np.array([], dtype=np.uint64) + + # in chunk + disconnected + if node_id in remapping["in_disconnected"]: + row_ids, column_ids = sparse_indices["in_disconnected"][remapping["in_disconnected"][node_id]] + inv_column_ids = (column_ids + 1) % 2 + + disconnected_ids = edge_id_dict["in_disconnected"][row_ids, inv_column_ids] + disconnected_affs = edge_aff_dict["in_disconnected"][row_ids] + disconnected_areas = edge_area_dict["in_disconnected"][row_ids] + time_dict["in_disconnected"].append(time.time() - time_start_2) + time_start_2 = time.time() + else: + disconnected_ids = np.array([], dtype=np.uint64) + disconnected_affs = np.array([], dtype=np.float32) + disconnected_areas = np.array([], dtype=np.uint64) + + # out chunk + connected + if node_id in remapping["between_connected"]: + row_ids, column_ids = sparse_indices["between_connected"][remapping["between_connected"][node_id]] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["out_connected_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + connected_ids = np.concatenate([connected_ids, edge_id_dict["between_connected"][row_ids, inv_column_ids]]) + connected_affs = np.concatenate([connected_affs, edge_aff_dict["between_connected"][row_ids]]) + connected_areas = np.concatenate([connected_areas, edge_area_dict["between_connected"][row_ids]]) + + parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["between_connected"][row_ids]]) + + time_dict["out_connected"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # out chunk + disconnected + if node_id in remapping["between_disconnected"]: + row_ids, column_ids = sparse_indices["between_disconnected"][remapping["between_disconnected"][node_id]] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + 
time_dict["out_disconnected_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + disconnected_ids = np.concatenate([disconnected_ids, edge_id_dict["between_disconnected"][row_ids, inv_column_ids]]) + disconnected_affs = np.concatenate([disconnected_affs, edge_aff_dict["between_disconnected"][row_ids]]) + disconnected_areas = np.concatenate([disconnected_areas, edge_area_dict["between_disconnected"][row_ids]]) + + time_dict["out_disconnected"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # cross + if node_id in remapping["cross"]: + row_ids, column_ids = sparse_indices["cross"][remapping["cross"][node_id]] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["cross_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + connected_ids = np.concatenate([connected_ids, edge_id_dict["cross"][row_ids, inv_column_ids]]) + connected_affs = np.concatenate([connected_affs, np.full((len(row_ids)), np.inf, dtype=np.float32)]) + connected_areas = np.concatenate([connected_areas, np.ones((len(row_ids)), dtype=np.uint64)]) + + parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["cross"][row_ids]]) + time_dict["cross"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # Create node + partners = np.concatenate([connected_ids, disconnected_ids]) + affinities = np.concatenate([connected_affs, disconnected_affs]) + areas = np.concatenate([connected_areas, disconnected_areas]) + connected = np.arange(len(connected_ids), dtype=np.int) val_dict = {column_keys.Hierarchy.Parent: parent_id} + rows.append(cg.mutate_row(serializers.serialize_uint64(node_id), val_dict, time_stamp=time_stamp)) node_c += 1 diff --git a/pychunkedgraph/ingest/chunkEdges.proto b/pychunkedgraph/ingest/chunkEdges.proto new file mode 100644 index 000000000..369fe9505 --- /dev/null +++ b/pychunkedgraph/ingest/chunkEdges.proto @@ -0,0 +1,9 @@ 
+syntax = "proto3"; + +package test; + +message Edges { + bytes edgeList = 1; + bytes affinities = 2; + bytes areas = 3; +} \ No newline at end of file From 6ffc041c20f6935238ff7a0f52667fa2476efb7f Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 19 Jul 2019 16:10:34 -0400 Subject: [PATCH 0045/1097] remove edge stuff --- pychunkedgraph/backend/chunkedgraph_init.py | 112 +++----------------- 1 file changed, 15 insertions(+), 97 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index 1557ab14c..b6ea25196 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -1,12 +1,21 @@ import time import datetime import os - +import collections from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, NamedTuple +import pytz import numpy as np from pychunkedgraph.backend import cutting, chunkedgraph_comp, flatgraph_utils +from pychunkedgraph.backend.utils import serializers, column_keys, row_keys, basetypes +from pychunkedgraph.backend.chunkedgraph_utils import compute_indices_pandas, \ + compute_bitmasks, get_google_compatible_time_stamp, \ + get_time_range_filter, get_time_range_and_column_filter, get_max_time, \ + combine_cross_chunk_edge_dicts, get_min_time, partial_row_data_to_column_dict + + +UTC = pytz.UTC def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, edge_aff_dict: dict, edge_area_dict: dict, @@ -51,7 +60,7 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, edge_id_dict[key] = np.concatenate( edge_id_dict.get(key, empty_edges_array.copy(), empty_edges_array.copy())) - n_edge_ids += len(edge_id_dict[edge_id_key]) + n_edge_ids += len(edge_id_dict[key]) for key in edge_aff_keys: edge_aff_dict[key] = np.concatenate( @@ -79,7 +88,7 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, raise Exception("%d: %d chunk ids found in node id list. " "Some edges might be in the wrong order. 
" "Number of occurences:" % - (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) + (u_node_chunk_ids[0], len(u_node_chunk_ids)), c_node_chunk_ids) # add self edge to all node_ids to make sure they're @@ -91,6 +100,8 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( edge_ids, make_directed=True) + + time_start = time.time() ccs = flatgraph_utils.connected_components(graph) if verbose: @@ -134,103 +145,10 @@ def add_atomic_edges_in_chunks_v2(cg, edge_id_dict: dict, # Add rows for nodes that are in this chunk node_ids = unique_graph_ids[cc] - for i_node_id, node_id in enumerate(node_ids): - # Extract edges relevant to this node - - # in chunk + connected + for node_id in node_ids: time_start_2 = time.time() - if node_id in remapping["in_connected"]: - row_ids, column_ids = sparse_indices["in_connected"][remapping["in_connected"][node_id]] - - inv_column_ids = (column_ids + 1) % 2 - - connected_ids = edge_id_dict["in_connected"][row_ids, inv_column_ids] - connected_affs = edge_aff_dict["in_connected"][row_ids] - connected_areas = edge_area_dict["in_connected"][row_ids] - time_dict["in_connected"].append(time.time() - time_start_2) - time_start_2 = time.time() - else: - connected_ids = np.array([], dtype=np.uint64) - connected_affs = np.array([], dtype=np.float32) - connected_areas = np.array([], dtype=np.uint64) - - # in chunk + disconnected - if node_id in remapping["in_disconnected"]: - row_ids, column_ids = sparse_indices["in_disconnected"][remapping["in_disconnected"][node_id]] - inv_column_ids = (column_ids + 1) % 2 - - disconnected_ids = edge_id_dict["in_disconnected"][row_ids, inv_column_ids] - disconnected_affs = edge_aff_dict["in_disconnected"][row_ids] - disconnected_areas = edge_area_dict["in_disconnected"][row_ids] - time_dict["in_disconnected"].append(time.time() - time_start_2) - time_start_2 = time.time() - else: - disconnected_ids = np.array([], dtype=np.uint64) - 
disconnected_affs = np.array([], dtype=np.float32) - disconnected_areas = np.array([], dtype=np.uint64) - - # out chunk + connected - if node_id in remapping["between_connected"]: - row_ids, column_ids = sparse_indices["between_connected"][remapping["between_connected"][node_id]] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["out_connected_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - connected_ids = np.concatenate([connected_ids, edge_id_dict["between_connected"][row_ids, inv_column_ids]]) - connected_affs = np.concatenate([connected_affs, edge_aff_dict["between_connected"][row_ids]]) - connected_areas = np.concatenate([connected_areas, edge_area_dict["between_connected"][row_ids]]) - - parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["between_connected"][row_ids]]) - - time_dict["out_connected"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # out chunk + disconnected - if node_id in remapping["between_disconnected"]: - row_ids, column_ids = sparse_indices["between_disconnected"][remapping["between_disconnected"][node_id]] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["out_disconnected_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - disconnected_ids = np.concatenate([disconnected_ids, edge_id_dict["between_disconnected"][row_ids, inv_column_ids]]) - disconnected_affs = np.concatenate([disconnected_affs, edge_aff_dict["between_disconnected"][row_ids]]) - disconnected_areas = np.concatenate([disconnected_areas, edge_area_dict["between_disconnected"][row_ids]]) - - time_dict["out_disconnected"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # cross - if node_id in remapping["cross"]: - row_ids, column_ids = sparse_indices["cross"][remapping["cross"][node_id]] - - row_ids = 
row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["cross_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - connected_ids = np.concatenate([connected_ids, edge_id_dict["cross"][row_ids, inv_column_ids]]) - connected_affs = np.concatenate([connected_affs, np.full((len(row_ids)), np.inf, dtype=np.float32)]) - connected_areas = np.concatenate([connected_areas, np.ones((len(row_ids)), dtype=np.uint64)]) - - parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["cross"][row_ids]]) - time_dict["cross"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # Create node - partners = np.concatenate([connected_ids, disconnected_ids]) - affinities = np.concatenate([connected_affs, disconnected_affs]) - areas = np.concatenate([connected_areas, disconnected_areas]) - connected = np.arange(len(connected_ids), dtype=np.int) val_dict = {column_keys.Hierarchy.Parent: parent_id} - rows.append(cg.mutate_row(serializers.serialize_uint64(node_id), val_dict, time_stamp=time_stamp)) node_c += 1 From 0c6564793992fa7fa1b8608714fee104ad9a4f03 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 22 Jul 2019 11:30:15 -0400 Subject: [PATCH 0046/1097] remove unused stuff --- pychunkedgraph/backend/chunkedgraph.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index a5a9a9a5d..3a18ef590 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3052,15 +3052,9 @@ def _get_subgraph_layer2_edges(node_ids) -> \ if verbose: time_start = time.time() - child_chunk_ids = self.get_chunk_ids_from_node_ids(child_ids) - u_ccids = np.unique(child_chunk_ids) - # Make blocks of child ids that are in the same chunk - child_blocks = [] - for u_ccid in u_ccids: - child_blocks.append(child_ids[child_chunk_ids == u_ccid]) - - 
this_n_threads = np.min([int(len(u_ccids) // 50000) + 1, mu.n_cpus]) + n_child_ids = len(child_ids) + this_n_threads = np.min([int(n_child_ids // 50000) + 1, mu.n_cpus]) edge_infos = mu.multithread_func( _get_subgraph_layer2_edges, From 1abb235b48df322678db3b9f690cf45fa5bfdc7b Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 22 Jul 2019 14:38:00 -0400 Subject: [PATCH 0047/1097] wip: subpackage to segregate storage --- pychunkedgraph/backend/chunkedgraph.py | 10 ++++++++-- pychunkedgraph/io/gcs.py | 22 ++-------------------- 2 files changed, 10 insertions(+), 22 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 3a18ef590..a5a9a9a5d 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3052,9 +3052,15 @@ def _get_subgraph_layer2_edges(node_ids) -> \ if verbose: time_start = time.time() + child_chunk_ids = self.get_chunk_ids_from_node_ids(child_ids) + u_ccids = np.unique(child_chunk_ids) - n_child_ids = len(child_ids) - this_n_threads = np.min([int(n_child_ids // 50000) + 1, mu.n_cpus]) + # Make blocks of child ids that are in the same chunk + child_blocks = [] + for u_ccid in u_ccids: + child_blocks.append(child_ids[child_chunk_ids == u_ccid]) + + this_n_threads = np.min([int(len(u_ccids) // 50000) + 1, mu.n_cpus]) edge_infos = mu.multithread_func( _get_subgraph_layer2_edges, diff --git a/pychunkedgraph/io/gcs.py b/pychunkedgraph/io/gcs.py index c8261aa5e..63ac7ece5 100644 --- a/pychunkedgraph/io/gcs.py +++ b/pychunkedgraph/io/gcs.py @@ -2,29 +2,11 @@ Functions to use when dealing with Google Cloud Storage ''' -import numpy as np -import zstandard as zstd - -from cloudvolume import Storage -from .protobuf.chunkEdges_pb3 import Edges - # TODO some funtions in ChunkedGraph # should be class methods or util functions # for now pass instance of ChunkedGraph -def get_chunk_edges(cg, chunk_id: np.uint64): +def get_chunk_edges(cg, chunk_id): chunk_coords = 
cg.get_chunk_coordinates(chunk_id) - chunk_str = '_'.join(str(coord) for coord in chunk_coords) + chunk_str = repr(chunk)[1:-1].replace(', ','_') fname = f'edges_{chunk_str}.data' - - edgesMessage = Edges() - with Storage(cg._cv_path) as st: - file_content = st.get_file(fname) - - file_content = zstd.ZstdDecompressor().decompressobj().decompress(file_content) - edgesMessage.ParseFromString(file_content) - edges = np.frombuffer(edgesMessage.edgeList) - areas = np.frombuffer(edgesMessage.areas, dtype=' Date: Mon, 22 Jul 2019 15:16:04 -0400 Subject: [PATCH 0048/1097] wip: updates --- pychunkedgraph/ingest/chunkEdges.proto | 9 --------- pychunkedgraph/io/gcs.py | 22 ++++++++++++++++++++-- 2 files changed, 20 insertions(+), 11 deletions(-) delete mode 100644 pychunkedgraph/ingest/chunkEdges.proto diff --git a/pychunkedgraph/ingest/chunkEdges.proto b/pychunkedgraph/ingest/chunkEdges.proto deleted file mode 100644 index 369fe9505..000000000 --- a/pychunkedgraph/ingest/chunkEdges.proto +++ /dev/null @@ -1,9 +0,0 @@ -syntax = "proto3"; - -package test; - -message Edges { - bytes edgeList = 1; - bytes affinities = 2; - bytes areas = 3; -} \ No newline at end of file diff --git a/pychunkedgraph/io/gcs.py b/pychunkedgraph/io/gcs.py index 63ac7ece5..c8261aa5e 100644 --- a/pychunkedgraph/io/gcs.py +++ b/pychunkedgraph/io/gcs.py @@ -2,11 +2,29 @@ Functions to use when dealing with Google Cloud Storage ''' +import numpy as np +import zstandard as zstd + +from cloudvolume import Storage +from .protobuf.chunkEdges_pb3 import Edges + # TODO some funtions in ChunkedGraph # should be class methods or util functions # for now pass instance of ChunkedGraph -def get_chunk_edges(cg, chunk_id): +def get_chunk_edges(cg, chunk_id: np.uint64): chunk_coords = cg.get_chunk_coordinates(chunk_id) - chunk_str = repr(chunk)[1:-1].replace(', ','_') + chunk_str = '_'.join(str(coord) for coord in chunk_coords) fname = f'edges_{chunk_str}.data' + + edgesMessage = Edges() + with Storage(cg._cv_path) as 
st: + file_content = st.get_file(fname) + + file_content = zstd.ZstdDecompressor().decompressobj().decompress(file_content) + edgesMessage.ParseFromString(file_content) + edges = np.frombuffer(edgesMessage.edgeList) + areas = np.frombuffer(edgesMessage.areas, dtype=' Date: Mon, 22 Jul 2019 16:08:53 -0400 Subject: [PATCH 0049/1097] wip --- pychunkedgraph/io/{gcs.py => storage.py} | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) rename pychunkedgraph/io/{gcs.py => storage.py} (76%) diff --git a/pychunkedgraph/io/gcs.py b/pychunkedgraph/io/storage.py similarity index 76% rename from pychunkedgraph/io/gcs.py rename to pychunkedgraph/io/storage.py index c8261aa5e..9cc7ae7a8 100644 --- a/pychunkedgraph/io/gcs.py +++ b/pychunkedgraph/io/storage.py @@ -1,7 +1,9 @@ ''' -Functions to use when dealing with Google Cloud Storage +Functions to use when dealing with any cloud storage via CloudVolume ''' +from typing import List + import numpy as np import zstandard as zstd @@ -12,14 +14,17 @@ # should be class methods or util functions # for now pass instance of ChunkedGraph -def get_chunk_edges(cg, chunk_id: np.uint64): +def get_chunk_edges(cg, chunk_ids: List[np.uint64]): + fnames = [] chunk_coords = cg.get_chunk_coordinates(chunk_id) chunk_str = '_'.join(str(coord) for coord in chunk_coords) fname = f'edges_{chunk_str}.data' edgesMessage = Edges() with Storage(cg._cv_path) as st: - file_content = st.get_file(fname) + file_content = st.get_files(fnames) + + # TODO move decompression to a generator file_content = zstd.ZstdDecompressor().decompressobj().decompress(file_content) edgesMessage.ParseFromString(file_content) From 9af2d0ce09e693f4aaebe274b3b94751ffc3a57e Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 22 Jul 2019 16:26:02 -0400 Subject: [PATCH 0050/1097] wip: add generator to decompress files --- pychunkedgraph/io/storage.py | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff --git 
a/pychunkedgraph/io/storage.py b/pychunkedgraph/io/storage.py index 9cc7ae7a8..47034a5e1 100644 --- a/pychunkedgraph/io/storage.py +++ b/pychunkedgraph/io/storage.py @@ -2,7 +2,7 @@ Functions to use when dealing with any cloud storage via CloudVolume ''' -from typing import List +from typing import List, Dict import numpy as np import zstandard as zstd @@ -14,22 +14,32 @@ # should be class methods or util functions # for now pass instance of ChunkedGraph +def _decompress_edges(files: List[Dict]): + """ + :param files: list of dicts (from CloudVolume.Storage.get_files) + :return: Tuple[edges:np.array[np.uint64, np.uint64], + areas:np.array[np.uint64] + affinities: np.array[np.float64]] + """ + edgesMessage = Edges() + + for _file in files: + file_content = zstd.ZstdDecompressor().decompressobj().decompress(_file['content']) + edgesMessage.ParseFromString(file_content) + edges = np.frombuffer(edgesMessage.edgeList) + areas = np.frombuffer(edgesMessage.areas, dtype=' Date: Mon, 22 Jul 2019 16:29:04 -0400 Subject: [PATCH 0051/1097] wip: updates --- pychunkedgraph/io/storage.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/pychunkedgraph/io/storage.py b/pychunkedgraph/io/storage.py index 47034a5e1..2cd1f6926 100644 --- a/pychunkedgraph/io/storage.py +++ b/pychunkedgraph/io/storage.py @@ -15,12 +15,12 @@ # for now pass instance of ChunkedGraph def _decompress_edges(files: List[Dict]): - """ + ''' :param files: list of dicts (from CloudVolume.Storage.get_files) :return: Tuple[edges:np.array[np.uint64, np.uint64], areas:np.array[np.uint64] affinities: np.array[np.float64]] - """ + ''' edgesMessage = Edges() for _file in files: @@ -33,10 +33,15 @@ def _decompress_edges(files: List[Dict]): def get_chunk_edges(cg, chunk_ids: List[np.uint64]): + ''' + :param cg: ChunkedGraph instance + :return: a generator that yields decompressed file content + ''' fnames = [] - chunk_coords = cg.get_chunk_coordinates(chunk_id) - chunk_str = 
'_'.join(str(coord) for coord in chunk_coords) - fname = f'edges_{chunk_str}.data' + for chunk_id in chunk_ids: + chunk_coords = cg.get_chunk_coordinates(chunk_id) + chunk_str = '_'.join(str(coord) for coord in chunk_coords) + fnames.append(f'edges_{chunk_str}.data') files = [] with Storage(cg._cv_path) as st: From d3d77953daf1607e5edb84cca61171ac7ca83444 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 22 Jul 2019 16:34:12 -0400 Subject: [PATCH 0052/1097] wip: fix path --- pychunkedgraph/io/storage.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pychunkedgraph/io/storage.py b/pychunkedgraph/io/storage.py index 2cd1f6926..ae3d0a19c 100644 --- a/pychunkedgraph/io/storage.py +++ b/pychunkedgraph/io/storage.py @@ -10,9 +10,6 @@ from cloudvolume import Storage from .protobuf.chunkEdges_pb3 import Edges -# TODO some funtions in ChunkedGraph -# should be class methods or util functions -# for now pass instance of ChunkedGraph def _decompress_edges(files: List[Dict]): ''' @@ -44,7 +41,7 @@ def get_chunk_edges(cg, chunk_ids: List[np.uint64]): fnames.append(f'edges_{chunk_str}.data') files = [] - with Storage(cg._cv_path) as st: + with Storage(f'{cg._cv_path}/edges_dir') as st: files = st.get_files(fnames) return _decompress_edges(files) \ No newline at end of file From 477f5f6b53778d4c83e855b77bdc8834f2118861 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 22 Jul 2019 16:48:34 -0400 Subject: [PATCH 0053/1097] wip --- pychunkedgraph/backend/chunkedgraph.py | 15 ++------------- pychunkedgraph/io/storage.py | 6 +++--- 2 files changed, 5 insertions(+), 16 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index a5a9a9a5d..12d9b54bd 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3032,14 +3032,9 @@ def get_subgraph_edges(self, agglomeration_id: np.uint64, # connected_edges=connected_edges, # time_stamp=time_stamp) - def 
_get_subgraph_layer2_edges(node_ids) -> \ + def _get_subgraph_layer2_edges(chunk_ids) -> \ Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: - return self.get_subgraph_chunk_v2(node_ids, - connected_edges=connected_edges, - time_stamp=time_stamp) - - time_stamp = self.read_node_id_row(agglomeration_id, - columns=column_keys.Hierarchy.Child)[0].timestamp + return get_chunk_edges(self, chunk_ids) bounding_box = self.normalize_bounding_box(bounding_box, bb_is_coordinate) @@ -3054,12 +3049,6 @@ def _get_subgraph_layer2_edges(node_ids) -> \ child_chunk_ids = self.get_chunk_ids_from_node_ids(child_ids) u_ccids = np.unique(child_chunk_ids) - - # Make blocks of child ids that are in the same chunk - child_blocks = [] - for u_ccid in u_ccids: - child_blocks.append(child_ids[child_chunk_ids == u_ccid]) - this_n_threads = np.min([int(len(u_ccids) // 50000) + 1, mu.n_cpus]) edge_infos = mu.multithread_func( diff --git a/pychunkedgraph/io/storage.py b/pychunkedgraph/io/storage.py index ae3d0a19c..b340fe799 100644 --- a/pychunkedgraph/io/storage.py +++ b/pychunkedgraph/io/storage.py @@ -29,9 +29,9 @@ def _decompress_edges(files: List[Dict]): yield edges, areas, affinities -def get_chunk_edges(cg, chunk_ids: List[np.uint64]): +def get_chunk_edges(edges_dir:str, chunk_ids: List[np.uint64]): ''' - :param cg: ChunkedGraph instance + :param chunk_ids :return: a generator that yields decompressed file content ''' fnames = [] @@ -41,7 +41,7 @@ def get_chunk_edges(cg, chunk_ids: List[np.uint64]): fnames.append(f'edges_{chunk_str}.data') files = [] - with Storage(f'{cg._cv_path}/edges_dir') as st: + with Storage(edges_dir) as st: files = st.get_files(fnames) return _decompress_edges(files) \ No newline at end of file From de43eb3a34fc2e0a9a844d290d768c9e418d58c5 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 22 Jul 2019 16:55:18 -0400 Subject: [PATCH 0054/1097] wip --- pychunkedgraph/io/storage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/pychunkedgraph/io/storage.py b/pychunkedgraph/io/storage.py index b340fe799..502a791f1 100644 --- a/pychunkedgraph/io/storage.py +++ b/pychunkedgraph/io/storage.py @@ -29,7 +29,7 @@ def _decompress_edges(files: List[Dict]): yield edges, areas, affinities -def get_chunk_edges(edges_dir:str, chunk_ids: List[np.uint64]): +def get_chunk_edges(edges_dir:str, chunk_coords: np.array([x, y, z])): ''' :param chunk_ids :return: a generator that yields decompressed file content From d14946bfb0b084f6412121926ef97e9efe986233 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 23 Jul 2019 16:20:07 -0400 Subject: [PATCH 0055/1097] wip: udpates --- pychunkedgraph/backend/chunkedgraph.py | 4 +- pychunkedgraph/io/protobuf/chunkEdges_pb3.py | 84 -------------------- pychunkedgraph/io/storage.py | 9 +-- 3 files changed, 7 insertions(+), 90 deletions(-) delete mode 100644 pychunkedgraph/io/protobuf/chunkEdges_pb3.py diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 12d9b54bd..62ddb65ef 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3034,7 +3034,9 @@ def get_subgraph_edges(self, agglomeration_id: np.uint64, def _get_subgraph_layer2_edges(chunk_ids) -> \ Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: - return get_chunk_edges(self, chunk_ids) + return get_chunk_edges( + 'edges', + [self.get_chunk_coordinates(chunk_id) for chunk_id in chunk_ids]) bounding_box = self.normalize_bounding_box(bounding_box, bb_is_coordinate) diff --git a/pychunkedgraph/io/protobuf/chunkEdges_pb3.py b/pychunkedgraph/io/protobuf/chunkEdges_pb3.py deleted file mode 100644 index 52b0de633..000000000 --- a/pychunkedgraph/io/protobuf/chunkEdges_pb3.py +++ /dev/null @@ -1,84 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: chunkEdges.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='chunkEdges.proto', - package='test', - syntax='proto3', - serialized_options=None, - serialized_pb=_b('\n\x10\x63hunkEdges.proto\x12\x04test\"<\n\x05\x45\x64ges\x12\x10\n\x08\x65\x64geList\x18\x01 \x01(\x0c\x12\x12\n\naffinities\x18\x02 \x01(\x0c\x12\r\n\x05\x61reas\x18\x03 \x01(\x0c\x62\x06proto3') -) - - - - -_EDGES = _descriptor.Descriptor( - name='Edges', - full_name='test.Edges', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='edgeList', full_name='test.Edges.edgeList', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='affinities', full_name='test.Edges.affinities', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='areas', full_name='test.Edges.areas', index=2, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - 
serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=26, - serialized_end=86, -) - -DESCRIPTOR.message_types_by_name['Edges'] = _EDGES -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Edges = _reflection.GeneratedProtocolMessageType('Edges', (_message.Message,), { - 'DESCRIPTOR' : _EDGES, - '__module__' : 'chunkEdges_pb2' - # @@protoc_insertion_point(class_scope:test.Edges) - }) -_sym_db.RegisterMessage(Edges) - - -# @@protoc_insertion_point(module_scope) diff --git a/pychunkedgraph/io/storage.py b/pychunkedgraph/io/storage.py index 502a791f1..da0d22f31 100644 --- a/pychunkedgraph/io/storage.py +++ b/pychunkedgraph/io/storage.py @@ -8,7 +8,7 @@ import zstandard as zstd from cloudvolume import Storage -from .protobuf.chunkEdges_pb3 import Edges +from .protobuf.chunkEdges_pb2 import Edges def _decompress_edges(files: List[Dict]): @@ -29,14 +29,13 @@ def _decompress_edges(files: List[Dict]): yield edges, areas, affinities -def get_chunk_edges(edges_dir:str, chunk_coords: np.array([x, y, z])): +def get_chunk_edges(edges_dir:str, chunks_coordinates: List[np.ndarray]): ''' - :param chunk_ids + :param: chunks_coordinates np.array of chunk coordinates :return: a generator that yields decompressed file content ''' fnames = [] - for chunk_id in chunk_ids: - chunk_coords = cg.get_chunk_coordinates(chunk_id) + for chunk_coords in chunks_coordinates: chunk_str = '_'.join(str(coord) for coord in chunk_coords) fnames.append(f'edges_{chunk_str}.data') From 5c51b293de3dabbaacf501f2ff886625ad53f1a2 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 24 Jul 2019 11:15:17 -0400 Subject: [PATCH 0056/1097] wip: read edges from 256 chunks --- pychunkedgraph/backend/chunkedgraph.py | 64 ++++++++++---------------- pychunkedgraph/io/storage.py | 25 ++++++++-- 2 files changed, 46 insertions(+), 43 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 
62ddb65ef..d111e5022 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3026,17 +3026,11 @@ def get_subgraph_edges(self, agglomeration_id: np.uint64, :return: edge list """ - # def _get_subgraph_layer2_edges(node_ids) -> \ - # Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: - # return self.get_subgraph_chunk(node_ids, - # connected_edges=connected_edges, - # time_stamp=time_stamp) - - def _get_subgraph_layer2_edges(chunk_ids) -> \ + def _get_subgraph_layer2_edges(node_ids) -> \ Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: - return get_chunk_edges( - 'edges', - [self.get_chunk_coordinates(chunk_id) for chunk_id in chunk_ids]) + return self.get_subgraph_chunk(node_ids, + connected_edges=connected_edges, + time_stamp=time_stamp) bounding_box = self.normalize_bounding_box(bounding_box, bb_is_coordinate) @@ -3075,9 +3069,8 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ return edges, affinities, areas - def get_subgraph_edges_v2(self, offset = np.array([105, 54, 6]), - this_n_threads = 4, - cv_threads = 20, + + def get_subgraph_edges_v2(self, agglomeration_id: np.uint64, bounding_box: Optional[Sequence[Sequence[int]]] = None, bb_is_coordinate: bool = False, connected_edges=True, @@ -3088,39 +3081,34 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: return get_chunk_edges( 'testing_ignore_this', - [self.get_chunk_coordinates(chunk_id) for chunk_id in chunk_ids], - cv_threads) + [self.get_chunk_coordinates(chunk_id) for chunk_id in chunk_ids]) - timings = {} - timings['total'] = time.time() + # test assumes agglomeration id spans 256 chunks + # just reads edges from all 256 chunks + offset = np.array([102, 51, 5]) - timings['determine_chunks_ids'] = time.time() x_start, y_start, z_start = offset x_end, y_end, z_end = map( int, np.ceil( - np.array(self.dataset_info['scales'][0]['size']) / self.chunk_size) - offset) + 
np.array(cg.dataset_info['scales'][0]['size']) / cg.chunk_size) - offset) chunks = [] for x in range(x_start,x_end): for y in range(y_start, y_end): for z in range(z_start, z_end): - chunks.append((x, y, z)) + chunks.append((x, y, z)) - chunk_ids = np.array([self.get_chunk_id(None, 1, *chunk) for chunk in chunks]) - timings['determine_chunks_ids'] = time.time() - timings['determine_chunks_ids'] - - # print(f'chunks: {len(chunks)}') - # print(f'threads: {this_n_threads}') + chunk_ids = [cg.get_chunk_id(None, 1, *chunk) for chunk in chunks] + this_n_threads = np.min([int(len(chunk_ids) // 50000) + 1, mu.n_cpus]) + + if verbose: + time_start = time.time() - timings['reading_edges'] = time.time() edge_infos = mu.multithread_func( _get_subgraph_layer2_edges, np.array_split(chunk_ids, this_n_threads), n_threads=this_n_threads, debug=this_n_threads == 1) - timings['reading_edges'] = time.time() - timings['reading_edges'] - - timings['collecting_edges'] = time.time() edges = np.array([], dtype=np.uint64).reshape(0, 2) affinities = np.array([], dtype=np.float32) areas = np.array([], dtype=np.uint64) @@ -3130,18 +3118,16 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ edges = np.concatenate([edges, _edges]) affinities = np.concatenate([affinities, _affinities]) areas = np.concatenate([areas, _areas]) - timings['collecting_edges'] = time.time() - timings['collecting_edges'] - - timings['total'] = time.time() - timings['total'] - return timings - - # print(json.dumps(timings, default=str, indent=4)) - # print(f'edges: {len(edges)}') - # print(f'affinities: {len(affinities)}') - # print(f'areas: {len(areas)}') + if verbose: + print(f'time: {time.time() - time_start)}') + print(f'chunks: {len(chunks)}') + print(f'threads: {len(this_n_threads)}') + print(f'edges: {len(edges)}') + print(f'affinities: {len(affinities)}') + print(f'areas: {len(areas)}') - # return edges, affinities, areas + return edges, affinities, areas def get_subgraph_nodes(self, agglomeration_id: np.uint64, 
bounding_box: Optional[Sequence[Sequence[int]]] = None, diff --git a/pychunkedgraph/io/storage.py b/pychunkedgraph/io/storage.py index da0d22f31..53afe6c21 100644 --- a/pychunkedgraph/io/storage.py +++ b/pychunkedgraph/io/storage.py @@ -2,6 +2,7 @@ Functions to use when dealing with any cloud storage via CloudVolume ''' +import os from typing import List, Dict import numpy as np @@ -23,17 +24,21 @@ def _decompress_edges(files: List[Dict]): for _file in files: file_content = zstd.ZstdDecompressor().decompressobj().decompress(_file['content']) edgesMessage.ParseFromString(file_content) + edges = np.frombuffer(edgesMessage.edgeList) - areas = np.frombuffer(edgesMessage.areas, dtype=' Date: Wed, 24 Jul 2019 14:53:58 -0400 Subject: [PATCH 0057/1097] wip --- pychunkedgraph/backend/chunkedgraph.py | 20 +++++------ pychunkedgraph/io/storage.py | 46 +++++++++++++------------- 2 files changed, 33 insertions(+), 33 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index d111e5022..db44ccf8d 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3069,8 +3069,8 @@ def _get_subgraph_layer2_edges(node_ids) -> \ return edges, affinities, areas - def get_subgraph_edges_v2(self, agglomeration_id: np.uint64, + offset = np.array([104, 53, 6]), bounding_box: Optional[Sequence[Sequence[int]]] = None, bb_is_coordinate: bool = False, connected_edges=True, @@ -3083,14 +3083,13 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ 'testing_ignore_this', [self.get_chunk_coordinates(chunk_id) for chunk_id in chunk_ids]) - # test assumes agglomeration id spans 256 chunks - # just reads edges from all 256 chunks - offset = np.array([102, 51, 5]) + # test assumes agglomeration id spans 32 chunks + # just reads edges from all 32 chunks x_start, y_start, z_start = offset x_end, y_end, z_end = map( int, np.ceil( - np.array(cg.dataset_info['scales'][0]['size']) / cg.chunk_size) - offset) + 
np.array(self.dataset_info['scales'][0]['size']) / self.chunk_size) - offset) chunks = [] @@ -3099,7 +3098,7 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ for z in range(z_start, z_end): chunks.append((x, y, z)) - chunk_ids = [cg.get_chunk_id(None, 1, *chunk) for chunk in chunks] + chunk_ids = np.array([self.get_chunk_id(None, 1, *chunk) for chunk in chunks]) this_n_threads = np.min([int(len(chunk_ids) // 50000) + 1, mu.n_cpus]) if verbose: @@ -3108,7 +3107,8 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ edge_infos = mu.multithread_func( _get_subgraph_layer2_edges, np.array_split(chunk_ids, this_n_threads), - n_threads=this_n_threads, debug=this_n_threads == 1) + n_threads=this_n_threads, debug=this_n_threads == 1) + edges = np.array([], dtype=np.uint64).reshape(0, 2) affinities = np.array([], dtype=np.float32) areas = np.array([], dtype=np.uint64) @@ -3120,14 +3120,14 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ areas = np.concatenate([areas, _areas]) if verbose: - print(f'time: {time.time() - time_start)}') + print(f'time: {time.time() - time_start}') print(f'chunks: {len(chunks)}') - print(f'threads: {len(this_n_threads)}') + print(f'threads: {this_n_threads}') print(f'edges: {len(edges)}') print(f'affinities: {len(affinities)}') print(f'areas: {len(areas)}') - return edges, affinities, areas + # return edges, affinities, areas def get_subgraph_nodes(self, agglomeration_id: np.uint64, bounding_box: Optional[Sequence[Sequence[int]]] = None, diff --git a/pychunkedgraph/io/storage.py b/pychunkedgraph/io/storage.py index 53afe6c21..321fa2971 100644 --- a/pychunkedgraph/io/storage.py +++ b/pychunkedgraph/io/storage.py @@ -11,24 +11,26 @@ from cloudvolume import Storage from .protobuf.chunkEdges_pb2 import Edges +# Creating these everytime is costly +edgesMessage = Edges() +zstdDecompressor = zstd.ZstdDecompressor() -def _decompress_edges(files: List[Dict]): + +def _decompress_edges(content): ''' - :param files: list of dicts (from 
CloudVolume.Storage.get_files) + :param content: zstd compressed bytes :return: Tuple[edges:np.array[np.uint64, np.uint64], areas:np.array[np.uint64] affinities: np.array[np.float64]] ''' - edgesMessage = Edges() + zstdDecompressor = zstd.ZstdDecompressor() + file_content = zstdDecompressor.decompressobj().decompress(content) + edgesMessage.ParseFromString(file_content) - for _file in files: - file_content = zstd.ZstdDecompressor().decompressobj().decompress(_file['content']) - edgesMessage.ParseFromString(file_content) - - edges = np.frombuffer(edgesMessage.edgeList) - affinities = np.frombuffer(edgesMessage.affinities, dtype=' Date: Thu, 25 Jul 2019 12:25:52 -0400 Subject: [PATCH 0058/1097] wip --- pychunkedgraph/backend/chunkedgraph.py | 10 ++-- pychunkedgraph/io/edge_storage.py | 54 +++++++++------------- pychunkedgraph/io/storage.py | 63 -------------------------- 3 files changed, 28 insertions(+), 99 deletions(-) delete mode 100644 pychunkedgraph/io/storage.py diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index db44ccf8d..feb2454b7 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3069,8 +3069,7 @@ def _get_subgraph_layer2_edges(node_ids) -> \ return edges, affinities, areas - def get_subgraph_edges_v2(self, agglomeration_id: np.uint64, - offset = np.array([104, 53, 6]), + def get_subgraph_edges_v2(self, offset = np.array([105, 54, 6]), bounding_box: Optional[Sequence[Sequence[int]]] = None, bb_is_coordinate: bool = False, connected_edges=True, @@ -3090,6 +3089,8 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ x_end, y_end, z_end = map( int, np.ceil( np.array(self.dataset_info['scales'][0]['size']) / self.chunk_size) - offset) + print(x_start, y_start, z_start) + print(x_end, y_end, z_end) chunks = [] @@ -3100,6 +3101,9 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ chunk_ids = np.array([self.get_chunk_id(None, 1, *chunk) for chunk in chunks]) this_n_threads 
= np.min([int(len(chunk_ids) // 50000) + 1, mu.n_cpus]) + + print(f'chunks: {len(chunks)}') + print(f'threads: {this_n_threads}') if verbose: time_start = time.time() @@ -3121,8 +3125,6 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ if verbose: print(f'time: {time.time() - time_start}') - print(f'chunks: {len(chunks)}') - print(f'threads: {this_n_threads}') print(f'edges: {len(edges)}') print(f'affinities: {len(affinities)}') print(f'areas: {len(areas)}') diff --git a/pychunkedgraph/io/edge_storage.py b/pychunkedgraph/io/edge_storage.py index dbbc57486..2063b7302 100644 --- a/pychunkedgraph/io/edge_storage.py +++ b/pychunkedgraph/io/edge_storage.py @@ -1,70 +1,60 @@ -""" +''' Functions to use when dealing with any cloud storage via CloudVolume -""" +''' import os -from typing import List, Dict, Tuple +from typing import List, Dict import numpy as np import zstandard as zstd from cloudvolume import Storage -from cloudvolume.storage import SimpleStorage from .protobuf.chunkEdges_pb2 import Edges def _decompress_edges(content): - """ + ''' :param content: zstd compressed bytes :return: Tuple[edges:np.array[np.uint64, np.uint64], areas:np.array[np.uint64] affinities: np.array[np.float64]] - """ - edgesMessage = Edges() + ''' + edgesMessage = Edges() zstdDecompressorObj = zstd.ZstdDecompressor().decompressobj() file_content = zstdDecompressorObj.decompress(content) edgesMessage.ParseFromString(file_content) - + edges = np.frombuffer(edgesMessage.edgeList).reshape(-1, 2) - affinities = np.frombuffer(edgesMessage.affinities, dtype=" Tuple[np.ndarray, np.ndarray, np.ndarray]: - """ +def get_chunk_edges(edges_dir:str, chunks_coordinates: List[np.ndarray]): + ''' :param: chunks_coordinates np.array of chunk coordinates - :return: tuple of edge infos (edges, affinities, areas) - """ + :return: a generator that yields decompressed file content + ''' edges_dir = os.environ.get( - "EDIR", "gs://akhilesh-test/edges/fly_playground/bbox-102_51_5-110_59_9" - ) + 'EDIR', + 
'gs://akhilesh-test/edges/fly_playground/bbox-102_51_5-110_59_9') fnames = [] for chunk_coords in chunks_coordinates: - chunk_str = "_".join(str(coord) for coord in chunk_coords) - fnames.append(f"chunk_{chunk_str}_zstd_level_17_proto.data") + chunk_str = '_'.join(str(coord) for coord in chunk_coords) + fnames.append(f'chunk_{chunk_str}_zstd_level_17_proto.data') edges = np.array([], dtype=np.uint64).reshape(0, 2) affinities = np.array([], dtype=np.float32) areas = np.array([], dtype=np.uint64) - st = SimpleStorage(edges_dir) - if cv_threads > 1: - st = Storage(edges_dir, n_threads=cv_threads) - files = [] - with st: + with Storage(edges_dir) as st: files = st.get_files(fnames) for _file in files: - if not _file["content"]: - continue - _edges, _affinities, _areas = _decompress_edges(_file["content"]) + if not _file['content']: continue + _edges, _affinities, _areas = _decompress_edges(_file['content']) edges = np.concatenate([edges, _edges]) affinities = np.concatenate([affinities, _affinities]) - areas = np.concatenate([areas, _areas]) + areas = np.concatenate([areas, _areas]) - return edges, affinities, areas + return edges, affinities, areas \ No newline at end of file diff --git a/pychunkedgraph/io/storage.py b/pychunkedgraph/io/storage.py deleted file mode 100644 index 321fa2971..000000000 --- a/pychunkedgraph/io/storage.py +++ /dev/null @@ -1,63 +0,0 @@ -''' -Functions to use when dealing with any cloud storage via CloudVolume -''' - -import os -from typing import List, Dict - -import numpy as np -import zstandard as zstd - -from cloudvolume import Storage -from .protobuf.chunkEdges_pb2 import Edges - -# Creating these everytime is costly -edgesMessage = Edges() -zstdDecompressor = zstd.ZstdDecompressor() - - -def _decompress_edges(content): - ''' - :param content: zstd compressed bytes - :return: Tuple[edges:np.array[np.uint64, np.uint64], - areas:np.array[np.uint64] - affinities: np.array[np.float64]] - ''' - zstdDecompressor = zstd.ZstdDecompressor() - 
file_content = zstdDecompressor.decompressobj().decompress(content) - edgesMessage.ParseFromString(file_content) - - edges = np.frombuffer(edgesMessage.edgeList).reshape(-1, 2) - affinities = np.frombuffer(edgesMessage.affinities, dtype=' Date: Thu, 25 Jul 2019 12:27:53 -0400 Subject: [PATCH 0059/1097] wip: number of threads as argument --- pychunkedgraph/backend/chunkedgraph.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index feb2454b7..e47176d09 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3070,6 +3070,7 @@ def _get_subgraph_layer2_edges(node_ids) -> \ return edges, affinities, areas def get_subgraph_edges_v2(self, offset = np.array([105, 54, 6]), + this_n_threads = 4, bounding_box: Optional[Sequence[Sequence[int]]] = None, bb_is_coordinate: bool = False, connected_edges=True, @@ -3100,7 +3101,7 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ chunks.append((x, y, z)) chunk_ids = np.array([self.get_chunk_id(None, 1, *chunk) for chunk in chunks]) - this_n_threads = np.min([int(len(chunk_ids) // 50000) + 1, mu.n_cpus]) + # this_n_threads = np.min([int(len(chunk_ids) // 50000) + 1, mu.n_cpus]) print(f'chunks: {len(chunks)}') print(f'threads: {this_n_threads}') From 4adea56fd5659a001196c814934cf47caa6fa769 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 25 Jul 2019 17:22:35 -0400 Subject: [PATCH 0060/1097] wip: pass cv thread count --- pychunkedgraph/backend/chunkedgraph.py | 39 ++++++++++++++------------ pychunkedgraph/io/edge_storage.py | 4 +-- 2 files changed, 23 insertions(+), 20 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index e47176d09..f648e0ede 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3071,6 +3071,7 @@ def _get_subgraph_layer2_edges(node_ids) -> \ def 
get_subgraph_edges_v2(self, offset = np.array([105, 54, 6]), this_n_threads = 4, + cv_threads = 20, bounding_box: Optional[Sequence[Sequence[int]]] = None, bb_is_coordinate: bool = False, connected_edges=True, @@ -3081,39 +3082,39 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: return get_chunk_edges( 'testing_ignore_this', - [self.get_chunk_coordinates(chunk_id) for chunk_id in chunk_ids]) + [self.get_chunk_coordinates(chunk_id) for chunk_id in chunk_ids], + cv_threads) - # test assumes agglomeration id spans 32 chunks - # just reads edges from all 32 chunks + timings = {} + timings['total'] = time.time() + timings['determine_chunks_ids'] = time.time() x_start, y_start, z_start = offset x_end, y_end, z_end = map( int, np.ceil( np.array(self.dataset_info['scales'][0]['size']) / self.chunk_size) - offset) - print(x_start, y_start, z_start) - print(x_end, y_end, z_end) chunks = [] for x in range(x_start,x_end): for y in range(y_start, y_end): for z in range(z_start, z_end): - chunks.append((x, y, z)) + chunks.append((x, y, z)) chunk_ids = np.array([self.get_chunk_id(None, 1, *chunk) for chunk in chunks]) - # this_n_threads = np.min([int(len(chunk_ids) // 50000) + 1, mu.n_cpus]) + timings['determine_chunks_ids'] = time.time() - timings['determine_chunks_ids'] - print(f'chunks: {len(chunks)}') - print(f'threads: {this_n_threads}') - - if verbose: - time_start = time.time() +# print(f'chunks: {len(chunks)}') +# print(f'threads: {this_n_threads}') + timings['reading_edges'] = time.time() edge_infos = mu.multithread_func( _get_subgraph_layer2_edges, np.array_split(chunk_ids, this_n_threads), - n_threads=this_n_threads, debug=this_n_threads == 1) + n_threads=this_n_threads, debug=this_n_threads == 1) + timings['reading_edges'] = time.time() - timings['reading_edges'] + timings['collecting_edges'] = time.time() edges = np.array([], dtype=np.uint64).reshape(0, 2) affinities = np.array([], dtype=np.float32) areas = 
np.array([], dtype=np.uint64) @@ -3123,12 +3124,14 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ edges = np.concatenate([edges, _edges]) affinities = np.concatenate([affinities, _affinities]) areas = np.concatenate([areas, _areas]) + timings['collecting_edges'] = time.time() - timings['collecting_edges'] + + timings['total'] = time.time() - timings['total'] - if verbose: - print(f'time: {time.time() - time_start}') - print(f'edges: {len(edges)}') - print(f'affinities: {len(affinities)}') - print(f'areas: {len(areas)}') + print(json.dumps(timings, default=str, indent=4)) + # print(f'edges: {len(edges)}') + # print(f'affinities: {len(affinities)}') + # print(f'areas: {len(areas)}') # return edges, affinities, areas diff --git a/pychunkedgraph/io/edge_storage.py b/pychunkedgraph/io/edge_storage.py index 2063b7302..33327a48b 100644 --- a/pychunkedgraph/io/edge_storage.py +++ b/pychunkedgraph/io/edge_storage.py @@ -30,7 +30,7 @@ def _decompress_edges(content): return edges, affinities, areas -def get_chunk_edges(edges_dir:str, chunks_coordinates: List[np.ndarray]): +def get_chunk_edges(edges_dir:str, chunks_coordinates: List[np.ndarray], cv_threads): ''' :param: chunks_coordinates np.array of chunk coordinates :return: a generator that yields decompressed file content @@ -48,7 +48,7 @@ def get_chunk_edges(edges_dir:str, chunks_coordinates: List[np.ndarray]): areas = np.array([], dtype=np.uint64) files = [] - with Storage(edges_dir) as st: + with Storage(edges_dir, n_threads = cv_threads) as st: files = st.get_files(fnames) for _file in files: if not _file['content']: continue From 36459ae9afbef2e61574580fddb920162fe35541 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 26 Jul 2019 13:32:54 -0400 Subject: [PATCH 0061/1097] wip: more performance testing --- pychunkedgraph/backend/chunkedgraph.py | 3 ++ pychunkedgraph/io/edge_storage.py | 54 +++++++++++++++----------- 2 files changed, 35 insertions(+), 22 deletions(-) diff --git 
a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index f648e0ede..6013fa393 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3032,6 +3032,9 @@ def _get_subgraph_layer2_edges(node_ids) -> \ connected_edges=connected_edges, time_stamp=time_stamp) + time_stamp = self.read_node_id_row(agglomeration_id, + columns=column_keys.Hierarchy.Child)[0].timestamp + bounding_box = self.normalize_bounding_box(bounding_box, bb_is_coordinate) # Layer 3+ diff --git a/pychunkedgraph/io/edge_storage.py b/pychunkedgraph/io/edge_storage.py index 33327a48b..dbbc57486 100644 --- a/pychunkedgraph/io/edge_storage.py +++ b/pychunkedgraph/io/edge_storage.py @@ -1,60 +1,70 @@ -''' +""" Functions to use when dealing with any cloud storage via CloudVolume -''' +""" import os -from typing import List, Dict +from typing import List, Dict, Tuple import numpy as np import zstandard as zstd from cloudvolume import Storage +from cloudvolume.storage import SimpleStorage from .protobuf.chunkEdges_pb2 import Edges def _decompress_edges(content): - ''' + """ :param content: zstd compressed bytes :return: Tuple[edges:np.array[np.uint64, np.uint64], areas:np.array[np.uint64] affinities: np.array[np.float64]] - ''' - edgesMessage = Edges() + """ + edgesMessage = Edges() zstdDecompressorObj = zstd.ZstdDecompressor().decompressobj() file_content = zstdDecompressorObj.decompress(content) edgesMessage.ParseFromString(file_content) - + edges = np.frombuffer(edgesMessage.edgeList).reshape(-1, 2) - affinities = np.frombuffer(edgesMessage.affinities, dtype=' Tuple[np.ndarray, np.ndarray, np.ndarray]: + """ :param: chunks_coordinates np.array of chunk coordinates - :return: a generator that yields decompressed file content - ''' + :return: tuple of edge infos (edges, affinities, areas) + """ edges_dir = os.environ.get( - 'EDIR', - 'gs://akhilesh-test/edges/fly_playground/bbox-102_51_5-110_59_9') + "EDIR", 
"gs://akhilesh-test/edges/fly_playground/bbox-102_51_5-110_59_9" + ) fnames = [] for chunk_coords in chunks_coordinates: - chunk_str = '_'.join(str(coord) for coord in chunk_coords) - fnames.append(f'chunk_{chunk_str}_zstd_level_17_proto.data') + chunk_str = "_".join(str(coord) for coord in chunk_coords) + fnames.append(f"chunk_{chunk_str}_zstd_level_17_proto.data") edges = np.array([], dtype=np.uint64).reshape(0, 2) affinities = np.array([], dtype=np.float32) areas = np.array([], dtype=np.uint64) + st = SimpleStorage(edges_dir) + if cv_threads > 1: + st = Storage(edges_dir, n_threads=cv_threads) + files = [] - with Storage(edges_dir, n_threads = cv_threads) as st: + with st: files = st.get_files(fnames) for _file in files: - if not _file['content']: continue - _edges, _affinities, _areas = _decompress_edges(_file['content']) + if not _file["content"]: + continue + _edges, _affinities, _areas = _decompress_edges(_file["content"]) edges = np.concatenate([edges, _edges]) affinities = np.concatenate([affinities, _affinities]) - areas = np.concatenate([areas, _areas]) + areas = np.concatenate([areas, _areas]) - return edges, affinities, areas \ No newline at end of file + return edges, affinities, areas From fec42e70b6608039a25d22412193312a8a84e280 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 26 Jul 2019 13:34:29 -0400 Subject: [PATCH 0062/1097] wip: updates --- pychunkedgraph/backend/chunkedgraph.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 6013fa393..3c315c089 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3107,8 +3107,8 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ chunk_ids = np.array([self.get_chunk_id(None, 1, *chunk) for chunk in chunks]) timings['determine_chunks_ids'] = time.time() - timings['determine_chunks_ids'] -# print(f'chunks: {len(chunks)}') -# print(f'threads: 
{this_n_threads}') + # print(f'chunks: {len(chunks)}') + # print(f'threads: {this_n_threads}') timings['reading_edges'] = time.time() edge_infos = mu.multithread_func( @@ -3131,7 +3131,9 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ timings['total'] = time.time() - timings['total'] - print(json.dumps(timings, default=str, indent=4)) + return timings + + # print(json.dumps(timings, default=str, indent=4)) # print(f'edges: {len(edges)}') # print(f'affinities: {len(affinities)}') # print(f'areas: {len(areas)}') From c71e7c680d29336e7bd7e44e5ceee5ced6013688 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 26 Jul 2019 14:56:33 -0400 Subject: [PATCH 0063/1097] remove json import --- pychunkedgraph/backend/chunkedgraph.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 3c315c089..47b1f0333 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -10,7 +10,6 @@ import re import itertools import logging -import json from itertools import chain from multiwrapper import multiprocessing_utils as mu From 7a9265f9eb61bdcdd15ff4d801716e1d35e011ca Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 26 Jul 2019 15:25:11 -0400 Subject: [PATCH 0064/1097] function to serialize, compress and write file to secondary storage --- pychunkedgraph/io/edge_storage.py | 51 ++++++++++++++++++++++++++++--- 1 file changed, 46 insertions(+), 5 deletions(-) diff --git a/pychunkedgraph/io/edge_storage.py b/pychunkedgraph/io/edge_storage.py index dbbc57486..dedcd06a1 100644 --- a/pychunkedgraph/io/edge_storage.py +++ b/pychunkedgraph/io/edge_storage.py @@ -1,5 +1,6 @@ """ -Functions to use when dealing with any cloud storage via CloudVolume +Functions for reading and writing edges +to secondary storage with CloudVolume """ import os @@ -13,7 +14,7 @@ from .protobuf.chunkEdges_pb2 import Edges -def _decompress_edges(content): +def 
_decompress_edges(content: bytes): """ :param content: zstd compressed bytes :return: Tuple[edges:np.array[np.uint64, np.uint64], @@ -32,9 +33,7 @@ def _decompress_edges(content): def get_chunk_edges( - edges_dir: str, - chunks_coordinates: List[np.ndarray], - cv_threads, + edges_dir: str, chunks_coordinates: List[np.ndarray], cv_threads ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ :param: chunks_coordinates np.array of chunk coordinates @@ -68,3 +67,45 @@ def get_chunk_edges( areas = np.concatenate([areas, _areas]) return edges, affinities, areas + + +def put_chunk_edges( + chunk_str: str, + edges: np.ndarray, + affinities: np.ndarray, + areas: np.ndarray, + edges_dir: str, + compression_level: int, +) -> None: + """ + :param: chunk_str - chunk coords in format x_y_z + :type: str + :param: edges - (supervoxel1, supervoxel2) + :type: np.ndarray + :param: affinities + :type: np.ndarray + :param: areas + :type: np.ndarray + :param: edges_dir - google cloud storage path + :type: str + :param: compression_level - for zstandard (1-22, higher - better ratio) + :type: int + """ + edgesMessage = Edges() + edgesMessage.edgeList = edges.tobytes() + edgesMessage.affinities = affinities.tobytes() + edgesMessage.areas = areas.tobytes() + + cctx = zstd.ZstdCompressor(level=compression_level) + compressed_proto = cctx.compress(edgesMessage.SerializeToString()) + + # filename - "chunk_" + chunk_coords + compression_tool + serialization_method + + file = f"chunk_{chunk_str}_zstd_proto.data" + with Storage(edges_dir) as st: + st.put_file( + file_path=file, + content=compressed_proto, + compress=None, + cache_control="no-cache", + ) From 239b20f2fa5b9f5477ee48bd60cab820650e58a3 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 26 Jul 2019 15:50:05 -0400 Subject: [PATCH 0065/1097] improve doc strings --- pychunkedgraph/io/edge_storage.py | 37 +++++++++++++++++-------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git 
a/pychunkedgraph/io/edge_storage.py b/pychunkedgraph/io/edge_storage.py index dedcd06a1..6c366e001 100644 --- a/pychunkedgraph/io/edge_storage.py +++ b/pychunkedgraph/io/edge_storage.py @@ -17,9 +17,9 @@ def _decompress_edges(content: bytes): """ :param content: zstd compressed bytes - :return: Tuple[edges:np.array[np.uint64, np.uint64], - areas:np.array[np.uint64] - affinities: np.array[np.float64]] + :type bytes: + :return: edges, affinities, areas + :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray] """ edgesMessage = Edges() zstdDecompressorObj = zstd.ZstdDecompressor().decompressobj() @@ -36,8 +36,10 @@ def get_chunk_edges( edges_dir: str, chunks_coordinates: List[np.ndarray], cv_threads ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ - :param: chunks_coordinates np.array of chunk coordinates - :return: tuple of edge infos (edges, affinities, areas) + :param chunks_coordinates: + :type np.array: + :return: edges, affinities, areas + :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray] """ edges_dir = os.environ.get( "EDIR", "gs://akhilesh-test/edges/fly_playground/bbox-102_51_5-110_59_9" @@ -78,18 +80,19 @@ def put_chunk_edges( compression_level: int, ) -> None: """ - :param: chunk_str - chunk coords in format x_y_z - :type: str - :param: edges - (supervoxel1, supervoxel2) - :type: np.ndarray - :param: affinities - :type: np.ndarray - :param: areas - :type: np.ndarray - :param: edges_dir - google cloud storage path - :type: str - :param: compression_level - for zstandard (1-22, higher - better ratio) - :type: int + :param chunk_str: chunk coords in format x_y_z + :type str: + :param edges: np.array of [supervoxel1, supervoxel2] + :type np.ndarray: + :param affinities: + :type np.ndarray: + :param areas: + :type np.ndarray: + :param edges_dir: google cloud storage path + :type str: + :param compression_level: zstandard compression level (1-22, higher - better ratio) + :type int: + :return None: """ edgesMessage = Edges() edgesMessage.edgeList = edges.tobytes() 
From d559231a352bc5a45ca0aa46c649fce9db25665d Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 29 Jul 2019 10:58:58 -0400 Subject: [PATCH 0066/1097] change edges filename format --- pychunkedgraph/io/edge_storage.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/pychunkedgraph/io/edge_storage.py b/pychunkedgraph/io/edge_storage.py index 6c366e001..3cc1b25b5 100644 --- a/pychunkedgraph/io/edge_storage.py +++ b/pychunkedgraph/io/edge_storage.py @@ -37,7 +37,7 @@ def get_chunk_edges( ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ :param chunks_coordinates: - :type np.array: + :type np.ndarray: :return: edges, affinities, areas :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray] """ @@ -47,6 +47,7 @@ def get_chunk_edges( fnames = [] for chunk_coords in chunks_coordinates: chunk_str = "_".join(str(coord) for coord in chunk_coords) + # TODO change filename format fnames.append(f"chunk_{chunk_str}_zstd_level_17_proto.data") edges = np.array([], dtype=np.uint64).reshape(0, 2) @@ -72,7 +73,7 @@ def get_chunk_edges( def put_chunk_edges( - chunk_str: str, + chunk_coordinates: np.ndarray, edges: np.ndarray, affinities: np.ndarray, areas: np.ndarray, @@ -80,8 +81,8 @@ def put_chunk_edges( compression_level: int, ) -> None: """ - :param chunk_str: chunk coords in format x_y_z - :type str: + :param chunk_coordinates: chunk coords x,y,z + :type np.ndarray: :param edges: np.array of [supervoxel1, supervoxel2] :type np.ndarray: :param affinities: @@ -102,9 +103,10 @@ def put_chunk_edges( cctx = zstd.ZstdCompressor(level=compression_level) compressed_proto = cctx.compress(edgesMessage.SerializeToString()) - # filename - "chunk_" + chunk_coords + compression_tool + serialization_method + chunk_str = "_".join(str(coord) for coord in chunk_coordinates) + # filename - "edges_x_y_z_" + compression_tool + serialization_method - file = f"chunk_{chunk_str}_zstd_proto.data" + file = f"edges_{chunk_str}_zstd_proto.data" with Storage(edges_dir) as 
st: st.put_file( file_path=file, From cd0ddde2b760859f85923bc96bab6a27fb3656a2 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 29 Jul 2019 14:58:51 -0400 Subject: [PATCH 0067/1097] change protobuf edges definition --- pychunkedgraph/io/protobuf/chunkEdges.proto | 12 +++- pychunkedgraph/io/protobuf/chunkEdges_pb2.py | 74 +++++++++++++++++--- 2 files changed, 74 insertions(+), 12 deletions(-) diff --git a/pychunkedgraph/io/protobuf/chunkEdges.proto b/pychunkedgraph/io/protobuf/chunkEdges.proto index 369fe9505..65b1bf4a2 100644 --- a/pychunkedgraph/io/protobuf/chunkEdges.proto +++ b/pychunkedgraph/io/protobuf/chunkEdges.proto @@ -3,7 +3,13 @@ syntax = "proto3"; package test; message Edges { - bytes edgeList = 1; - bytes affinities = 2; - bytes areas = 3; + message EdgesDef { + bytes edgeList = 1; + bytes affinities = 2; + bytes areas = 3; + } + + EdgesDef inChunk = 4; + EdgesDef crossChunk = 5; + EdgesDef betweenChunk = 6; } \ No newline at end of file diff --git a/pychunkedgraph/io/protobuf/chunkEdges_pb2.py b/pychunkedgraph/io/protobuf/chunkEdges_pb2.py index 52b0de633..dce03fb0c 100644 --- a/pychunkedgraph/io/protobuf/chunkEdges_pb2.py +++ b/pychunkedgraph/io/protobuf/chunkEdges_pb2.py @@ -20,35 +20,35 @@ package='test', syntax='proto3', serialized_options=None, - serialized_pb=_b('\n\x10\x63hunkEdges.proto\x12\x04test\"<\n\x05\x45\x64ges\x12\x10\n\x08\x65\x64geList\x18\x01 \x01(\x0c\x12\x12\n\naffinities\x18\x02 \x01(\x0c\x12\r\n\x05\x61reas\x18\x03 \x01(\x0c\x62\x06proto3') + serialized_pb=_b('\n\x10\x63hunkEdges.proto\x12\x04test\"\xc5\x01\n\x05\x45\x64ges\x12%\n\x07inChunk\x18\x04 \x01(\x0b\x32\x14.test.Edges.EdgesDef\x12(\n\ncrossChunk\x18\x05 \x01(\x0b\x32\x14.test.Edges.EdgesDef\x12*\n\x0c\x62\x65tweenChunk\x18\x06 \x01(\x0b\x32\x14.test.Edges.EdgesDef\x1a?\n\x08\x45\x64gesDef\x12\x10\n\x08\x65\x64geList\x18\x01 \x01(\x0c\x12\x12\n\naffinities\x18\x02 \x01(\x0c\x12\r\n\x05\x61reas\x18\x03 \x01(\x0c\x62\x06proto3') ) -_EDGES = 
_descriptor.Descriptor( - name='Edges', - full_name='test.Edges', +_EDGES_EDGESDEF = _descriptor.Descriptor( + name='EdgesDef', + full_name='test.Edges.EdgesDef', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='edgeList', full_name='test.Edges.edgeList', index=0, + name='edgeList', full_name='test.Edges.EdgesDef.edgeList', index=0, number=1, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='affinities', full_name='test.Edges.affinities', index=1, + name='affinities', full_name='test.Edges.EdgesDef.affinities', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='areas', full_name='test.Edges.areas', index=2, + name='areas', full_name='test.Edges.EdgesDef.areas', index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, @@ -66,19 +66,75 @@ extension_ranges=[], oneofs=[ ], - serialized_start=26, - serialized_end=86, + serialized_start=161, + serialized_end=224, +) + +_EDGES = _descriptor.Descriptor( + name='Edges', + full_name='test.Edges', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='inChunk', full_name='test.Edges.inChunk', index=0, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='crossChunk', 
full_name='test.Edges.crossChunk', index=1, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='betweenChunk', full_name='test.Edges.betweenChunk', index=2, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_EDGES_EDGESDEF, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27, + serialized_end=224, ) +_EDGES_EDGESDEF.containing_type = _EDGES +_EDGES.fields_by_name['inChunk'].message_type = _EDGES_EDGESDEF +_EDGES.fields_by_name['crossChunk'].message_type = _EDGES_EDGESDEF +_EDGES.fields_by_name['betweenChunk'].message_type = _EDGES_EDGESDEF DESCRIPTOR.message_types_by_name['Edges'] = _EDGES _sym_db.RegisterFileDescriptor(DESCRIPTOR) Edges = _reflection.GeneratedProtocolMessageType('Edges', (_message.Message,), { + + 'EdgesDef' : _reflection.GeneratedProtocolMessageType('EdgesDef', (_message.Message,), { + 'DESCRIPTOR' : _EDGES_EDGESDEF, + '__module__' : 'chunkEdges_pb2' + # @@protoc_insertion_point(class_scope:test.Edges.EdgesDef) + }) + , 'DESCRIPTOR' : _EDGES, '__module__' : 'chunkEdges_pb2' # @@protoc_insertion_point(class_scope:test.Edges) }) _sym_db.RegisterMessage(Edges) +_sym_db.RegisterMessage(Edges.EdgesDef) # @@protoc_insertion_point(module_scope) From fd3a34915142eeb3135645e0e9c4d2af66a2a8bb Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 29 Jul 2019 15:10:53 -0400 Subject: [PATCH 0068/1097] more documentation and change parameter order --- pychunkedgraph/io/__init__.py | 3 +++ 
pychunkedgraph/io/edge_storage.py | 14 +++++++++----- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/pychunkedgraph/io/__init__.py b/pychunkedgraph/io/__init__.py index e69de29bb..378554f35 100644 --- a/pychunkedgraph/io/__init__.py +++ b/pychunkedgraph/io/__init__.py @@ -0,0 +1,3 @@ +""" +All secondary storage stuff must go here +""" \ No newline at end of file diff --git a/pychunkedgraph/io/edge_storage.py b/pychunkedgraph/io/edge_storage.py index 3cc1b25b5..ba354a1ef 100644 --- a/pychunkedgraph/io/edge_storage.py +++ b/pychunkedgraph/io/edge_storage.py @@ -36,11 +36,14 @@ def get_chunk_edges( edges_dir: str, chunks_coordinates: List[np.ndarray], cv_threads ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ + :param edges_dir: cloudvolume storage path + :type str: :param chunks_coordinates: :type np.ndarray: :return: edges, affinities, areas :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray] """ + # this is just for testing edges_dir = os.environ.get( "EDIR", "gs://akhilesh-test/edges/fly_playground/bbox-102_51_5-110_59_9" ) @@ -48,6 +51,7 @@ def get_chunk_edges( for chunk_coords in chunks_coordinates: chunk_str = "_".join(str(coord) for coord in chunk_coords) # TODO change filename format + # filename format - edges_x_y_z.serialization.compression fnames.append(f"chunk_{chunk_str}_zstd_level_17_proto.data") edges = np.array([], dtype=np.uint64).reshape(0, 2) @@ -73,14 +77,16 @@ def get_chunk_edges( def put_chunk_edges( + edges_dir: str, chunk_coordinates: np.ndarray, edges: np.ndarray, affinities: np.ndarray, areas: np.ndarray, - edges_dir: str, compression_level: int, ) -> None: """ + :param edges_dir: cloudvolume storage path + :type str: :param chunk_coordinates: chunk coords x,y,z :type np.ndarray: :param edges: np.array of [supervoxel1, supervoxel2] @@ -89,8 +95,6 @@ def put_chunk_edges( :type np.ndarray: :param areas: :type np.ndarray: - :param edges_dir: google cloud storage path - :type str: :param compression_level: zstandard compression 
level (1-22, higher - better ratio) :type int: :return None: @@ -104,9 +108,9 @@ def put_chunk_edges( compressed_proto = cctx.compress(edgesMessage.SerializeToString()) chunk_str = "_".join(str(coord) for coord in chunk_coordinates) - # filename - "edges_x_y_z_" + compression_tool + serialization_method + # filename format - edges_x_y_z.serialization.compression - file = f"edges_{chunk_str}_zstd_proto.data" + file = f"edges_{chunk_str}.proto.zst" with Storage(edges_dir) as st: st.put_file( file_path=file, From d729edd1a276321f4dd39829b15cb761cf5ccf82 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 31 Jul 2019 14:48:19 -0400 Subject: [PATCH 0069/1097] edge message updates --- pychunkedgraph/backend/chunkedgraph_init.py | 201 ------------------- pychunkedgraph/io/edge_storage.py | 93 ++++++--- pychunkedgraph/io/protobuf/chunkEdges.proto | 12 +- pychunkedgraph/io/protobuf/chunkEdges_pb2.py | 63 +++--- 4 files changed, 103 insertions(+), 266 deletions(-) delete mode 100644 pychunkedgraph/backend/chunkedgraph_init.py diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py deleted file mode 100644 index b6ea25196..000000000 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ /dev/null @@ -1,201 +0,0 @@ -import time -import datetime -import os -import collections -from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, NamedTuple - -import pytz -import numpy as np - -from pychunkedgraph.backend import cutting, chunkedgraph_comp, flatgraph_utils -from pychunkedgraph.backend.utils import serializers, column_keys, row_keys, basetypes -from pychunkedgraph.backend.chunkedgraph_utils import compute_indices_pandas, \ - compute_bitmasks, get_google_compatible_time_stamp, \ - get_time_range_filter, get_time_range_and_column_filter, get_max_time, \ - combine_cross_chunk_edge_dicts, get_min_time, partial_row_data_to_column_dict - - -UTC = pytz.UTC - -def add_atomic_edges_in_chunks_v2(cg, 
edge_id_dict: dict, - edge_aff_dict: dict, edge_area_dict: dict, - isolated_node_ids: Sequence[np.uint64], - verbose: bool = True, - time_stamp: Optional[datetime.datetime] = None): - """ Creates atomic nodes in first abstraction layer for a SINGLE chunk - and all abstract nodes in the second for the same chunk - - Alle edges (edge_ids) need to be from one chunk and no nodes should - exist for this chunk prior to calling this function. All cross edges - (cross_edge_ids) have to point out the chunk (first entry is the id - within the chunk) - - :param edge_id_dict: dict - :param edge_aff_dict: dict - :param edge_area_dict: dict - :param isolated_node_ids: list of uint64s - ids of nodes that have no edge in the chunked graph - :param verbose: bool - :param time_stamp: datetime - """ - if time_stamp is None: - time_stamp = datetime.datetime.utcnow() - - if time_stamp.tzinfo is None: - time_stamp = UTC.localize(time_stamp) - - # Comply to resolution of BigTables TimeRange - time_stamp = get_google_compatible_time_stamp(time_stamp, - round_up=False) - - edge_aff_keys = [ - 'in_connected','in_disconnected','between_connected','between_disconnected'] - edge_id_keys = edge_aff_keys[:].insert(2, 'cross') - - # Check if keys exist and include an empty array if not - n_edge_ids = 0 - empty_edges_array = np.array([], dtype=np.uint64).reshape(0, 2) - - for key in edge_id_keys: - edge_id_dict[key] = np.concatenate( - edge_id_dict.get(key, empty_edges_array.copy(), - empty_edges_array.copy())) - n_edge_ids += len(edge_id_dict[key]) - - for key in edge_aff_keys: - edge_aff_dict[key] = np.concatenate( - edge_aff_dict.get(key, empty_edges_array.copy(), - empty_edges_array.copy())) - - # Get connected component within the chunk - chunk_node_ids = np.concatenate([ - isolated_node_ids.astype(np.uint64), - np.unique(edge_id_dict["in_connected"]), - np.unique(edge_id_dict["in_disconnected"]), - np.unique(edge_id_dict["cross"][:, 0]), - np.unique(edge_id_dict["between_connected"][:, 0]), 
- np.unique(edge_id_dict["between_disconnected"][:, 0])]) - - if not len(chunk_node_ids): return 0 - - chunk_node_ids = np.unique(chunk_node_ids) - node_chunk_ids = np.array( - [cg.get_chunk_id(c) for c in chunk_node_ids], dtype=np.uint64) - - u_node_chunk_ids, c_node_chunk_ids = np.unique( - node_chunk_ids, return_counts=True) - if len(u_node_chunk_ids) > 1: - raise Exception("%d: %d chunk ids found in node id list. " - "Some edges might be in the wrong order. " - "Number of occurences:" % - (u_node_chunk_ids[0], len(u_node_chunk_ids)), c_node_chunk_ids) - - - # add self edge to all node_ids to make sure they're - # part of connected components because the graph is processed component wise - # if not, the node_ids won't be stored - edge_ids = np.concatenate([ - edge_id_dict["in_connected"].copy(), - np.vstack([chunk_node_ids, chunk_node_ids]).T]) - - graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( - edge_ids, make_directed=True) - - time_start = time.time() - ccs = flatgraph_utils.connected_components(graph) - - if verbose: - cg.logger.debug("CC in chunk: %.3fs" % (time.time() - time_start)) - - # Add rows for nodes that are in this chunk - # a connected component at a time - node_c = 0 # Just a counter for the log / speed measurement - - n_ccs = len(ccs) - - # Make parent id creation easier - parent_chunk_id = cg.get_chunk_id( - layer=2, *cg.get_chunk_coordinates(u_node_chunk_ids[0])) - - parent_ids = cg.get_unique_node_id_range(parent_chunk_id, step=n_ccs) - - time_start = time.time() - time_dict = collections.defaultdict(list) - - time_start_1 = time.time() - sparse_indices = {} - remapping = {} - for k in edge_id_dict.keys(): - # Circumvent datatype issues - - u_ids, inv_ids = np.unique(edge_id_dict[k], return_inverse=True) - mapped_ids = np.arange(len(u_ids), dtype=np.int32) - remapped_arr = mapped_ids[inv_ids].reshape(edge_id_dict[k].shape) - - sparse_indices[k] = compute_indices_pandas(remapped_arr) - remapping[k] = dict(zip(u_ids, 
mapped_ids)) - - time_dict["sparse_indices"].append(time.time() - time_start_1) - - rows = [] - - for i_cc, cc in enumerate(ccs): - parent_id = parent_ids[i_cc] - parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2) - - # Add rows for nodes that are in this chunk - node_ids = unique_graph_ids[cc] - for node_id in node_ids: - time_start_2 = time.time() - - val_dict = {column_keys.Hierarchy.Parent: parent_id} - rows.append(cg.mutate_row(serializers.serialize_uint64(node_id), - val_dict, time_stamp=time_stamp)) - node_c += 1 - time_dict["creating_lv1_row"].append(time.time() - time_start_2) - - time_start_1 = time.time() - # Create parent node - rows.append(cg.mutate_row(serializers.serialize_uint64(parent_id), - {column_keys.Hierarchy.Child: node_ids}, - time_stamp=time_stamp)) - - time_dict["creating_lv2_row"].append(time.time() - time_start_1) - time_start_1 = time.time() - - cce_layers = cg.get_cross_chunk_edges_layer(parent_cross_edges) - u_cce_layers = np.unique(cce_layers) - - val_dict = {} - for cc_layer in u_cce_layers: - layer_cross_edges = parent_cross_edges[cce_layers == cc_layer] - - if len(layer_cross_edges) > 0: - val_dict[column_keys.Connectivity.CrossChunkEdge[cc_layer]] = \ - layer_cross_edges - - if len(val_dict) > 0: - rows.append(cg.mutate_row(serializers.serialize_uint64(parent_id), - val_dict, time_stamp=time_stamp)) - node_c += 1 - - time_dict["adding_cross_edges"].append(time.time() - time_start_1) - - if len(rows) > 100000: - time_start_1 = time.time() - cg.bulk_write(rows) - time_dict["writing"].append(time.time() - time_start_1) - - if len(rows) > 0: - time_start_1 = time.time() - cg.bulk_write(rows) - time_dict["writing"].append(time.time() - time_start_1) - - if verbose: - cg.logger.debug("Time creating rows: %.3fs for %d ccs with %d nodes" % - (time.time() - time_start, len(ccs), node_c)) - - for k in time_dict.keys(): - cg.logger.debug("%s -- %.3fms for %d instances -- avg = %.3fms" % - (k, np.sum(time_dict[k])*1000, 
len(time_dict[k]), - np.mean(time_dict[k])*1000)) \ No newline at end of file diff --git a/pychunkedgraph/io/edge_storage.py b/pychunkedgraph/io/edge_storage.py index ba354a1ef..79a302bc9 100644 --- a/pychunkedgraph/io/edge_storage.py +++ b/pychunkedgraph/io/edge_storage.py @@ -4,31 +4,54 @@ """ import os -from typing import List, Dict, Tuple +from typing import List, Dict, Tuple, Union import numpy as np import zstandard as zstd from cloudvolume import Storage from cloudvolume.storage import SimpleStorage -from .protobuf.chunkEdges_pb2 import Edges +from .protobuf.chunkEdges_pb2 import ChunkEdges -def _decompress_edges(content: bytes): +def _decompress_edges(content: bytes) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ :param content: zstd compressed bytes :type bytes: :return: edges, affinities, areas :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray] """ - edgesMessage = Edges() + + def _get_edges( + edge_type: str, edgesMessage: Union[ChunkEdges, ChunkEdges.Edges] + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + if edge_type == "cross": + edges = np.frombuffer(edgesMessage.crossChunk, dtype=" None: """ @@ -89,23 +106,45 @@ def put_chunk_edges( :type str: :param chunk_coordinates: chunk coords x,y,z :type np.ndarray: - :param edges: np.array of [supervoxel1, supervoxel2] - :type np.ndarray: - :param affinities: - :type np.ndarray: - :param areas: - :type np.ndarray: + :param chunk_edges: np.array of [supervoxel1, supervoxel2] + :type dict: chunk_edges with keys "in", "cross", "between" :param compression_level: zstandard compression level (1-22, higher - better ratio) :type int: :return None: """ - edgesMessage = Edges() - edgesMessage.edgeList = edges.tobytes() - edgesMessage.affinities = affinities.tobytes() - edgesMessage.areas = areas.tobytes() + + def _get_edges(edge_type: str) -> Union[bytes, ChunkEdges.Edges]: + # convert two numpy arrays to edge list + # arr1 = [1, 2, 3] + # arr2 = [4, 5, 6] + # edges = [[1,4],[2,5],[3,6]] + edges = np.concatenate( 
+ [ + chunk_edges[edge_type]["sv1"][:, None], + chunk_edges[edge_type]["sv2"][:, None], + ], + axis=1, + ) + edges_bytes = edges.astype(np.uint64).tobytes() + if edge_type == "cross": + return edges_bytes + + edgesMessage = ChunkEdges.Edges() + edgesMessage.edgeList = edges_bytes + edgesMessage.affinities = ( + chunk_edges[edge_type]["aff"].astype(np.float32).tobytes() + ) + edgesMessage.areas = chunk_edges[edge_type]["area"].astype(np.uint64).tobytes() + + return edgesMessage + + chunkEdgesMessage = ChunkEdges() + chunkEdgesMessage.inChunk = _get_edges("in") + chunkEdgesMessage.betweenChunk = _get_edges("between") + chunkEdgesMessage.crossChunk = _get_edges("cross") cctx = zstd.ZstdCompressor(level=compression_level) - compressed_proto = cctx.compress(edgesMessage.SerializeToString()) + compressed_proto = cctx.compress(chunkEdgesMessage.SerializeToString()) chunk_str = "_".join(str(coord) for coord in chunk_coordinates) # filename format - edges_x_y_z.serialization.compression diff --git a/pychunkedgraph/io/protobuf/chunkEdges.proto b/pychunkedgraph/io/protobuf/chunkEdges.proto index 65b1bf4a2..4766753c2 100644 --- a/pychunkedgraph/io/protobuf/chunkEdges.proto +++ b/pychunkedgraph/io/protobuf/chunkEdges.proto @@ -2,14 +2,14 @@ syntax = "proto3"; package test; -message Edges { - message EdgesDef { +message ChunkEdges { + message Edges { bytes edgeList = 1; bytes affinities = 2; - bytes areas = 3; + bytes areas = 3; } - EdgesDef inChunk = 4; - EdgesDef crossChunk = 5; - EdgesDef betweenChunk = 6; + Edges inChunk = 4; + bytes crossChunk = 5; + Edges betweenChunk = 6; } \ No newline at end of file diff --git a/pychunkedgraph/io/protobuf/chunkEdges_pb2.py b/pychunkedgraph/io/protobuf/chunkEdges_pb2.py index dce03fb0c..776c620cf 100644 --- a/pychunkedgraph/io/protobuf/chunkEdges_pb2.py +++ b/pychunkedgraph/io/protobuf/chunkEdges_pb2.py @@ -20,35 +20,35 @@ package='test', syntax='proto3', serialized_options=None, - 
serialized_pb=_b('\n\x10\x63hunkEdges.proto\x12\x04test\"\xc5\x01\n\x05\x45\x64ges\x12%\n\x07inChunk\x18\x04 \x01(\x0b\x32\x14.test.Edges.EdgesDef\x12(\n\ncrossChunk\x18\x05 \x01(\x0b\x32\x14.test.Edges.EdgesDef\x12*\n\x0c\x62\x65tweenChunk\x18\x06 \x01(\x0b\x32\x14.test.Edges.EdgesDef\x1a?\n\x08\x45\x64gesDef\x12\x10\n\x08\x65\x64geList\x18\x01 \x01(\x0c\x12\x12\n\naffinities\x18\x02 \x01(\x0c\x12\r\n\x05\x61reas\x18\x03 \x01(\x0c\x62\x06proto3') + serialized_pb=_b('\n\x10\x63hunkEdges.proto\x12\x04test\"\xb5\x01\n\nChunkEdges\x12\'\n\x07inChunk\x18\x04 \x01(\x0b\x32\x16.test.ChunkEdges.Edges\x12\x12\n\ncrossChunk\x18\x05 \x01(\x0c\x12,\n\x0c\x62\x65tweenChunk\x18\x06 \x01(\x0b\x32\x16.test.ChunkEdges.Edges\x1a<\n\x05\x45\x64ges\x12\x10\n\x08\x65\x64geList\x18\x01 \x01(\x0c\x12\x12\n\naffinities\x18\x02 \x01(\x0c\x12\r\n\x05\x61reas\x18\x03 \x01(\x0c\x62\x06proto3') ) -_EDGES_EDGESDEF = _descriptor.Descriptor( - name='EdgesDef', - full_name='test.Edges.EdgesDef', +_CHUNKEDGES_EDGES = _descriptor.Descriptor( + name='Edges', + full_name='test.ChunkEdges.Edges', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='edgeList', full_name='test.Edges.EdgesDef.edgeList', index=0, + name='edgeList', full_name='test.ChunkEdges.Edges.edgeList', index=0, number=1, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='affinities', full_name='test.Edges.EdgesDef.affinities', index=1, + name='affinities', full_name='test.ChunkEdges.Edges.affinities', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='areas', 
full_name='test.Edges.EdgesDef.areas', index=2, + name='areas', full_name='test.ChunkEdges.Edges.areas', index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, @@ -66,33 +66,33 @@ extension_ranges=[], oneofs=[ ], - serialized_start=161, - serialized_end=224, + serialized_start=148, + serialized_end=208, ) -_EDGES = _descriptor.Descriptor( - name='Edges', - full_name='test.Edges', +_CHUNKEDGES = _descriptor.Descriptor( + name='ChunkEdges', + full_name='test.ChunkEdges', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='inChunk', full_name='test.Edges.inChunk', index=0, + name='inChunk', full_name='test.ChunkEdges.inChunk', index=0, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='crossChunk', full_name='test.Edges.crossChunk', index=1, - number=5, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, + name='crossChunk', full_name='test.ChunkEdges.crossChunk', index=1, + number=5, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='betweenChunk', full_name='test.Edges.betweenChunk', index=2, + name='betweenChunk', full_name='test.ChunkEdges.betweenChunk', index=2, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, @@ -101,7 +101,7 @@ ], extensions=[ ], - nested_types=[_EDGES_EDGESDEF, ], + nested_types=[_CHUNKEDGES_EDGES, ], enum_types=[ ], serialized_options=None, @@ -111,30 +111,29 @@ 
oneofs=[ ], serialized_start=27, - serialized_end=224, + serialized_end=208, ) -_EDGES_EDGESDEF.containing_type = _EDGES -_EDGES.fields_by_name['inChunk'].message_type = _EDGES_EDGESDEF -_EDGES.fields_by_name['crossChunk'].message_type = _EDGES_EDGESDEF -_EDGES.fields_by_name['betweenChunk'].message_type = _EDGES_EDGESDEF -DESCRIPTOR.message_types_by_name['Edges'] = _EDGES +_CHUNKEDGES_EDGES.containing_type = _CHUNKEDGES +_CHUNKEDGES.fields_by_name['inChunk'].message_type = _CHUNKEDGES_EDGES +_CHUNKEDGES.fields_by_name['betweenChunk'].message_type = _CHUNKEDGES_EDGES +DESCRIPTOR.message_types_by_name['ChunkEdges'] = _CHUNKEDGES _sym_db.RegisterFileDescriptor(DESCRIPTOR) -Edges = _reflection.GeneratedProtocolMessageType('Edges', (_message.Message,), { +ChunkEdges = _reflection.GeneratedProtocolMessageType('ChunkEdges', (_message.Message,), { - 'EdgesDef' : _reflection.GeneratedProtocolMessageType('EdgesDef', (_message.Message,), { - 'DESCRIPTOR' : _EDGES_EDGESDEF, + 'Edges' : _reflection.GeneratedProtocolMessageType('Edges', (_message.Message,), { + 'DESCRIPTOR' : _CHUNKEDGES_EDGES, '__module__' : 'chunkEdges_pb2' - # @@protoc_insertion_point(class_scope:test.Edges.EdgesDef) + # @@protoc_insertion_point(class_scope:test.ChunkEdges.Edges) }) , - 'DESCRIPTOR' : _EDGES, + 'DESCRIPTOR' : _CHUNKEDGES, '__module__' : 'chunkEdges_pb2' - # @@protoc_insertion_point(class_scope:test.Edges) + # @@protoc_insertion_point(class_scope:test.ChunkEdges) }) -_sym_db.RegisterMessage(Edges) -_sym_db.RegisterMessage(Edges.EdgesDef) +_sym_db.RegisterMessage(ChunkEdges) +_sym_db.RegisterMessage(ChunkEdges.Edges) # @@protoc_insertion_point(module_scope) From a0257ff05b649c07b77ac4f5fc21a7bea899bbce Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 31 Jul 2019 16:37:39 -0400 Subject: [PATCH 0070/1097] protobuf copyfrom fix and revert get_subgraph_edges --- pychunkedgraph/backend/chunkedgraph.py | 19 ++++++++++++++----- pychunkedgraph/ingest/ran_ingestion.py | 2 ++ 
pychunkedgraph/io/edge_storage.py | 4 ++-- 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 47b1f0333..a12cf4ed9 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3032,7 +3032,7 @@ def _get_subgraph_layer2_edges(node_ids) -> \ time_stamp=time_stamp) time_stamp = self.read_node_id_row(agglomeration_id, - columns=column_keys.Hierarchy.Child)[0].timestamp + columns=column_keys.Hierarchy.Child)[0].timestamp bounding_box = self.normalize_bounding_box(bounding_box, bb_is_coordinate) @@ -3045,13 +3045,21 @@ def _get_subgraph_layer2_edges(node_ids) -> \ if verbose: time_start = time.time() + child_chunk_ids = self.get_chunk_ids_from_node_ids(child_ids) u_ccids = np.unique(child_chunk_ids) - this_n_threads = np.min([int(len(u_ccids) // 50000) + 1, mu.n_cpus]) + + child_blocks = [] + # Make blocks of child ids that are in the same chunk + for u_ccid in u_ccids: + child_blocks.append(child_ids[child_chunk_ids == u_ccid]) + + n_child_ids = len(child_ids) + this_n_threads = np.min([int(n_child_ids // 50000) + 1, mu.n_cpus]) edge_infos = mu.multithread_func( _get_subgraph_layer2_edges, - np.array_split(u_ccids, this_n_threads), + np.array_split(child_ids, this_n_threads), n_threads=this_n_threads, debug=this_n_threads == 1) affinities = np.array([], dtype=np.float32) @@ -3065,12 +3073,13 @@ def _get_subgraph_layer2_edges(node_ids) -> \ edges = np.concatenate([edges, _edges]) if verbose: - self.logger.debug("Layer %d: %.3fms for %d children with %d threads" % + self.logger.debug("Layer %d: %.3fms for %d childs with %d threads" % (2, (time.time() - time_start) * 1000, - len(child_ids), this_n_threads)) + n_child_ids, this_n_threads)) return edges, affinities, areas + def get_subgraph_edges_v2(self, offset = np.array([105, 54, 6]), this_n_threads = 4, cv_threads = 20, diff --git a/pychunkedgraph/ingest/ran_ingestion.py 
b/pychunkedgraph/ingest/ran_ingestion.py index 7a742d1bf..160fa738d 100644 --- a/pychunkedgraph/ingest/ran_ingestion.py +++ b/pychunkedgraph/ingest/ran_ingestion.py @@ -11,6 +11,7 @@ from multiwrapper import multiprocessing_utils as mu from pychunkedgraph.ingest import ingestionmanager, ingestion_utils as iu +from pychunkedgraph.io.edge_storage import put_chunk_edges def ingest_into_chunkedgraph(storage_path, ws_cv_path, cg_table_id, @@ -235,6 +236,7 @@ def create_atomic_chunk(im, chunk_coord, aff_dtype=np.float32, verbose=True): chunk_coord = np.array(list(chunk_coord), dtype=np.int) edge_dict = collect_edge_data(im, chunk_coord, aff_dtype=aff_dtype) + put_chunk_edges(None, chunk_coord, edge_dict, 17) mapping = collect_agglomeration_data(im, chunk_coord) active_edge_dict, isolated_ids = define_active_edges(edge_dict, mapping) diff --git a/pychunkedgraph/io/edge_storage.py b/pychunkedgraph/io/edge_storage.py index 79a302bc9..1c7b3089e 100644 --- a/pychunkedgraph/io/edge_storage.py +++ b/pychunkedgraph/io/edge_storage.py @@ -139,8 +139,8 @@ def _get_edges(edge_type: str) -> Union[bytes, ChunkEdges.Edges]: return edgesMessage chunkEdgesMessage = ChunkEdges() - chunkEdgesMessage.inChunk = _get_edges("in") - chunkEdgesMessage.betweenChunk = _get_edges("between") + chunkEdgesMessage.inChunk.CopyFrom(_get_edges("in")) + chunkEdgesMessage.betweenChunk.CopyFrom(_get_edges("between")) chunkEdgesMessage.crossChunk = _get_edges("cross") cctx = zstd.ZstdCompressor(level=compression_level) From 438054e6998684908865c34d4eb101364e7033c1 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 1 Aug 2019 10:53:41 -0400 Subject: [PATCH 0071/1097] wip: read and write edges complete --- pychunkedgraph/backend/chunkedgraph.py | 18 +++++++++--------- pychunkedgraph/io/edge_storage.py | 1 + 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index a12cf4ed9..3a9ed321a 100644 --- 
a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -45,6 +45,7 @@ from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, NamedTuple +from pychunkedgraph.io.edge_storage import get_chunk_edges HOME = os.path.expanduser("~") N_DIGITS_UINT64 = len(str(np.iinfo(np.uint64).max)) @@ -3078,11 +3079,12 @@ def _get_subgraph_layer2_edges(node_ids) -> \ n_child_ids, this_n_threads)) return edges, affinities, areas - - - def get_subgraph_edges_v2(self, offset = np.array([105, 54, 6]), + + + def get_subgraph_edges_v2(self, edges_dir, + offset = np.array([105, 54, 6]), this_n_threads = 4, - cv_threads = 20, + cv_threads = 1, bounding_box: Optional[Sequence[Sequence[int]]] = None, bb_is_coordinate: bool = False, connected_edges=True, @@ -3092,7 +3094,7 @@ def get_subgraph_edges_v2(self, offset = np.array([105, 54, 6]), def _get_subgraph_layer2_edges(chunk_ids) -> \ Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: return get_chunk_edges( - 'testing_ignore_this', + edges_dir, [self.get_chunk_coordinates(chunk_id) for chunk_id in chunk_ids], cv_threads) @@ -3115,9 +3117,6 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ chunk_ids = np.array([self.get_chunk_id(None, 1, *chunk) for chunk in chunks]) timings['determine_chunks_ids'] = time.time() - timings['determine_chunks_ids'] - # print(f'chunks: {len(chunks)}') - # print(f'threads: {this_n_threads}') - timings['reading_edges'] = time.time() edge_infos = mu.multithread_func( _get_subgraph_layer2_edges, @@ -3146,7 +3145,8 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ # print(f'affinities: {len(affinities)}') # print(f'areas: {len(areas)}') - # return edges, affinities, areas + # return edges, affinities, areas + def get_subgraph_nodes(self, agglomeration_id: np.uint64, bounding_box: Optional[Sequence[Sequence[int]]] = None, diff --git a/pychunkedgraph/io/edge_storage.py b/pychunkedgraph/io/edge_storage.py index 1c7b3089e..263af5b3a 100644 --- 
a/pychunkedgraph/io/edge_storage.py +++ b/pychunkedgraph/io/edge_storage.py @@ -118,6 +118,7 @@ def _get_edges(edge_type: str) -> Union[bytes, ChunkEdges.Edges]: # arr1 = [1, 2, 3] # arr2 = [4, 5, 6] # edges = [[1,4],[2,5],[3,6]] + # this is faster than numpy.dstack edges = np.concatenate( [ chunk_edges[edge_type]["sv1"][:, None], From 88d7b8646b1d8691a054bb19a4a72f02488961c0 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 1 Aug 2019 14:23:39 -0400 Subject: [PATCH 0072/1097] wip --- pychunkedgraph/backend/chunkedgraph_init.py | 412 ++++++++++++++++++++ 1 file changed, 412 insertions(+) create mode 100644 pychunkedgraph/backend/chunkedgraph_init.py diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py new file mode 100644 index 000000000..1b1c517ba --- /dev/null +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -0,0 +1,412 @@ +import time +import datetime +import collections + +import numpy as np +import pytz + +from pychunkedgraph.backend.utils import serializers, column_keys, row_keys, basetypes +from pychunkedgraph.backend import flatgraph_utils +from pychunkedgraph.backend.chunkedgraph_utils import ( + compute_indices_pandas, + get_google_compatible_time_stamp, +) + +from typing import ( + Any, + Dict, + Iterable, + List, + Optional, + Sequence, + Tuple, + Union, + NamedTuple, +) + +UTC = pytz.UTC + + +def add_atomic_edges_in_chunks( + self, + edge_id_dict: dict, + edge_aff_dict: dict, + edge_area_dict: dict, + isolated_node_ids: Sequence[np.uint64], + verbose: bool = True, + time_stamp: Optional[datetime.datetime] = None, +): + """ Creates atomic nodes in first abstraction layer for a SINGLE chunk + and all abstract nodes in the second for the same chunk + + Alle edges (edge_ids) need to be from one chunk and no nodes should + exist for this chunk prior to calling this function. 
All cross edges + (cross_edge_ids) have to point out the chunk (first entry is the id + within the chunk) + + :param edge_id_dict: dict + :param edge_aff_dict: dict + :param edge_area_dict: dict + :param isolated_node_ids: list of uint64s + ids of nodes that have no edge in the chunked graph + :param verbose: bool + :param time_stamp: datetime + """ + if time_stamp is None: + time_stamp = datetime.datetime.utcnow() + + if time_stamp.tzinfo is None: + time_stamp = UTC.localize(time_stamp) + + # Comply to resolution of BigTables TimeRange + time_stamp = get_google_compatible_time_stamp(time_stamp, round_up=False) + + edge_id_keys = [ + "in_connected", + "in_disconnected", + "cross", + "between_connected", + "between_disconnected", + ] + edge_aff_keys = [ + "in_connected", + "in_disconnected", + "between_connected", + "between_disconnected", + ] + + # Check if keys exist and include an empty array if not + n_edge_ids = 0 + chunk_id = None + for edge_id_key in edge_id_keys: + if not edge_id_key in edge_id_dict: + empty_edges = np.array([], dtype=np.uint64).reshape(0, 2) + edge_id_dict[edge_id_key] = empty_edges + else: + n_edge_ids += len(edge_id_dict[edge_id_key]) + + if len(edge_id_dict[edge_id_key]) > 0: + node_id = edge_id_dict[edge_id_key][0, 0] + chunk_id = self.get_chunk_id(node_id) + + for edge_aff_key in edge_aff_keys: + if not edge_aff_key in edge_aff_dict: + edge_aff_dict[edge_aff_key] = np.array([], dtype=np.float32) + + time_start = time.time() + + # Catch trivial case + if n_edge_ids == 0 and len(isolated_node_ids) == 0: + return 0 + + # Make parent id creation easier + if chunk_id is None: + chunk_id = self.get_chunk_id(isolated_node_ids[0]) + + chunk_id_c = self.get_chunk_coordinates(chunk_id) + parent_chunk_id = self.get_chunk_id( + layer=2, x=chunk_id_c[0], y=chunk_id_c[1], z=chunk_id_c[2] + ) + + # Get connected component within the chunk + chunk_node_ids = np.concatenate( + [ + isolated_node_ids.astype(np.uint64), + 
np.unique(edge_id_dict["in_connected"]), + np.unique(edge_id_dict["in_disconnected"]), + np.unique(edge_id_dict["cross"][:, 0]), + np.unique(edge_id_dict["between_connected"][:, 0]), + np.unique(edge_id_dict["between_disconnected"][:, 0]), + ] + ) + + chunk_node_ids = np.unique(chunk_node_ids) + + node_chunk_ids = np.array( + [self.get_chunk_id(c) for c in chunk_node_ids], dtype=np.uint64 + ) + + u_node_chunk_ids, c_node_chunk_ids = np.unique(node_chunk_ids, return_counts=True) + if len(u_node_chunk_ids) > 1: + raise Exception( + "%d: %d chunk ids found in node id list. " + "Some edges might be in the wrong order. " + "Number of occurences:" % (chunk_id, len(u_node_chunk_ids)), + c_node_chunk_ids, + ) + + add_edge_ids = np.vstack([chunk_node_ids, chunk_node_ids]).T + edge_ids = np.concatenate([edge_id_dict["in_connected"].copy(), add_edge_ids]) + + graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( + edge_ids, make_directed=True + ) + + ccs = flatgraph_utils.connected_components(graph) + + if verbose: + self.logger.debug("CC in chunk: %.3fs" % (time.time() - time_start)) + + # Add rows for nodes that are in this chunk + # a connected component at a time + node_c = 0 # Just a counter for the log / speed measurement + + n_ccs = len(ccs) + + parent_ids = self.get_unique_node_id_range(parent_chunk_id, step=n_ccs) + time_start = time.time() + + time_dict = collections.defaultdict(list) + + time_start_1 = time.time() + sparse_indices = {} + remapping = {} + for k in edge_id_dict.keys(): + # Circumvent datatype issues + + u_ids, inv_ids = np.unique(edge_id_dict[k], return_inverse=True) + mapped_ids = np.arange(len(u_ids), dtype=np.int32) + remapped_arr = mapped_ids[inv_ids].reshape(edge_id_dict[k].shape) + + sparse_indices[k] = compute_indices_pandas(remapped_arr) + remapping[k] = dict(zip(u_ids, mapped_ids)) + + time_dict["sparse_indices"].append(time.time() - time_start_1) + + rows = [] + + for i_cc, cc in enumerate(ccs): + node_ids = unique_graph_ids[cc] + 
+ u_chunk_ids = np.unique([self.get_chunk_id(n) for n in node_ids]) + + if len(u_chunk_ids) > 1: + self.logger.error(f"Found multiple chunk ids: {u_chunk_ids}") + raise Exception() + + # Create parent id + parent_id = parent_ids[i_cc] + + parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2) + + # Add rows for nodes that are in this chunk + for i_node_id, node_id in enumerate(node_ids): + # Extract edges relevant to this node + + # in chunk + connected + time_start_2 = time.time() + if node_id in remapping["in_connected"]: + row_ids, column_ids = sparse_indices["in_connected"][ + remapping["in_connected"][node_id] + ] + + inv_column_ids = (column_ids + 1) % 2 + + connected_ids = edge_id_dict["in_connected"][row_ids, inv_column_ids] + connected_affs = edge_aff_dict["in_connected"][row_ids] + connected_areas = edge_area_dict["in_connected"][row_ids] + time_dict["in_connected"].append(time.time() - time_start_2) + time_start_2 = time.time() + else: + connected_ids = np.array([], dtype=np.uint64) + connected_affs = np.array([], dtype=np.float32) + connected_areas = np.array([], dtype=np.uint64) + + # in chunk + disconnected + if node_id in remapping["in_disconnected"]: + row_ids, column_ids = sparse_indices["in_disconnected"][ + remapping["in_disconnected"][node_id] + ] + inv_column_ids = (column_ids + 1) % 2 + + disconnected_ids = edge_id_dict["in_disconnected"][ + row_ids, inv_column_ids + ] + disconnected_affs = edge_aff_dict["in_disconnected"][row_ids] + disconnected_areas = edge_area_dict["in_disconnected"][row_ids] + time_dict["in_disconnected"].append(time.time() - time_start_2) + time_start_2 = time.time() + else: + disconnected_ids = np.array([], dtype=np.uint64) + disconnected_affs = np.array([], dtype=np.float32) + disconnected_areas = np.array([], dtype=np.uint64) + + # out chunk + connected + if node_id in remapping["between_connected"]: + row_ids, column_ids = sparse_indices["between_connected"][ + remapping["between_connected"][node_id] + ] + 
+ row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["out_connected_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + connected_ids = np.concatenate( + [ + connected_ids, + edge_id_dict["between_connected"][row_ids, inv_column_ids], + ] + ) + connected_affs = np.concatenate( + [connected_affs, edge_aff_dict["between_connected"][row_ids]] + ) + connected_areas = np.concatenate( + [connected_areas, edge_area_dict["between_connected"][row_ids]] + ) + + parent_cross_edges = np.concatenate( + [parent_cross_edges, edge_id_dict["between_connected"][row_ids]] + ) + + time_dict["out_connected"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # out chunk + disconnected + if node_id in remapping["between_disconnected"]: + row_ids, column_ids = sparse_indices["between_disconnected"][ + remapping["between_disconnected"][node_id] + ] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["out_disconnected_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + disconnected_ids = np.concatenate( + [ + disconnected_ids, + edge_id_dict["between_disconnected"][row_ids, inv_column_ids], + ] + ) + disconnected_affs = np.concatenate( + [disconnected_affs, edge_aff_dict["between_disconnected"][row_ids]] + ) + disconnected_areas = np.concatenate( + [ + disconnected_areas, + edge_area_dict["between_disconnected"][row_ids], + ] + ) + + time_dict["out_disconnected"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # cross + if node_id in remapping["cross"]: + row_ids, column_ids = sparse_indices["cross"][ + remapping["cross"][node_id] + ] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["cross_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + 
connected_ids = np.concatenate( + [connected_ids, edge_id_dict["cross"][row_ids, inv_column_ids]] + ) + connected_affs = np.concatenate( + [connected_affs, np.full((len(row_ids)), np.inf, dtype=np.float32)] + ) + connected_areas = np.concatenate( + [connected_areas, np.ones((len(row_ids)), dtype=np.uint64)] + ) + + parent_cross_edges = np.concatenate( + [parent_cross_edges, edge_id_dict["cross"][row_ids]] + ) + time_dict["cross"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # Create node + partners = np.concatenate([connected_ids, disconnected_ids]) + affinities = np.concatenate([connected_affs, disconnected_affs]) + areas = np.concatenate([connected_areas, disconnected_areas]) + connected = np.arange(len(connected_ids), dtype=np.int) + + val_dict = { + column_keys.Connectivity.Partner: partners, + column_keys.Connectivity.Affinity: affinities, + column_keys.Connectivity.Area: areas, + column_keys.Connectivity.Connected: connected, + column_keys.Hierarchy.Parent: parent_id, + } + + rows.append( + self.mutate_row( + serializers.serialize_uint64(node_id), + val_dict, + time_stamp=time_stamp, + ) + ) + node_c += 1 + time_dict["creating_lv1_row"].append(time.time() - time_start_2) + + time_start_1 = time.time() + # Create parent node + rows.append( + self.mutate_row( + serializers.serialize_uint64(parent_id), + {column_keys.Hierarchy.Child: node_ids}, + time_stamp=time_stamp, + ) + ) + + time_dict["creating_lv2_row"].append(time.time() - time_start_1) + time_start_1 = time.time() + + cce_layers = self.get_cross_chunk_edges_layer(parent_cross_edges) + u_cce_layers = np.unique(cce_layers) + + val_dict = {} + for cc_layer in u_cce_layers: + layer_cross_edges = parent_cross_edges[cce_layers == cc_layer] + + if len(layer_cross_edges) > 0: + val_dict[ + column_keys.Connectivity.CrossChunkEdge[cc_layer] + ] = layer_cross_edges + + if len(val_dict) > 0: + rows.append( + self.mutate_row( + serializers.serialize_uint64(parent_id), + val_dict, + 
time_stamp=time_stamp, + ) + ) + node_c += 1 + + time_dict["adding_cross_edges"].append(time.time() - time_start_1) + + if len(rows) > 100000: + time_start_1 = time.time() + self.bulk_write(rows) + time_dict["writing"].append(time.time() - time_start_1) + + if len(rows) > 0: + time_start_1 = time.time() + self.bulk_write(rows) + time_dict["writing"].append(time.time() - time_start_1) + + if verbose: + self.logger.debug( + "Time creating rows: %.3fs for %d ccs with %d nodes" + % (time.time() - time_start, len(ccs), node_c) + ) + + for k in time_dict.keys(): + self.logger.debug( + "%s -- %.3fms for %d instances -- avg = %.3fms" + % ( + k, + np.sum(time_dict[k]) * 1000, + len(time_dict[k]), + np.mean(time_dict[k]) * 1000, + ) + ) + From e67af63a0f652a8e52110cee0d05ba9e72085cb3 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 1 Aug 2019 14:54:41 -0400 Subject: [PATCH 0073/1097] wip: change protobuf mesasge definition --- pychunkedgraph/io/edge_storage.py | 53 ++++---- pychunkedgraph/io/protobuf/chunkEdges.proto | 25 ++-- pychunkedgraph/io/protobuf/chunkEdges_pb2.py | 126 +++++++++++++------ 3 files changed, 129 insertions(+), 75 deletions(-) diff --git a/pychunkedgraph/io/edge_storage.py b/pychunkedgraph/io/edge_storage.py index 263af5b3a..bc1f9e63b 100644 --- a/pychunkedgraph/io/edge_storage.py +++ b/pychunkedgraph/io/edge_storage.py @@ -11,7 +11,7 @@ from cloudvolume import Storage from cloudvolume.storage import SimpleStorage -from .protobuf.chunkEdges_pb2 import ChunkEdges +from .protobuf.chunkEdges_pb2 import Edge, Edges, ChunkEdges def _decompress_edges(content: bytes) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: @@ -23,30 +23,30 @@ def _decompress_edges(content: bytes) -> Tuple[np.ndarray, np.ndarray, np.ndarra """ def _get_edges( - edge_type: str, edgesMessage: Union[ChunkEdges, ChunkEdges.Edges] + edge_type: str, edges_message: Union[ChunkEdges, ChunkEdges.Edges] ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: if edge_type == "cross": - edges = 
np.frombuffer(edgesMessage.crossChunk, dtype=" Tuple[np.ndarray, np.ndarray, np.ndarray]: """ :param edges_dir: cloudvolume storage path @@ -64,12 +64,13 @@ def get_chunk_edges( :param chunks_coordinates: :type np.ndarray: :return: edges, affinities, areas + :param cv_threads: number of threads for cloudvolume Storage + :type int: :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray] """ fnames = [] for chunk_coords in chunks_coordinates: chunk_str = "_".join(str(coord) for coord in chunk_coords) - # TODO change filename format # filename format - edges_x_y_z.serialization.compression fnames.append(f"edges_{chunk_str}.proto.zst") @@ -106,8 +107,8 @@ def put_chunk_edges( :type str: :param chunk_coordinates: chunk coords x,y,z :type np.ndarray: - :param chunk_edges: np.array of [supervoxel1, supervoxel2] - :type dict: chunk_edges with keys "in", "cross", "between" + :param chunk_edges: chunk_edges with keys "in", "cross", "between" + :type dict: :param compression_level: zstandard compression level (1-22, higher - better ratio) :type int: :return None: @@ -130,22 +131,22 @@ def _get_edges(edge_type: str) -> Union[bytes, ChunkEdges.Edges]: if edge_type == "cross": return edges_bytes - edgesMessage = ChunkEdges.Edges() - edgesMessage.edgeList = edges_bytes - edgesMessage.affinities = ( + edges_message = ChunkEdges.Edges() + edges_message.edgeList = edges_bytes + edges_message.affinities = ( chunk_edges[edge_type]["aff"].astype(np.float32).tobytes() ) - edgesMessage.areas = chunk_edges[edge_type]["area"].astype(np.uint64).tobytes() + edges_message.areas = chunk_edges[edge_type]["area"].astype(np.uint64).tobytes() - return edgesMessage + return edges_message - chunkEdgesMessage = ChunkEdges() - chunkEdgesMessage.inChunk.CopyFrom(_get_edges("in")) - chunkEdgesMessage.betweenChunk.CopyFrom(_get_edges("between")) - chunkEdgesMessage.crossChunk = _get_edges("cross") + chunk_edges_message = ChunkEdges() + chunk_edges_message.inChunk.CopyFrom(_get_edges("in")) + 
chunk_edges_message.betweenChunk.CopyFrom(_get_edges("between")) + chunk_edges_message.crossChunk = _get_edges("cross") cctx = zstd.ZstdCompressor(level=compression_level) - compressed_proto = cctx.compress(chunkEdgesMessage.SerializeToString()) + compressed_proto = cctx.compress(chunk_edges_message.SerializeToString()) chunk_str = "_".join(str(coord) for coord in chunk_coordinates) # filename format - edges_x_y_z.serialization.compression diff --git a/pychunkedgraph/io/protobuf/chunkEdges.proto b/pychunkedgraph/io/protobuf/chunkEdges.proto index 4766753c2..c2778add1 100644 --- a/pychunkedgraph/io/protobuf/chunkEdges.proto +++ b/pychunkedgraph/io/protobuf/chunkEdges.proto @@ -1,15 +1,20 @@ syntax = "proto3"; -package test; +package edges; + +message Edge { + fixed64 supervoxel_1 = 1; + fixed64 supervoxel_2 = 2; + float affinity = 3; + uint64 area = 4; +} + +message Edges { + repeated Edge edge = 1; +} message ChunkEdges { - message Edges { - bytes edgeList = 1; - bytes affinities = 2; - bytes areas = 3; - } - - Edges inChunk = 4; - bytes crossChunk = 5; - Edges betweenChunk = 6; + Edges in_chunk = 1; + Edges cross_chunk = 2; + Edges between_chunk = 3; } \ No newline at end of file diff --git a/pychunkedgraph/io/protobuf/chunkEdges_pb2.py b/pychunkedgraph/io/protobuf/chunkEdges_pb2.py index 776c620cf..35f85e097 100644 --- a/pychunkedgraph/io/protobuf/chunkEdges_pb2.py +++ b/pychunkedgraph/io/protobuf/chunkEdges_pb2.py @@ -17,40 +17,47 @@ DESCRIPTOR = _descriptor.FileDescriptor( name='chunkEdges.proto', - package='test', + package='edges', syntax='proto3', serialized_options=None, - serialized_pb=_b('\n\x10\x63hunkEdges.proto\x12\x04test\"\xb5\x01\n\nChunkEdges\x12\'\n\x07inChunk\x18\x04 \x01(\x0b\x32\x16.test.ChunkEdges.Edges\x12\x12\n\ncrossChunk\x18\x05 \x01(\x0c\x12,\n\x0c\x62\x65tweenChunk\x18\x06 \x01(\x0b\x32\x16.test.ChunkEdges.Edges\x1a<\n\x05\x45\x64ges\x12\x10\n\x08\x65\x64geList\x18\x01 \x01(\x0c\x12\x12\n\naffinities\x18\x02 
\x01(\x0c\x12\r\n\x05\x61reas\x18\x03 \x01(\x0c\x62\x06proto3') + serialized_pb=_b('\n\x10\x63hunkEdges.proto\x12\x05\x65\x64ges\"R\n\x04\x45\x64ge\x12\x14\n\x0csupervoxel_1\x18\x01 \x01(\x06\x12\x14\n\x0csupervoxel_2\x18\x02 \x01(\x06\x12\x10\n\x08\x61\x66\x66inity\x18\x03 \x01(\x02\x12\x0c\n\x04\x61rea\x18\x04 \x01(\x04\"\"\n\x05\x45\x64ges\x12\x19\n\x04\x65\x64ge\x18\x01 \x03(\x0b\x32\x0b.edges.Edge\"t\n\nChunkEdges\x12\x1e\n\x08in_chunk\x18\x01 \x01(\x0b\x32\x0c.edges.Edges\x12!\n\x0b\x63ross_chunk\x18\x02 \x01(\x0b\x32\x0c.edges.Edges\x12#\n\rbetween_chunk\x18\x03 \x01(\x0b\x32\x0c.edges.Edgesb\x06proto3') ) -_CHUNKEDGES_EDGES = _descriptor.Descriptor( - name='Edges', - full_name='test.ChunkEdges.Edges', +_EDGE = _descriptor.Descriptor( + name='Edge', + full_name='edges.Edge', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='edgeList', full_name='test.ChunkEdges.Edges.edgeList', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), + name='supervoxel_1', full_name='edges.Edge.supervoxel_1', index=0, + number=1, type=6, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='supervoxel_2', full_name='edges.Edge.supervoxel_2', index=1, + number=2, type=6, cpp_type=4, label=1, + has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='affinities', full_name='test.ChunkEdges.Edges.affinities', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), + name='affinity', full_name='edges.Edge.affinity', index=2, + number=3, type=2, cpp_type=6, label=1, + 
has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='areas', full_name='test.ChunkEdges.Edges.areas', index=2, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), + name='area', full_name='edges.Edge.area', index=3, + number=4, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -66,34 +73,66 @@ extension_ranges=[], oneofs=[ ], - serialized_start=148, - serialized_end=208, + serialized_start=27, + serialized_end=109, ) + +_EDGES = _descriptor.Descriptor( + name='Edges', + full_name='edges.Edges', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='edge', full_name='edges.Edges.edge', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=111, + serialized_end=145, +) + + _CHUNKEDGES = _descriptor.Descriptor( name='ChunkEdges', - full_name='test.ChunkEdges', + full_name='edges.ChunkEdges', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='inChunk', full_name='test.ChunkEdges.inChunk', index=0, - number=4, type=11, cpp_type=10, label=1, + name='in_chunk', full_name='edges.ChunkEdges.in_chunk', index=0, + number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='crossChunk', full_name='test.ChunkEdges.crossChunk', index=1, - number=5, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), + name='cross_chunk', full_name='edges.ChunkEdges.cross_chunk', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='betweenChunk', full_name='test.ChunkEdges.betweenChunk', index=2, - number=6, type=11, cpp_type=10, label=1, + name='between_chunk', full_name='edges.ChunkEdges.between_chunk', index=2, + number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, @@ -101,7 +140,7 @@ ], extensions=[ ], - nested_types=[_CHUNKEDGES_EDGES, ], + nested_types=[], enum_types=[ ], serialized_options=None, @@ -110,30 +149,39 @@ extension_ranges=[], oneofs=[ ], - serialized_start=27, - serialized_end=208, + serialized_start=147, + serialized_end=263, ) -_CHUNKEDGES_EDGES.containing_type = _CHUNKEDGES -_CHUNKEDGES.fields_by_name['inChunk'].message_type = _CHUNKEDGES_EDGES -_CHUNKEDGES.fields_by_name['betweenChunk'].message_type = _CHUNKEDGES_EDGES +_EDGES.fields_by_name['edge'].message_type = _EDGE +_CHUNKEDGES.fields_by_name['in_chunk'].message_type = _EDGES +_CHUNKEDGES.fields_by_name['cross_chunk'].message_type = _EDGES +_CHUNKEDGES.fields_by_name['between_chunk'].message_type = _EDGES +DESCRIPTOR.message_types_by_name['Edge'] = _EDGE +DESCRIPTOR.message_types_by_name['Edges'] = _EDGES DESCRIPTOR.message_types_by_name['ChunkEdges'] = _CHUNKEDGES _sym_db.RegisterFileDescriptor(DESCRIPTOR) -ChunkEdges = 
_reflection.GeneratedProtocolMessageType('ChunkEdges', (_message.Message,), { +Edge = _reflection.GeneratedProtocolMessageType('Edge', (_message.Message,), { + 'DESCRIPTOR' : _EDGE, + '__module__' : 'chunkEdges_pb2' + # @@protoc_insertion_point(class_scope:edges.Edge) + }) +_sym_db.RegisterMessage(Edge) - 'Edges' : _reflection.GeneratedProtocolMessageType('Edges', (_message.Message,), { - 'DESCRIPTOR' : _CHUNKEDGES_EDGES, - '__module__' : 'chunkEdges_pb2' - # @@protoc_insertion_point(class_scope:test.ChunkEdges.Edges) - }) - , +Edges = _reflection.GeneratedProtocolMessageType('Edges', (_message.Message,), { + 'DESCRIPTOR' : _EDGES, + '__module__' : 'chunkEdges_pb2' + # @@protoc_insertion_point(class_scope:edges.Edges) + }) +_sym_db.RegisterMessage(Edges) + +ChunkEdges = _reflection.GeneratedProtocolMessageType('ChunkEdges', (_message.Message,), { 'DESCRIPTOR' : _CHUNKEDGES, '__module__' : 'chunkEdges_pb2' - # @@protoc_insertion_point(class_scope:test.ChunkEdges) + # @@protoc_insertion_point(class_scope:edges.ChunkEdges) }) _sym_db.RegisterMessage(ChunkEdges) -_sym_db.RegisterMessage(ChunkEdges.Edges) # @@protoc_insertion_point(module_scope) From dacf2ac17473197db68ccdedf343676efa170cb5 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 1 Aug 2019 16:29:03 -0400 Subject: [PATCH 0074/1097] wip --- pychunkedgraph/io/edge_storage.py | 53 ++++++++-------- pychunkedgraph/io/protobuf/chunkEdges.proto | 24 +++----- pychunkedgraph/io/protobuf/chunkEdges_pb2.py | 64 ++++---------------- 3 files changed, 48 insertions(+), 93 deletions(-) diff --git a/pychunkedgraph/io/edge_storage.py b/pychunkedgraph/io/edge_storage.py index bc1f9e63b..263af5b3a 100644 --- a/pychunkedgraph/io/edge_storage.py +++ b/pychunkedgraph/io/edge_storage.py @@ -11,7 +11,7 @@ from cloudvolume import Storage from cloudvolume.storage import SimpleStorage -from .protobuf.chunkEdges_pb2 import Edge, Edges, ChunkEdges +from .protobuf.chunkEdges_pb2 import ChunkEdges def _decompress_edges(content: 
bytes) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: @@ -23,30 +23,30 @@ def _decompress_edges(content: bytes) -> Tuple[np.ndarray, np.ndarray, np.ndarra """ def _get_edges( - edge_type: str, edges_message: Union[ChunkEdges, ChunkEdges.Edges] + edge_type: str, edgesMessage: Union[ChunkEdges, ChunkEdges.Edges] ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: if edge_type == "cross": - edges = np.frombuffer(edges_message.crossChunk, dtype=" Tuple[np.ndarray, np.ndarray, np.ndarray]: """ :param edges_dir: cloudvolume storage path @@ -64,13 +64,12 @@ def get_chunk_edges( :param chunks_coordinates: :type np.ndarray: :return: edges, affinities, areas - :param cv_threads: number of threads for cloudvolume Storage - :type int: :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray] """ fnames = [] for chunk_coords in chunks_coordinates: chunk_str = "_".join(str(coord) for coord in chunk_coords) + # TODO change filename format # filename format - edges_x_y_z.serialization.compression fnames.append(f"edges_{chunk_str}.proto.zst") @@ -107,8 +106,8 @@ def put_chunk_edges( :type str: :param chunk_coordinates: chunk coords x,y,z :type np.ndarray: - :param chunk_edges: chunk_edges with keys "in", "cross", "between" - :type dict: + :param chunk_edges: np.array of [supervoxel1, supervoxel2] + :type dict: chunk_edges with keys "in", "cross", "between" :param compression_level: zstandard compression level (1-22, higher - better ratio) :type int: :return None: @@ -131,22 +130,22 @@ def _get_edges(edge_type: str) -> Union[bytes, ChunkEdges.Edges]: if edge_type == "cross": return edges_bytes - edges_message = ChunkEdges.Edges() - edges_message.edgeList = edges_bytes - edges_message.affinities = ( + edgesMessage = ChunkEdges.Edges() + edgesMessage.edgeList = edges_bytes + edgesMessage.affinities = ( chunk_edges[edge_type]["aff"].astype(np.float32).tobytes() ) - edges_message.areas = chunk_edges[edge_type]["area"].astype(np.uint64).tobytes() + edgesMessage.areas = 
chunk_edges[edge_type]["area"].astype(np.uint64).tobytes() - return edges_message + return edgesMessage - chunk_edges_message = ChunkEdges() - chunk_edges_message.inChunk.CopyFrom(_get_edges("in")) - chunk_edges_message.betweenChunk.CopyFrom(_get_edges("between")) - chunk_edges_message.crossChunk = _get_edges("cross") + chunkEdgesMessage = ChunkEdges() + chunkEdgesMessage.inChunk.CopyFrom(_get_edges("in")) + chunkEdgesMessage.betweenChunk.CopyFrom(_get_edges("between")) + chunkEdgesMessage.crossChunk = _get_edges("cross") cctx = zstd.ZstdCompressor(level=compression_level) - compressed_proto = cctx.compress(chunk_edges_message.SerializeToString()) + compressed_proto = cctx.compress(chunkEdgesMessage.SerializeToString()) chunk_str = "_".join(str(coord) for coord in chunk_coordinates) # filename format - edges_x_y_z.serialization.compression diff --git a/pychunkedgraph/io/protobuf/chunkEdges.proto b/pychunkedgraph/io/protobuf/chunkEdges.proto index c2778add1..3f5ed3b95 100644 --- a/pychunkedgraph/io/protobuf/chunkEdges.proto +++ b/pychunkedgraph/io/protobuf/chunkEdges.proto @@ -2,19 +2,15 @@ syntax = "proto3"; package edges; -message Edge { - fixed64 supervoxel_1 = 1; - fixed64 supervoxel_2 = 2; - float affinity = 3; - uint64 area = 4; -} - -message Edges { - repeated Edge edge = 1; -} - message ChunkEdges { - Edges in_chunk = 1; - Edges cross_chunk = 2; - Edges between_chunk = 3; + message Edges { + bytes node_ids1 = 1; + bytes node_ids2 = 1; + bytes affinities = 2; + bytes areas = 3; + } + + Edges inChunk = 4; + bytes crossChunk = 5; + Edges betweenChunk = 6; } \ No newline at end of file diff --git a/pychunkedgraph/io/protobuf/chunkEdges_pb2.py b/pychunkedgraph/io/protobuf/chunkEdges_pb2.py index 35f85e097..2c411237b 100644 --- a/pychunkedgraph/io/protobuf/chunkEdges_pb2.py +++ b/pychunkedgraph/io/protobuf/chunkEdges_pb2.py @@ -20,7 +20,7 @@ package='edges', syntax='proto3', serialized_options=None, - 
serialized_pb=_b('\n\x10\x63hunkEdges.proto\x12\x05\x65\x64ges\"R\n\x04\x45\x64ge\x12\x14\n\x0csupervoxel_1\x18\x01 \x01(\x06\x12\x14\n\x0csupervoxel_2\x18\x02 \x01(\x06\x12\x10\n\x08\x61\x66\x66inity\x18\x03 \x01(\x02\x12\x0c\n\x04\x61rea\x18\x04 \x01(\x04\"\"\n\x05\x45\x64ges\x12\x19\n\x04\x65\x64ge\x18\x01 \x03(\x0b\x32\x0b.edges.Edge\"t\n\nChunkEdges\x12\x1e\n\x08in_chunk\x18\x01 \x01(\x0b\x32\x0c.edges.Edges\x12!\n\x0b\x63ross_chunk\x18\x02 \x01(\x0b\x32\x0c.edges.Edges\x12#\n\rbetween_chunk\x18\x03 \x01(\x0b\x32\x0c.edges.Edgesb\x06proto3') + serialized_pb=_b('\n\x10\x63hunkEdges.proto\x12\x05\x65\x64ges\"R\n\x04\x45\x64ge\x12\x14\n\x0csupervoxel_1\x18\x01 \x01(\x06\x12\x14\n\x0csupervoxel_2\x18\x02 \x01(\x06\x12\x10\n\x08\x61\x66\x66inity\x18\x03 \x01(\x02\x12\x0c\n\x04\x61rea\x18\x04 \x01(\x04\"q\n\nChunkEdges\x12\x1d\n\x08in_chunk\x18\x01 \x03(\x0b\x32\x0b.edges.Edge\x12 \n\x0b\x63ross_chunk\x18\x02 \x03(\x0b\x32\x0b.edges.Edge\x12\"\n\rbetween_chunk\x18\x03 \x03(\x0b\x32\x0b.edges.Edgeb\x06proto3') ) @@ -78,37 +78,6 @@ ) -_EDGES = _descriptor.Descriptor( - name='Edges', - full_name='edges.Edges', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='edge', full_name='edges.Edges.edge', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=111, - serialized_end=145, -) - - _CHUNKEDGES = _descriptor.Descriptor( name='ChunkEdges', full_name='edges.ChunkEdges', @@ -118,22 +87,22 @@ fields=[ _descriptor.FieldDescriptor( name='in_chunk', full_name='edges.ChunkEdges.in_chunk', index=0, - number=1, type=11, cpp_type=10, label=1, - 
has_default_value=False, default_value=None, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cross_chunk', full_name='edges.ChunkEdges.cross_chunk', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='between_chunk', full_name='edges.ChunkEdges.between_chunk', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -149,16 +118,14 @@ extension_ranges=[], oneofs=[ ], - serialized_start=147, - serialized_end=263, + serialized_start=111, + serialized_end=224, ) -_EDGES.fields_by_name['edge'].message_type = _EDGE -_CHUNKEDGES.fields_by_name['in_chunk'].message_type = _EDGES -_CHUNKEDGES.fields_by_name['cross_chunk'].message_type = _EDGES -_CHUNKEDGES.fields_by_name['between_chunk'].message_type = _EDGES +_CHUNKEDGES.fields_by_name['in_chunk'].message_type = _EDGE +_CHUNKEDGES.fields_by_name['cross_chunk'].message_type = _EDGE +_CHUNKEDGES.fields_by_name['between_chunk'].message_type = _EDGE DESCRIPTOR.message_types_by_name['Edge'] = _EDGE -DESCRIPTOR.message_types_by_name['Edges'] = _EDGES DESCRIPTOR.message_types_by_name['ChunkEdges'] = _CHUNKEDGES _sym_db.RegisterFileDescriptor(DESCRIPTOR) @@ -169,13 +136,6 @@ }) _sym_db.RegisterMessage(Edge) -Edges = 
_reflection.GeneratedProtocolMessageType('Edges', (_message.Message,), { - 'DESCRIPTOR' : _EDGES, - '__module__' : 'chunkEdges_pb2' - # @@protoc_insertion_point(class_scope:edges.Edges) - }) -_sym_db.RegisterMessage(Edges) - ChunkEdges = _reflection.GeneratedProtocolMessageType('ChunkEdges', (_message.Message,), { 'DESCRIPTOR' : _CHUNKEDGES, '__module__' : 'chunkEdges_pb2' From 2fa546fda833b9e8f16f85affbbb2b69c92e452e Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 1 Aug 2019 16:56:38 -0400 Subject: [PATCH 0075/1097] change protobuf def --- pychunkedgraph/io/protobuf/chunkEdges.proto | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pychunkedgraph/io/protobuf/chunkEdges.proto b/pychunkedgraph/io/protobuf/chunkEdges.proto index 3f5ed3b95..d1a91ad53 100644 --- a/pychunkedgraph/io/protobuf/chunkEdges.proto +++ b/pychunkedgraph/io/protobuf/chunkEdges.proto @@ -4,13 +4,13 @@ package edges; message ChunkEdges { message Edges { - bytes node_ids1 = 1; - bytes node_ids2 = 1; - bytes affinities = 2; - bytes areas = 3; + fixed64 node_ids1 = 1; + fixed64 node_ids2 = 2; + float affinities = 3; + uint64 areas = 4; } - Edges inChunk = 4; - bytes crossChunk = 5; - Edges betweenChunk = 6; + Edges inChunk = 5; + Edges crossChunk = 6; + Edges betweenChunk = 7; } \ No newline at end of file From a9ac35cc2a22ae053318d54f42bb962871901562 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 2 Aug 2019 10:30:01 -0400 Subject: [PATCH 0076/1097] wip: write test - serilaize edges as 4 arrays --- pychunkedgraph/io/edge_storage.py | 60 +++++++++--------- pychunkedgraph/io/protobuf/chunkEdges.proto | 20 +++--- pychunkedgraph/io/protobuf/chunkEdges_pb2.py | 66 ++++++++++---------- 3 files changed, 71 insertions(+), 75 deletions(-) diff --git a/pychunkedgraph/io/edge_storage.py b/pychunkedgraph/io/edge_storage.py index 263af5b3a..01d055444 100644 --- a/pychunkedgraph/io/edge_storage.py +++ b/pychunkedgraph/io/edge_storage.py @@ -11,7 +11,9 @@ from 
cloudvolume import Storage from cloudvolume.storage import SimpleStorage -from .protobuf.chunkEdges_pb2 import ChunkEdges + +from ..backend.utils import basetypes +from .protobuf.chunkEdges_pb2 import Edges, ChunkEdges def _decompress_edges(content: bytes) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: @@ -23,7 +25,7 @@ def _decompress_edges(content: bytes) -> Tuple[np.ndarray, np.ndarray, np.ndarra """ def _get_edges( - edge_type: str, edgesMessage: Union[ChunkEdges, ChunkEdges.Edges] + edge_type: str, edgesMessage: Edges ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: if edge_type == "cross": edges = np.frombuffer(edgesMessage.crossChunk, dtype=" 1: st = Storage(edges_dir, n_threads=cv_threads) + else: + st = SimpleStorage(edges_dir) files = [] with st: @@ -98,7 +101,7 @@ def get_chunk_edges( def put_chunk_edges( edges_dir: str, chunk_coordinates: np.ndarray, - chunk_edges: dict, + chunk_edges_raw: dict, compression_level: int, ) -> None: """ @@ -106,43 +109,36 @@ def put_chunk_edges( :type str: :param chunk_coordinates: chunk coords x,y,z :type np.ndarray: - :param chunk_edges: np.array of [supervoxel1, supervoxel2] - :type dict: chunk_edges with keys "in", "cross", "between" + :param chunk_edges_raw: chunk_edges_raw with keys "in", "cross", "between" + :type dict: :param compression_level: zstandard compression level (1-22, higher - better ratio) :type int: :return None: """ - def _get_edges(edge_type: str) -> Union[bytes, ChunkEdges.Edges]: - # convert two numpy arrays to edge list - # arr1 = [1, 2, 3] - # arr2 = [4, 5, 6] - # edges = [[1,4],[2,5],[3,6]] - # this is faster than numpy.dstack - edges = np.concatenate( - [ - chunk_edges[edge_type]["sv1"][:, None], - chunk_edges[edge_type]["sv2"][:, None], - ], - axis=1, - ) - edges_bytes = edges.astype(np.uint64).tobytes() - if edge_type == "cross": - return edges_bytes + def _get_edges(edge_type: str) -> Edges: - edgesMessage = ChunkEdges.Edges() - edgesMessage.edgeList = edges_bytes - edgesMessage.affinities = ( 
- chunk_edges[edge_type]["aff"].astype(np.float32).tobytes() - ) - edgesMessage.areas = chunk_edges[edge_type]["area"].astype(np.uint64).tobytes() + edges = Edges() + edges.node_ids1[:] = chunk_edges_raw[edge_type]["sv1"] + edges.node_ids2[:] = chunk_edges_raw[edge_type]["sv2"] + + n_edges = len(chunk_edges_raw[edge_type]["sv1"]) + + if edge_type == "cross": + edges.affinities[:] = float("inf") * np.ones( + n_edges, basetypes.EDGE_AFFINITY + ) + edges.areas[:] = np.ones(n_edges, basetypes.EDGE_AREA) + else: + edges.affinities[:] = chunk_edges_raw[edge_type]["aff"].astype(np.float32) + edges.areas[:] = chunk_edges_raw[edge_type]["area"].astype(np.uint64) - return edgesMessage + return edges chunkEdgesMessage = ChunkEdges() - chunkEdgesMessage.inChunk.CopyFrom(_get_edges("in")) - chunkEdgesMessage.betweenChunk.CopyFrom(_get_edges("between")) - chunkEdgesMessage.crossChunk = _get_edges("cross") + chunkEdgesMessage.in_chunk.CopyFrom(_get_edges("in")) + chunkEdgesMessage.between_chunk.CopyFrom(_get_edges("between")) + chunkEdgesMessage.cross_chunk.CopyFrom(_get_edges("cross")) cctx = zstd.ZstdCompressor(level=compression_level) compressed_proto = cctx.compress(chunkEdgesMessage.SerializeToString()) diff --git a/pychunkedgraph/io/protobuf/chunkEdges.proto b/pychunkedgraph/io/protobuf/chunkEdges.proto index d1a91ad53..ab5f6bec7 100644 --- a/pychunkedgraph/io/protobuf/chunkEdges.proto +++ b/pychunkedgraph/io/protobuf/chunkEdges.proto @@ -2,15 +2,15 @@ syntax = "proto3"; package edges; -message ChunkEdges { - message Edges { - fixed64 node_ids1 = 1; - fixed64 node_ids2 = 2; - float affinities = 3; - uint64 areas = 4; - } +message Edges { + repeated fixed64 node_ids1 = 1; + repeated fixed64 node_ids2 = 2; + repeated float affinities = 3; + repeated uint64 areas = 4; +} - Edges inChunk = 5; - Edges crossChunk = 6; - Edges betweenChunk = 7; +message ChunkEdges { + Edges in_chunk = 1; + Edges cross_chunk = 2; + Edges between_chunk = 3; } \ No newline at end of file diff --git 
a/pychunkedgraph/io/protobuf/chunkEdges_pb2.py b/pychunkedgraph/io/protobuf/chunkEdges_pb2.py index 2c411237b..28e401b22 100644 --- a/pychunkedgraph/io/protobuf/chunkEdges_pb2.py +++ b/pychunkedgraph/io/protobuf/chunkEdges_pb2.py @@ -20,44 +20,44 @@ package='edges', syntax='proto3', serialized_options=None, - serialized_pb=_b('\n\x10\x63hunkEdges.proto\x12\x05\x65\x64ges\"R\n\x04\x45\x64ge\x12\x14\n\x0csupervoxel_1\x18\x01 \x01(\x06\x12\x14\n\x0csupervoxel_2\x18\x02 \x01(\x06\x12\x10\n\x08\x61\x66\x66inity\x18\x03 \x01(\x02\x12\x0c\n\x04\x61rea\x18\x04 \x01(\x04\"q\n\nChunkEdges\x12\x1d\n\x08in_chunk\x18\x01 \x03(\x0b\x32\x0b.edges.Edge\x12 \n\x0b\x63ross_chunk\x18\x02 \x03(\x0b\x32\x0b.edges.Edge\x12\"\n\rbetween_chunk\x18\x03 \x03(\x0b\x32\x0b.edges.Edgeb\x06proto3') + serialized_pb=_b('\n\x10\x63hunkEdges.proto\x12\x05\x65\x64ges\"P\n\x05\x45\x64ges\x12\x11\n\tnode_ids1\x18\x01 \x03(\x06\x12\x11\n\tnode_ids2\x18\x02 \x03(\x06\x12\x12\n\naffinities\x18\x03 \x03(\x02\x12\r\n\x05\x61reas\x18\x04 \x03(\x04\"t\n\nChunkEdges\x12\x1e\n\x08in_chunk\x18\x01 \x01(\x0b\x32\x0c.edges.Edges\x12!\n\x0b\x63ross_chunk\x18\x02 \x01(\x0b\x32\x0c.edges.Edges\x12#\n\rbetween_chunk\x18\x03 \x01(\x0b\x32\x0c.edges.Edgesb\x06proto3') ) -_EDGE = _descriptor.Descriptor( - name='Edge', - full_name='edges.Edge', +_EDGES = _descriptor.Descriptor( + name='Edges', + full_name='edges.Edges', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='supervoxel_1', full_name='edges.Edge.supervoxel_1', index=0, - number=1, type=6, cpp_type=4, label=1, - has_default_value=False, default_value=0, + name='node_ids1', full_name='edges.Edges.node_ids1', index=0, + number=1, type=6, cpp_type=4, label=3, + has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='supervoxel_2', 
full_name='edges.Edge.supervoxel_2', index=1, - number=2, type=6, cpp_type=4, label=1, - has_default_value=False, default_value=0, + name='node_ids2', full_name='edges.Edges.node_ids2', index=1, + number=2, type=6, cpp_type=4, label=3, + has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='affinity', full_name='edges.Edge.affinity', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), + name='affinities', full_name='edges.Edges.affinities', index=2, + number=3, type=2, cpp_type=6, label=3, + has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='area', full_name='edges.Edge.area', index=3, - number=4, type=4, cpp_type=4, label=1, - has_default_value=False, default_value=0, + name='areas', full_name='edges.Edges.areas', index=3, + number=4, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -74,7 +74,7 @@ oneofs=[ ], serialized_start=27, - serialized_end=109, + serialized_end=107, ) @@ -87,22 +87,22 @@ fields=[ _descriptor.FieldDescriptor( name='in_chunk', full_name='edges.ChunkEdges.in_chunk', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cross_chunk', full_name='edges.ChunkEdges.cross_chunk', index=1, - 
number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='between_chunk', full_name='edges.ChunkEdges.between_chunk', index=2, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -118,23 +118,23 @@ extension_ranges=[], oneofs=[ ], - serialized_start=111, - serialized_end=224, + serialized_start=109, + serialized_end=225, ) -_CHUNKEDGES.fields_by_name['in_chunk'].message_type = _EDGE -_CHUNKEDGES.fields_by_name['cross_chunk'].message_type = _EDGE -_CHUNKEDGES.fields_by_name['between_chunk'].message_type = _EDGE -DESCRIPTOR.message_types_by_name['Edge'] = _EDGE +_CHUNKEDGES.fields_by_name['in_chunk'].message_type = _EDGES +_CHUNKEDGES.fields_by_name['cross_chunk'].message_type = _EDGES +_CHUNKEDGES.fields_by_name['between_chunk'].message_type = _EDGES +DESCRIPTOR.message_types_by_name['Edges'] = _EDGES DESCRIPTOR.message_types_by_name['ChunkEdges'] = _CHUNKEDGES _sym_db.RegisterFileDescriptor(DESCRIPTOR) -Edge = _reflection.GeneratedProtocolMessageType('Edge', (_message.Message,), { - 'DESCRIPTOR' : _EDGE, +Edges = _reflection.GeneratedProtocolMessageType('Edges', (_message.Message,), { + 'DESCRIPTOR' : _EDGES, '__module__' : 'chunkEdges_pb2' - # @@protoc_insertion_point(class_scope:edges.Edge) + # @@protoc_insertion_point(class_scope:edges.Edges) }) -_sym_db.RegisterMessage(Edge) +_sym_db.RegisterMessage(Edges) ChunkEdges = _reflection.GeneratedProtocolMessageType('ChunkEdges', (_message.Message,), { 
'DESCRIPTOR' : _CHUNKEDGES, From 30569c580acfd8004d67dab3dd6642d1d7ace6bc Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 2 Aug 2019 15:59:23 -0400 Subject: [PATCH 0077/1097] wip: read write complete, address review comments --- pychunkedgraph/backend/chunkedgraph.py | 9 +- pychunkedgraph/io/edge_storage.py | 113 +++++++++---------- pychunkedgraph/io/protobuf/chunkEdges.proto | 8 +- pychunkedgraph/io/protobuf/chunkEdges_pb2.py | 18 +-- 4 files changed, 70 insertions(+), 78 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 3a9ed321a..ced7c5f04 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3138,15 +3138,8 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ timings['total'] = time.time() - timings['total'] - return timings + return timings, edges, affinities, areas - # print(json.dumps(timings, default=str, indent=4)) - # print(f'edges: {len(edges)}') - # print(f'affinities: {len(affinities)}') - # print(f'areas: {len(areas)}') - - # return edges, affinities, areas - def get_subgraph_nodes(self, agglomeration_id: np.uint64, bounding_box: Optional[Sequence[Sequence[int]]] = None, diff --git a/pychunkedgraph/io/edge_storage.py b/pychunkedgraph/io/edge_storage.py index 01d055444..bec644d5c 100644 --- a/pychunkedgraph/io/edge_storage.py +++ b/pychunkedgraph/io/edge_storage.py @@ -1,6 +1,6 @@ """ Functions for reading and writing edges -to secondary storage with CloudVolume +to (slow) storage with CloudVolume """ import os @@ -24,70 +24,71 @@ def _decompress_edges(content: bytes) -> Tuple[np.ndarray, np.ndarray, np.ndarra :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray] """ - def _get_edges( - edge_type: str, edgesMessage: Edges - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - if edge_type == "cross": - edges = np.frombuffer(edgesMessage.crossChunk, dtype=" Tuple[np.ndarray, np.ndarray, np.ndarray]: + supervoxel_ids1 = 
np.frombuffer(edges_message.node_ids1, basetypes.NODE_ID) + supervoxel_ids2 = np.frombuffer(edges_message.node_ids2, basetypes.NODE_ID) - chunkEdgesMessage = ChunkEdges() + edges = np.column_stack((supervoxel_ids1, supervoxel_ids2)) + affinities = np.frombuffer(edges_message.affinities, basetypes.EDGE_AFFINITY) + areas = np.frombuffer(edges_message.areas, basetypes.EDGE_AREA) + return edges, affinities, areas - zstdDecompressorObj = zstd.ZstdDecompressor().decompressobj() - file_content = zstdDecompressorObj.decompress(content) - chunkEdgesMessage.ParseFromString(file_content) + chunk_edges = ChunkEdges() + zstd_decompressor_obj = zstd.ZstdDecompressor().decompressobj() + file_content = zstd_decompressor_obj.decompress(content) + chunk_edges.ParseFromString(file_content) - in_edges, in_affinities, in_areas = _get_edges("in", chunkEdgesMessage.inChunk) - between_edges, between_affinities, between_areas = _get_edges( - "between", chunkEdgesMessage.betweenChunk - ) - cross_edges, cross_affinities, cross_areas = _get_edges("cross", chunkEdgesMessage) + # in, between and cross + in_edges, in_affinities, in_areas = _get_edges(chunk_edges.in_chunk) + bt_edges, bt_affinities, bt_areas = _get_edges(chunk_edges.between_chunk) + cx_edges, cx_affinities, cx_areas = _get_edges(chunk_edges.cross_chunk) - edges = np.concatenate([in_edges, between_edges, cross_edges]) - affinities = np.concatenate([in_affinities, between_affinities, cross_affinities]) - areas = np.concatenate([in_areas, between_areas, cross_areas]) + edges = np.concatenate([in_edges, bt_edges, cx_edges]) + affinities = np.concatenate([in_affinities, bt_affinities, cx_affinities]) + areas = np.concatenate([in_areas, bt_areas, cx_areas]) return edges, affinities, areas def get_chunk_edges( - edges_dir: str, chunks_coordinates: List[np.ndarray], cv_threads + edges_dir: str, chunks_coordinates: List[np.ndarray], cv_threads: int ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ :param edges_dir: cloudvolume storage 
path :type str: - :param chunks_coordinates: - :type np.ndarray: + :param chunks_coordinates: list of chunk coords for which to load edges + :type List[np.ndarray]: + :param cv_threads: cloudvolume storage client thread count + :type int: :return: edges, affinities, areas :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray] """ fnames = [] for chunk_coords in chunks_coordinates: chunk_str = "_".join(str(coord) for coord in chunk_coords) - # TODO change filename format # filename format - edges_x_y_z.serialization.compression fnames.append(f"edges_{chunk_str}.proto.zst") - edges = np.array([], dtype=np.uint64).reshape(0, 2) - affinities = np.array([], dtype=np.float32) - areas = np.array([], dtype=np.uint64) + edges = np.array([], basetypes.NODE_ID).reshape(0, 2) + affinities = np.array([], basetypes.EDGE_AFFINITY) + areas = np.array([], basetypes.EDGE_AREA) + + st = ( + Storage(edges_dir, n_threads=cv_threads) + if cv_threads > 1 + else SimpleStorage(edges_dir) + ) - if cv_threads > 1: - st = Storage(edges_dir, n_threads=cv_threads) - else: - st = SimpleStorage(edges_dir) + print(type(st)) files = [] with st: files = st.get_files(fnames) for _file in files: + # cv error + if _file["error"]: + raise ValueError(_file["error"]) + # empty chunk if not _file["content"]: continue _edges, _affinities, _areas = _decompress_edges(_file["content"]) @@ -118,39 +119,37 @@ def put_chunk_edges( def _get_edges(edge_type: str) -> Edges: - edges = Edges() - edges.node_ids1[:] = chunk_edges_raw[edge_type]["sv1"] - edges.node_ids2[:] = chunk_edges_raw[edge_type]["sv2"] + edges_raw = chunk_edges_raw[edge_type] + + supervoxel_ids1 = edges_raw["sv1"] + supervoxel_ids2 = edges_raw["sv2"] - n_edges = len(chunk_edges_raw[edge_type]["sv1"]) + ones = np.ones(len(supervoxel_ids1)) + affinities = edges_raw.get("aff", float("inf") * ones) + areas = edges_raw.get("area", ones) - if edge_type == "cross": - edges.affinities[:] = float("inf") * np.ones( - n_edges, basetypes.EDGE_AFFINITY - ) - 
edges.areas[:] = np.ones(n_edges, basetypes.EDGE_AREA) - else: - edges.affinities[:] = chunk_edges_raw[edge_type]["aff"].astype(np.float32) - edges.areas[:] = chunk_edges_raw[edge_type]["area"].astype(np.uint64) + edges = Edges() + edges.node_ids1 = supervoxel_ids1.astype(basetypes.NODE_ID).tobytes() + edges.node_ids2 = supervoxel_ids2.astype(basetypes.NODE_ID).tobytes() + edges.affinities = affinities.astype(basetypes.EDGE_AFFINITY).tobytes() + edges.areas = areas.astype(basetypes.EDGE_AREA).tobytes() return edges - chunkEdgesMessage = ChunkEdges() - chunkEdgesMessage.in_chunk.CopyFrom(_get_edges("in")) - chunkEdgesMessage.between_chunk.CopyFrom(_get_edges("between")) - chunkEdgesMessage.cross_chunk.CopyFrom(_get_edges("cross")) + chunk_edges = ChunkEdges() + chunk_edges.in_chunk.CopyFrom(_get_edges("in")) + chunk_edges.between_chunk.CopyFrom(_get_edges("between")) + chunk_edges.cross_chunk.CopyFrom(_get_edges("cross")) cctx = zstd.ZstdCompressor(level=compression_level) - compressed_proto = cctx.compress(chunkEdgesMessage.SerializeToString()) - chunk_str = "_".join(str(coord) for coord in chunk_coordinates) - # filename format - edges_x_y_z.serialization.compression + # filename format - edges_x_y_z.serialization.compression file = f"edges_{chunk_str}.proto.zst" with Storage(edges_dir) as st: st.put_file( file_path=file, - content=compressed_proto, + content=cctx.compress(chunk_edges.SerializeToString()), compress=None, cache_control="no-cache", ) diff --git a/pychunkedgraph/io/protobuf/chunkEdges.proto b/pychunkedgraph/io/protobuf/chunkEdges.proto index ab5f6bec7..7ac139dcb 100644 --- a/pychunkedgraph/io/protobuf/chunkEdges.proto +++ b/pychunkedgraph/io/protobuf/chunkEdges.proto @@ -3,10 +3,10 @@ syntax = "proto3"; package edges; message Edges { - repeated fixed64 node_ids1 = 1; - repeated fixed64 node_ids2 = 2; - repeated float affinities = 3; - repeated uint64 areas = 4; + bytes node_ids1 = 1; + bytes node_ids2 = 2; + bytes affinities = 3; + bytes areas = 4; } 
message ChunkEdges { diff --git a/pychunkedgraph/io/protobuf/chunkEdges_pb2.py b/pychunkedgraph/io/protobuf/chunkEdges_pb2.py index 28e401b22..3e7b1515f 100644 --- a/pychunkedgraph/io/protobuf/chunkEdges_pb2.py +++ b/pychunkedgraph/io/protobuf/chunkEdges_pb2.py @@ -20,7 +20,7 @@ package='edges', syntax='proto3', serialized_options=None, - serialized_pb=_b('\n\x10\x63hunkEdges.proto\x12\x05\x65\x64ges\"P\n\x05\x45\x64ges\x12\x11\n\tnode_ids1\x18\x01 \x03(\x06\x12\x11\n\tnode_ids2\x18\x02 \x03(\x06\x12\x12\n\naffinities\x18\x03 \x03(\x02\x12\r\n\x05\x61reas\x18\x04 \x03(\x04\"t\n\nChunkEdges\x12\x1e\n\x08in_chunk\x18\x01 \x01(\x0b\x32\x0c.edges.Edges\x12!\n\x0b\x63ross_chunk\x18\x02 \x01(\x0b\x32\x0c.edges.Edges\x12#\n\rbetween_chunk\x18\x03 \x01(\x0b\x32\x0c.edges.Edgesb\x06proto3') + serialized_pb=_b('\n\x10\x63hunkEdges.proto\x12\x05\x65\x64ges\"P\n\x05\x45\x64ges\x12\x11\n\tnode_ids1\x18\x01 \x01(\x0c\x12\x11\n\tnode_ids2\x18\x02 \x01(\x0c\x12\x12\n\naffinities\x18\x03 \x01(\x0c\x12\r\n\x05\x61reas\x18\x04 \x01(\x0c\"t\n\nChunkEdges\x12\x1e\n\x08in_chunk\x18\x01 \x01(\x0b\x32\x0c.edges.Edges\x12!\n\x0b\x63ross_chunk\x18\x02 \x01(\x0b\x32\x0c.edges.Edges\x12#\n\rbetween_chunk\x18\x03 \x01(\x0b\x32\x0c.edges.Edgesb\x06proto3') ) @@ -35,29 +35,29 @@ fields=[ _descriptor.FieldDescriptor( name='node_ids1', full_name='edges.Edges.node_ids1', index=0, - number=1, type=6, cpp_type=4, label=3, - has_default_value=False, default_value=[], + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='node_ids2', full_name='edges.Edges.node_ids2', index=1, - number=2, type=6, cpp_type=4, label=3, - has_default_value=False, default_value=[], + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='affinities', full_name='edges.Edges.affinities', index=2, - number=3, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='areas', full_name='edges.Edges.areas', index=3, - number=4, type=4, cpp_type=4, label=3, - has_default_value=False, default_value=[], + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), From aef75a4089a292392e563dfda99a52de3c413d57 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 2 Aug 2019 16:18:44 -0400 Subject: [PATCH 0078/1097] remove stuff to ready for merge --- pychunkedgraph/backend/chunkedgraph_init.py | 412 -------------------- pychunkedgraph/ingest/ran_ingestion.py | 2 - pychunkedgraph/io/__init__.py | 2 +- 3 files changed, 1 insertion(+), 415 deletions(-) delete mode 100644 pychunkedgraph/backend/chunkedgraph_init.py diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py deleted file mode 100644 index 1b1c517ba..000000000 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ /dev/null @@ -1,412 +0,0 @@ -import time -import datetime -import collections - -import numpy as np -import pytz - -from pychunkedgraph.backend.utils import serializers, column_keys, row_keys, basetypes -from pychunkedgraph.backend import flatgraph_utils -from pychunkedgraph.backend.chunkedgraph_utils import ( - compute_indices_pandas, - get_google_compatible_time_stamp, -) - -from typing import ( - Any, 
- Dict, - Iterable, - List, - Optional, - Sequence, - Tuple, - Union, - NamedTuple, -) - -UTC = pytz.UTC - - -def add_atomic_edges_in_chunks( - self, - edge_id_dict: dict, - edge_aff_dict: dict, - edge_area_dict: dict, - isolated_node_ids: Sequence[np.uint64], - verbose: bool = True, - time_stamp: Optional[datetime.datetime] = None, -): - """ Creates atomic nodes in first abstraction layer for a SINGLE chunk - and all abstract nodes in the second for the same chunk - - Alle edges (edge_ids) need to be from one chunk and no nodes should - exist for this chunk prior to calling this function. All cross edges - (cross_edge_ids) have to point out the chunk (first entry is the id - within the chunk) - - :param edge_id_dict: dict - :param edge_aff_dict: dict - :param edge_area_dict: dict - :param isolated_node_ids: list of uint64s - ids of nodes that have no edge in the chunked graph - :param verbose: bool - :param time_stamp: datetime - """ - if time_stamp is None: - time_stamp = datetime.datetime.utcnow() - - if time_stamp.tzinfo is None: - time_stamp = UTC.localize(time_stamp) - - # Comply to resolution of BigTables TimeRange - time_stamp = get_google_compatible_time_stamp(time_stamp, round_up=False) - - edge_id_keys = [ - "in_connected", - "in_disconnected", - "cross", - "between_connected", - "between_disconnected", - ] - edge_aff_keys = [ - "in_connected", - "in_disconnected", - "between_connected", - "between_disconnected", - ] - - # Check if keys exist and include an empty array if not - n_edge_ids = 0 - chunk_id = None - for edge_id_key in edge_id_keys: - if not edge_id_key in edge_id_dict: - empty_edges = np.array([], dtype=np.uint64).reshape(0, 2) - edge_id_dict[edge_id_key] = empty_edges - else: - n_edge_ids += len(edge_id_dict[edge_id_key]) - - if len(edge_id_dict[edge_id_key]) > 0: - node_id = edge_id_dict[edge_id_key][0, 0] - chunk_id = self.get_chunk_id(node_id) - - for edge_aff_key in edge_aff_keys: - if not edge_aff_key in edge_aff_dict: - 
edge_aff_dict[edge_aff_key] = np.array([], dtype=np.float32) - - time_start = time.time() - - # Catch trivial case - if n_edge_ids == 0 and len(isolated_node_ids) == 0: - return 0 - - # Make parent id creation easier - if chunk_id is None: - chunk_id = self.get_chunk_id(isolated_node_ids[0]) - - chunk_id_c = self.get_chunk_coordinates(chunk_id) - parent_chunk_id = self.get_chunk_id( - layer=2, x=chunk_id_c[0], y=chunk_id_c[1], z=chunk_id_c[2] - ) - - # Get connected component within the chunk - chunk_node_ids = np.concatenate( - [ - isolated_node_ids.astype(np.uint64), - np.unique(edge_id_dict["in_connected"]), - np.unique(edge_id_dict["in_disconnected"]), - np.unique(edge_id_dict["cross"][:, 0]), - np.unique(edge_id_dict["between_connected"][:, 0]), - np.unique(edge_id_dict["between_disconnected"][:, 0]), - ] - ) - - chunk_node_ids = np.unique(chunk_node_ids) - - node_chunk_ids = np.array( - [self.get_chunk_id(c) for c in chunk_node_ids], dtype=np.uint64 - ) - - u_node_chunk_ids, c_node_chunk_ids = np.unique(node_chunk_ids, return_counts=True) - if len(u_node_chunk_ids) > 1: - raise Exception( - "%d: %d chunk ids found in node id list. " - "Some edges might be in the wrong order. 
" - "Number of occurences:" % (chunk_id, len(u_node_chunk_ids)), - c_node_chunk_ids, - ) - - add_edge_ids = np.vstack([chunk_node_ids, chunk_node_ids]).T - edge_ids = np.concatenate([edge_id_dict["in_connected"].copy(), add_edge_ids]) - - graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( - edge_ids, make_directed=True - ) - - ccs = flatgraph_utils.connected_components(graph) - - if verbose: - self.logger.debug("CC in chunk: %.3fs" % (time.time() - time_start)) - - # Add rows for nodes that are in this chunk - # a connected component at a time - node_c = 0 # Just a counter for the log / speed measurement - - n_ccs = len(ccs) - - parent_ids = self.get_unique_node_id_range(parent_chunk_id, step=n_ccs) - time_start = time.time() - - time_dict = collections.defaultdict(list) - - time_start_1 = time.time() - sparse_indices = {} - remapping = {} - for k in edge_id_dict.keys(): - # Circumvent datatype issues - - u_ids, inv_ids = np.unique(edge_id_dict[k], return_inverse=True) - mapped_ids = np.arange(len(u_ids), dtype=np.int32) - remapped_arr = mapped_ids[inv_ids].reshape(edge_id_dict[k].shape) - - sparse_indices[k] = compute_indices_pandas(remapped_arr) - remapping[k] = dict(zip(u_ids, mapped_ids)) - - time_dict["sparse_indices"].append(time.time() - time_start_1) - - rows = [] - - for i_cc, cc in enumerate(ccs): - node_ids = unique_graph_ids[cc] - - u_chunk_ids = np.unique([self.get_chunk_id(n) for n in node_ids]) - - if len(u_chunk_ids) > 1: - self.logger.error(f"Found multiple chunk ids: {u_chunk_ids}") - raise Exception() - - # Create parent id - parent_id = parent_ids[i_cc] - - parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2) - - # Add rows for nodes that are in this chunk - for i_node_id, node_id in enumerate(node_ids): - # Extract edges relevant to this node - - # in chunk + connected - time_start_2 = time.time() - if node_id in remapping["in_connected"]: - row_ids, column_ids = sparse_indices["in_connected"][ - 
remapping["in_connected"][node_id] - ] - - inv_column_ids = (column_ids + 1) % 2 - - connected_ids = edge_id_dict["in_connected"][row_ids, inv_column_ids] - connected_affs = edge_aff_dict["in_connected"][row_ids] - connected_areas = edge_area_dict["in_connected"][row_ids] - time_dict["in_connected"].append(time.time() - time_start_2) - time_start_2 = time.time() - else: - connected_ids = np.array([], dtype=np.uint64) - connected_affs = np.array([], dtype=np.float32) - connected_areas = np.array([], dtype=np.uint64) - - # in chunk + disconnected - if node_id in remapping["in_disconnected"]: - row_ids, column_ids = sparse_indices["in_disconnected"][ - remapping["in_disconnected"][node_id] - ] - inv_column_ids = (column_ids + 1) % 2 - - disconnected_ids = edge_id_dict["in_disconnected"][ - row_ids, inv_column_ids - ] - disconnected_affs = edge_aff_dict["in_disconnected"][row_ids] - disconnected_areas = edge_area_dict["in_disconnected"][row_ids] - time_dict["in_disconnected"].append(time.time() - time_start_2) - time_start_2 = time.time() - else: - disconnected_ids = np.array([], dtype=np.uint64) - disconnected_affs = np.array([], dtype=np.float32) - disconnected_areas = np.array([], dtype=np.uint64) - - # out chunk + connected - if node_id in remapping["between_connected"]: - row_ids, column_ids = sparse_indices["between_connected"][ - remapping["between_connected"][node_id] - ] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["out_connected_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - connected_ids = np.concatenate( - [ - connected_ids, - edge_id_dict["between_connected"][row_ids, inv_column_ids], - ] - ) - connected_affs = np.concatenate( - [connected_affs, edge_aff_dict["between_connected"][row_ids]] - ) - connected_areas = np.concatenate( - [connected_areas, edge_area_dict["between_connected"][row_ids]] - ) - - parent_cross_edges = np.concatenate( - 
[parent_cross_edges, edge_id_dict["between_connected"][row_ids]] - ) - - time_dict["out_connected"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # out chunk + disconnected - if node_id in remapping["between_disconnected"]: - row_ids, column_ids = sparse_indices["between_disconnected"][ - remapping["between_disconnected"][node_id] - ] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["out_disconnected_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - disconnected_ids = np.concatenate( - [ - disconnected_ids, - edge_id_dict["between_disconnected"][row_ids, inv_column_ids], - ] - ) - disconnected_affs = np.concatenate( - [disconnected_affs, edge_aff_dict["between_disconnected"][row_ids]] - ) - disconnected_areas = np.concatenate( - [ - disconnected_areas, - edge_area_dict["between_disconnected"][row_ids], - ] - ) - - time_dict["out_disconnected"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # cross - if node_id in remapping["cross"]: - row_ids, column_ids = sparse_indices["cross"][ - remapping["cross"][node_id] - ] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["cross_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - connected_ids = np.concatenate( - [connected_ids, edge_id_dict["cross"][row_ids, inv_column_ids]] - ) - connected_affs = np.concatenate( - [connected_affs, np.full((len(row_ids)), np.inf, dtype=np.float32)] - ) - connected_areas = np.concatenate( - [connected_areas, np.ones((len(row_ids)), dtype=np.uint64)] - ) - - parent_cross_edges = np.concatenate( - [parent_cross_edges, edge_id_dict["cross"][row_ids]] - ) - time_dict["cross"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # Create node - partners = np.concatenate([connected_ids, disconnected_ids]) - affinities = 
np.concatenate([connected_affs, disconnected_affs]) - areas = np.concatenate([connected_areas, disconnected_areas]) - connected = np.arange(len(connected_ids), dtype=np.int) - - val_dict = { - column_keys.Connectivity.Partner: partners, - column_keys.Connectivity.Affinity: affinities, - column_keys.Connectivity.Area: areas, - column_keys.Connectivity.Connected: connected, - column_keys.Hierarchy.Parent: parent_id, - } - - rows.append( - self.mutate_row( - serializers.serialize_uint64(node_id), - val_dict, - time_stamp=time_stamp, - ) - ) - node_c += 1 - time_dict["creating_lv1_row"].append(time.time() - time_start_2) - - time_start_1 = time.time() - # Create parent node - rows.append( - self.mutate_row( - serializers.serialize_uint64(parent_id), - {column_keys.Hierarchy.Child: node_ids}, - time_stamp=time_stamp, - ) - ) - - time_dict["creating_lv2_row"].append(time.time() - time_start_1) - time_start_1 = time.time() - - cce_layers = self.get_cross_chunk_edges_layer(parent_cross_edges) - u_cce_layers = np.unique(cce_layers) - - val_dict = {} - for cc_layer in u_cce_layers: - layer_cross_edges = parent_cross_edges[cce_layers == cc_layer] - - if len(layer_cross_edges) > 0: - val_dict[ - column_keys.Connectivity.CrossChunkEdge[cc_layer] - ] = layer_cross_edges - - if len(val_dict) > 0: - rows.append( - self.mutate_row( - serializers.serialize_uint64(parent_id), - val_dict, - time_stamp=time_stamp, - ) - ) - node_c += 1 - - time_dict["adding_cross_edges"].append(time.time() - time_start_1) - - if len(rows) > 100000: - time_start_1 = time.time() - self.bulk_write(rows) - time_dict["writing"].append(time.time() - time_start_1) - - if len(rows) > 0: - time_start_1 = time.time() - self.bulk_write(rows) - time_dict["writing"].append(time.time() - time_start_1) - - if verbose: - self.logger.debug( - "Time creating rows: %.3fs for %d ccs with %d nodes" - % (time.time() - time_start, len(ccs), node_c) - ) - - for k in time_dict.keys(): - self.logger.debug( - "%s -- %.3fms for 
%d instances -- avg = %.3fms" - % ( - k, - np.sum(time_dict[k]) * 1000, - len(time_dict[k]), - np.mean(time_dict[k]) * 1000, - ) - ) - diff --git a/pychunkedgraph/ingest/ran_ingestion.py b/pychunkedgraph/ingest/ran_ingestion.py index 160fa738d..7a742d1bf 100644 --- a/pychunkedgraph/ingest/ran_ingestion.py +++ b/pychunkedgraph/ingest/ran_ingestion.py @@ -11,7 +11,6 @@ from multiwrapper import multiprocessing_utils as mu from pychunkedgraph.ingest import ingestionmanager, ingestion_utils as iu -from pychunkedgraph.io.edge_storage import put_chunk_edges def ingest_into_chunkedgraph(storage_path, ws_cv_path, cg_table_id, @@ -236,7 +235,6 @@ def create_atomic_chunk(im, chunk_coord, aff_dtype=np.float32, verbose=True): chunk_coord = np.array(list(chunk_coord), dtype=np.int) edge_dict = collect_edge_data(im, chunk_coord, aff_dtype=aff_dtype) - put_chunk_edges(None, chunk_coord, edge_dict, 17) mapping = collect_agglomeration_data(im, chunk_coord) active_edge_dict, isolated_ids = define_active_edges(edge_dict, mapping) diff --git a/pychunkedgraph/io/__init__.py b/pychunkedgraph/io/__init__.py index 378554f35..60fe4ebb1 100644 --- a/pychunkedgraph/io/__init__.py +++ b/pychunkedgraph/io/__init__.py @@ -1,3 +1,3 @@ """ -All secondary storage stuff must go here +All secondary (slow) storage stuff must go here """ \ No newline at end of file From 06c6088448c1eff3426dd203c700ae56778b86e9 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 2 Aug 2019 16:49:16 -0400 Subject: [PATCH 0079/1097] remove unused import --- pychunkedgraph/io/edge_storage.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pychunkedgraph/io/edge_storage.py b/pychunkedgraph/io/edge_storage.py index bec644d5c..ba534434f 100644 --- a/pychunkedgraph/io/edge_storage.py +++ b/pychunkedgraph/io/edge_storage.py @@ -3,7 +3,6 @@ to (slow) storage with CloudVolume """ -import os from typing import List, Dict, Tuple, Union import numpy as np From 7d7d51d040642c92cfa8aa9a69f152b1be0a7199 Mon Sep 17 00:00:00 
2001 From: Akhilesh Halageri Date: Fri, 2 Aug 2019 16:50:28 -0400 Subject: [PATCH 0080/1097] remove print statement --- pychunkedgraph/io/edge_storage.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/pychunkedgraph/io/edge_storage.py b/pychunkedgraph/io/edge_storage.py index ba534434f..35339ac7c 100644 --- a/pychunkedgraph/io/edge_storage.py +++ b/pychunkedgraph/io/edge_storage.py @@ -78,8 +78,6 @@ def get_chunk_edges( else SimpleStorage(edges_dir) ) - print(type(st)) - files = [] with st: files = st.get_files(fnames) From a56b77f00a8758686cf2f6fe2e35dc5ae0368400 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 5 Aug 2019 10:39:47 -0400 Subject: [PATCH 0081/1097] wip: separate chunkedgraph initialization --- pychunkedgraph/backend/chunkedgraph_init.py | 408 ++++++++++++++++++++ 1 file changed, 408 insertions(+) create mode 100644 pychunkedgraph/backend/chunkedgraph_init.py diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py new file mode 100644 index 000000000..997bf78c6 --- /dev/null +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -0,0 +1,408 @@ +""" +Module for stuff related to creating the initial chunkedgraph +""" + + +import time +import datetime +import collections +from typing import Optional, Sequence + +import pytz +import numpy as np + +from google.cloud.bigtable.row_filters import ( + TimestampRange, + TimestampRangeFilter, + ColumnRangeFilter, + ValueRangeFilter, + RowFilterChain, + ColumnQualifierRegexFilter, + RowFilterUnion, + ConditionalRowFilter, + PassAllFilter, + RowFilter, + RowKeyRegexFilter, + FamilyNameRegexFilter, +) +from google.cloud.bigtable.row_set import RowSet +from google.cloud.bigtable.column_family import MaxVersionsGCRule + + +from pychunkedgraph.backend.chunkedgraph_utils import ( + compute_indices_pandas, + get_google_compatible_time_stamp, +) +from pychunkedgraph.backend import flatgraph_utils +from pychunkedgraph.backend.utils import serializers, 
column_keys + +UTC = pytz.UTC + + +def add_atomic_edges_in_chunks( + cg, + edge_id_dict: dict, + edge_aff_dict: dict, + edge_area_dict: dict, + isolated_node_ids: Sequence[np.uint64], + verbose: bool = True, + time_stamp: Optional[datetime.datetime] = None, +): + """ Creates atomic nodes in first abstraction layer for a SINGLE chunk + and all abstract nodes in the second for the same chunk + Alle edges (edge_ids) need to be from one chunk and no nodes should + exist for this chunk prior to calling this function. All cross edges + (cross_edge_ids) have to point out the chunk (first entry is the id + within the chunk) + :param edge_id_dict: dict + :param edge_aff_dict: dict + :param edge_area_dict: dict + :param isolated_node_ids: list of uint64s + ids of nodes that have no edge in the chunked graph + :param verbose: bool + :param time_stamp: datetime + """ + if time_stamp is None: + time_stamp = datetime.datetime.utcnow() + + if time_stamp.tzinfo is None: + time_stamp = UTC.localize(time_stamp) + + # Comply to resolution of BigTables TimeRange + time_stamp = get_google_compatible_time_stamp(time_stamp, round_up=False) + + edge_aff_keys = [ + "in_connected", + "in_disconnected", + "between_connected", + "between_disconnected", + ] + edge_id_keys = edge_aff_keys[:].insert(2, "cross") + + # Check if keys exist and include an empty array if not + n_edge_ids = 0 + empty_edges_array = np.array([], dtype=np.uint64).reshape(0, 2) + + for key in edge_id_keys: + edge_id_dict[key] = np.concatenate( + edge_id_dict.get(key, empty_edges_array.copy(), empty_edges_array.copy()) + ) + n_edge_ids += len(edge_id_dict[key]) + + for key in edge_aff_keys: + edge_aff_dict[key] = np.concatenate( + edge_aff_dict.get(key, empty_edges_array.copy(), empty_edges_array.copy()) + ) + + time_start = time.time() + + # Get connected component within the chunk + chunk_node_ids = np.concatenate( + [ + isolated_node_ids.astype(np.uint64), + np.unique(edge_id_dict["in_connected"]), + 
np.unique(edge_id_dict["in_disconnected"]), + np.unique(edge_id_dict["cross"][:, 0]), + np.unique(edge_id_dict["between_connected"][:, 0]), + np.unique(edge_id_dict["between_disconnected"][:, 0]), + ] + ) + + # nothing to do + if not len(chunk_node_ids): + return 0 + + chunk_node_ids = np.unique(chunk_node_ids) + + node_chunk_ids = np.array( + [cg.get_chunk_id(c) for c in chunk_node_ids], dtype=np.uint64 + ) + + u_node_chunk_ids, c_node_chunk_ids = np.unique(node_chunk_ids, return_counts=True) + if len(u_node_chunk_ids) > 1: + raise Exception( + "%d: %d chunk ids found in node id list. " + "Some edges might be in the wrong order. " + "Number of occurences:" % (chunk_id, len(u_node_chunk_ids)), + c_node_chunk_ids, + ) + + add_edge_ids = np.vstack([chunk_node_ids, chunk_node_ids]).T + edge_ids = np.concatenate([edge_id_dict["in_connected"].copy(), add_edge_ids]) + + graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( + edge_ids, make_directed=True + ) + + ccs = flatgraph_utils.connected_components(graph) + + if verbose: + cg.logger.debug("CC in chunk: %.3fs" % (time.time() - time_start)) + + # Add rows for nodes that are in this chunk + # a connected component at a time + node_c = 0 # Just a counter for the log / speed measurement + + n_ccs = len(ccs) + + # Make parent id creation easier + chunk_id = u_node_chunk_ids[0] + parent_chunk_id = cg.get_chunk_id(layer=2, *cg.get_chunk_coordinates(chunk_id)) + + parent_ids = cg.get_unique_node_id_range(parent_chunk_id, step=n_ccs) + + time_start = time.time() + time_dict = collections.defaultdict(list) + + time_start_1 = time.time() + sparse_indices = {} + remapping = {} + for k in edge_id_dict.keys(): + # Circumvent datatype issues + + u_ids, inv_ids = np.unique(edge_id_dict[k], return_inverse=True) + mapped_ids = np.arange(len(u_ids), dtype=np.int32) + remapped_arr = mapped_ids[inv_ids].reshape(edge_id_dict[k].shape) + + sparse_indices[k] = compute_indices_pandas(remapped_arr) + remapping[k] = dict(zip(u_ids, 
mapped_ids)) + + time_dict["sparse_indices"].append(time.time() - time_start_1) + + rows = [] + + for i_cc, cc in enumerate(ccs): + node_ids = unique_graph_ids[cc] + + u_chunk_ids = np.unique([cg.get_chunk_id(n) for n in node_ids]) + + if len(u_chunk_ids) > 1: + cg.logger.error(f"Found multiple chunk ids: {u_chunk_ids}") + raise Exception() + + # Create parent id + parent_id = parent_ids[i_cc] + + parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2) + + # Add rows for nodes that are in this chunk + for i_node_id, node_id in enumerate(node_ids): + # Extract edges relevant to this node + + # in chunk + connected + time_start_2 = time.time() + if node_id in remapping["in_connected"]: + row_ids, column_ids = sparse_indices["in_connected"][ + remapping["in_connected"][node_id] + ] + + inv_column_ids = (column_ids + 1) % 2 + + connected_ids = edge_id_dict["in_connected"][row_ids, inv_column_ids] + connected_affs = edge_aff_dict["in_connected"][row_ids] + connected_areas = edge_area_dict["in_connected"][row_ids] + time_dict["in_connected"].append(time.time() - time_start_2) + time_start_2 = time.time() + else: + connected_ids = np.array([], dtype=np.uint64) + connected_affs = np.array([], dtype=np.float32) + connected_areas = np.array([], dtype=np.uint64) + + # in chunk + disconnected + if node_id in remapping["in_disconnected"]: + row_ids, column_ids = sparse_indices["in_disconnected"][ + remapping["in_disconnected"][node_id] + ] + inv_column_ids = (column_ids + 1) % 2 + + disconnected_ids = edge_id_dict["in_disconnected"][ + row_ids, inv_column_ids + ] + disconnected_affs = edge_aff_dict["in_disconnected"][row_ids] + disconnected_areas = edge_area_dict["in_disconnected"][row_ids] + time_dict["in_disconnected"].append(time.time() - time_start_2) + time_start_2 = time.time() + else: + disconnected_ids = np.array([], dtype=np.uint64) + disconnected_affs = np.array([], dtype=np.float32) + disconnected_areas = np.array([], dtype=np.uint64) + + # out chunk + 
connected + if node_id in remapping["between_connected"]: + row_ids, column_ids = sparse_indices["between_connected"][ + remapping["between_connected"][node_id] + ] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["out_connected_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + connected_ids = np.concatenate( + [ + connected_ids, + edge_id_dict["between_connected"][row_ids, inv_column_ids], + ] + ) + connected_affs = np.concatenate( + [connected_affs, edge_aff_dict["between_connected"][row_ids]] + ) + connected_areas = np.concatenate( + [connected_areas, edge_area_dict["between_connected"][row_ids]] + ) + + parent_cross_edges = np.concatenate( + [parent_cross_edges, edge_id_dict["between_connected"][row_ids]] + ) + + time_dict["out_connected"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # out chunk + disconnected + if node_id in remapping["between_disconnected"]: + row_ids, column_ids = sparse_indices["between_disconnected"][ + remapping["between_disconnected"][node_id] + ] + + row_ids = row_ids[column_ids == 0] + column_ids = column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["out_disconnected_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + disconnected_ids = np.concatenate( + [ + disconnected_ids, + edge_id_dict["between_disconnected"][row_ids, inv_column_ids], + ] + ) + disconnected_affs = np.concatenate( + [disconnected_affs, edge_aff_dict["between_disconnected"][row_ids]] + ) + disconnected_areas = np.concatenate( + [ + disconnected_areas, + edge_area_dict["between_disconnected"][row_ids], + ] + ) + + time_dict["out_disconnected"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # cross + if node_id in remapping["cross"]: + row_ids, column_ids = sparse_indices["cross"][ + remapping["cross"][node_id] + ] + + row_ids = row_ids[column_ids == 0] + column_ids 
= column_ids[column_ids == 0] + inv_column_ids = (column_ids + 1) % 2 + time_dict["cross_mask"].append(time.time() - time_start_2) + time_start_2 = time.time() + + connected_ids = np.concatenate( + [connected_ids, edge_id_dict["cross"][row_ids, inv_column_ids]] + ) + connected_affs = np.concatenate( + [connected_affs, np.full((len(row_ids)), np.inf, dtype=np.float32)] + ) + connected_areas = np.concatenate( + [connected_areas, np.ones((len(row_ids)), dtype=np.uint64)] + ) + + parent_cross_edges = np.concatenate( + [parent_cross_edges, edge_id_dict["cross"][row_ids]] + ) + time_dict["cross"].append(time.time() - time_start_2) + time_start_2 = time.time() + + # Create node + partners = np.concatenate([connected_ids, disconnected_ids]) + affinities = np.concatenate([connected_affs, disconnected_affs]) + areas = np.concatenate([connected_areas, disconnected_areas]) + connected = np.arange(len(connected_ids), dtype=np.int) + + val_dict = { + column_keys.Connectivity.Partner: partners, + column_keys.Connectivity.Affinity: affinities, + column_keys.Connectivity.Area: areas, + column_keys.Connectivity.Connected: connected, + column_keys.Hierarchy.Parent: parent_id, + } + + rows.append( + cg.mutate_row( + serializers.serialize_uint64(node_id), + val_dict, + time_stamp=time_stamp, + ) + ) + node_c += 1 + time_dict["creating_lv1_row"].append(time.time() - time_start_2) + + time_start_1 = time.time() + # Create parent node + rows.append( + cg.mutate_row( + serializers.serialize_uint64(parent_id), + {column_keys.Hierarchy.Child: node_ids}, + time_stamp=time_stamp, + ) + ) + + time_dict["creating_lv2_row"].append(time.time() - time_start_1) + time_start_1 = time.time() + + cce_layers = cg.get_cross_chunk_edges_layer(parent_cross_edges) + u_cce_layers = np.unique(cce_layers) + + val_dict = {} + for cc_layer in u_cce_layers: + layer_cross_edges = parent_cross_edges[cce_layers == cc_layer] + + if len(layer_cross_edges) > 0: + val_dict[ + 
column_keys.Connectivity.CrossChunkEdge[cc_layer] + ] = layer_cross_edges + + if len(val_dict) > 0: + rows.append( + cg.mutate_row( + serializers.serialize_uint64(parent_id), + val_dict, + time_stamp=time_stamp, + ) + ) + node_c += 1 + + time_dict["adding_cross_edges"].append(time.time() - time_start_1) + + if len(rows) > 100000: + time_start_1 = time.time() + cg.bulk_write(rows) + time_dict["writing"].append(time.time() - time_start_1) + + if len(rows) > 0: + time_start_1 = time.time() + cg.bulk_write(rows) + time_dict["writing"].append(time.time() - time_start_1) + + if verbose: + cg.logger.debug( + "Time creating rows: %.3fs for %d ccs with %d nodes" + % (time.time() - time_start, len(ccs), node_c) + ) + + for k in time_dict.keys(): + cg.logger.debug( + "%s -- %.3fms for %d instances -- avg = %.3fms" + % ( + k, + np.sum(time_dict[k]) * 1000, + len(time_dict[k]), + np.mean(time_dict[k]) * 1000, + ) + ) + From 972479e0b595f991c3e09bfb90e1275f4e1ffe7a Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 5 Aug 2019 13:42:28 -0400 Subject: [PATCH 0082/1097] wip: edges refactor --- pychunkedgraph/backend/chunkedgraph_init.py | 15 ------- pychunkedgraph/backend/utils/edges.py | 23 ++++++++++ pychunkedgraph/ingest/ran_ingestion.py | 48 +++++++-------------- 3 files changed, 39 insertions(+), 47 deletions(-) create mode 100644 pychunkedgraph/backend/utils/edges.py diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index 997bf78c6..d362f1028 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -2,7 +2,6 @@ Module for stuff related to creating the initial chunkedgraph """ - import time import datetime import collections @@ -11,20 +10,6 @@ import pytz import numpy as np -from google.cloud.bigtable.row_filters import ( - TimestampRange, - TimestampRangeFilter, - ColumnRangeFilter, - ValueRangeFilter, - RowFilterChain, - ColumnQualifierRegexFilter, - 
RowFilterUnion, - ConditionalRowFilter, - PassAllFilter, - RowFilter, - RowKeyRegexFilter, - FamilyNameRegexFilter, -) from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.column_family import MaxVersionsGCRule diff --git a/pychunkedgraph/backend/utils/edges.py b/pychunkedgraph/backend/utils/edges.py new file mode 100644 index 000000000..b24394397 --- /dev/null +++ b/pychunkedgraph/backend/utils/edges.py @@ -0,0 +1,23 @@ +""" +Utils for edges +""" + + +import numpy as np +from basetypes import NODE_ID, EDGE_AFFINITY, EDGE_AREA + +TYPES = ["in", "between", "cross"] + + +class Edges: + def __init__( + self, + node_ids1: np.ndarray, + node_ids2: np.ndarray, + affinities: np.ndarray, + areas: np.ndarray, + ): + self.node_ids1 = node_ids1 + self.node_ids1 = node_ids2 + self.affinities = affinities + self.areas = areas diff --git a/pychunkedgraph/ingest/ran_ingestion.py b/pychunkedgraph/ingest/ran_ingestion.py index 7a742d1bf..d71031ac2 100644 --- a/pychunkedgraph/ingest/ran_ingestion.py +++ b/pychunkedgraph/ingest/ran_ingestion.py @@ -11,6 +11,7 @@ from multiwrapper import multiprocessing_utils as mu from pychunkedgraph.ingest import ingestionmanager, ingestion_utils as iu +from ..backend.utils.edges import TYPES as EDGE_TYPES, ATTRS def ingest_into_chunkedgraph(storage_path, ws_cv_path, cg_table_id, @@ -236,38 +237,21 @@ def create_atomic_chunk(im, chunk_coord, aff_dtype=np.float32, verbose=True): edge_dict = collect_edge_data(im, chunk_coord, aff_dtype=aff_dtype) mapping = collect_agglomeration_data(im, chunk_coord) - active_edge_dict, isolated_ids = define_active_edges(edge_dict, mapping) - - edge_ids = {} - edge_affs = {} - edge_areas = {} - - for k in edge_dict.keys(): - if k == "cross": - edge_ids[k] = np.concatenate([edge_dict[k]["sv1"][:, None], - edge_dict[k]["sv2"][:, None]], - axis=1) - continue - - sv1_conn = edge_dict[k]["sv1"][active_edge_dict[k]] - sv2_conn = edge_dict[k]["sv2"][active_edge_dict[k]] - aff_conn = 
edge_dict[k]["aff"][active_edge_dict[k]] - area_conn = edge_dict[k]["area"][active_edge_dict[k]] - edge_ids[f"{k}_connected"] = np.concatenate([sv1_conn[:, None], - sv2_conn[:, None]], - axis=1) - edge_affs[f"{k}_connected"] = aff_conn.astype(np.float32) - edge_areas[f"{k}_connected"] = area_conn - - sv1_disconn = edge_dict[k]["sv1"][~active_edge_dict[k]] - sv2_disconn = edge_dict[k]["sv2"][~active_edge_dict[k]] - aff_disconn = edge_dict[k]["aff"][~active_edge_dict[k]] - area_disconn = edge_dict[k]["area"][~active_edge_dict[k]] - edge_ids[f"{k}_disconnected"] = np.concatenate([sv1_disconn[:, None], - sv2_disconn[:, None]], - axis=1) - edge_affs[f"{k}_disconnected"] = aff_disconn.astype(np.float32) - edge_areas[f"{k}_disconnected"] = area_disconn + _, isolated_ids = define_active_edges(edge_dict, mapping) + + chunk_edges = {} + for edge_type in EDGE_TYPES: + supervoxel_ids1 = edge_dict[edge_type]["sv1"] + supervoxel_ids2 = edge_dict[edge_type]["sv2"] + + ones = np.ones(len(supervoxel_ids1)) + affinities = edge_dict[edge_type].get("aff", float("inf") * ones) + areas = edge_dict[edge_type].get("area", ones) + chunk_edges[edge_type] = { + ATTRS.: supervoxel_ids1, + "sv2": supervoxel_ids2, + "aff" + } im.cg.add_atomic_edges_in_chunks(edge_ids, edge_affs, edge_areas, isolated_node_ids=isolated_ids) From 489b4f3bb971be14f970521f949c1fea42aa8d59 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 5 Aug 2019 13:54:49 -0400 Subject: [PATCH 0083/1097] wip: edges class --- pychunkedgraph/ingest/ran_ingestion.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/pychunkedgraph/ingest/ran_ingestion.py b/pychunkedgraph/ingest/ran_ingestion.py index d71031ac2..a5ac738d4 100644 --- a/pychunkedgraph/ingest/ran_ingestion.py +++ b/pychunkedgraph/ingest/ran_ingestion.py @@ -11,7 +11,7 @@ from multiwrapper import multiprocessing_utils as mu from pychunkedgraph.ingest import ingestionmanager, ingestion_utils as iu -from ..backend.utils.edges 
import TYPES as EDGE_TYPES, ATTRS +from ..backend.utils.edges import TYPES as EDGE_TYPES, Edges def ingest_into_chunkedgraph(storage_path, ws_cv_path, cg_table_id, @@ -247,14 +247,13 @@ def create_atomic_chunk(im, chunk_coord, aff_dtype=np.float32, verbose=True): ones = np.ones(len(supervoxel_ids1)) affinities = edge_dict[edge_type].get("aff", float("inf") * ones) areas = edge_dict[edge_type].get("area", ones) - chunk_edges[edge_type] = { - ATTRS.: supervoxel_ids1, - "sv2": supervoxel_ids2, - "aff" - } - - im.cg.add_atomic_edges_in_chunks(edge_ids, edge_affs, edge_areas, - isolated_node_ids=isolated_ids) + chunk_edges[edge_type] = Edges( + supervoxel_ids1, supervoxel_ids2, affinities, areas + ) + + im.cg.add_atomic_edges_in_chunks( + edge_ids, edge_affs, edge_areas, isolated_node_ids=isolated_ids + ) return edge_ids, edge_affs, edge_areas From 5fe5ca77221740692c43f0c8d016acbaf1fb506f Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 5 Aug 2019 15:46:14 -0400 Subject: [PATCH 0084/1097] wip: only store parent info in rows --- pychunkedgraph/backend/chunkedgraph_init.py | 168 +++++++------------- pychunkedgraph/backend/utils/edges.py | 2 +- pychunkedgraph/ingest/ran_ingestion.py | 20 +-- 3 files changed, 70 insertions(+), 120 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index d362f1028..57b3c7816 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -14,6 +14,9 @@ from google.cloud.bigtable.column_family import MaxVersionsGCRule +from .utils import basetypes +from .utils.edges import Edges +from ..backend.utils.edges import TYPES as EDGE_TYPES, Edges from pychunkedgraph.backend.chunkedgraph_utils import ( compute_indices_pandas, get_google_compatible_time_stamp, @@ -25,28 +28,48 @@ def add_atomic_edges_in_chunks( - cg, - edge_id_dict: dict, - edge_aff_dict: dict, - edge_area_dict: dict, + cg_instance, + chunk_coord, + chunk_edges: 
dict, isolated_node_ids: Sequence[np.uint64], verbose: bool = True, time_stamp: Optional[datetime.datetime] = None, ): - """ Creates atomic nodes in first abstraction layer for a SINGLE chunk - and all abstract nodes in the second for the same chunk - Alle edges (edge_ids) need to be from one chunk and no nodes should - exist for this chunk prior to calling this function. All cross edges - (cross_edge_ids) have to point out the chunk (first entry is the id - within the chunk) - :param edge_id_dict: dict - :param edge_aff_dict: dict - :param edge_area_dict: dict - :param isolated_node_ids: list of uint64s - ids of nodes that have no edge in the chunked graph - :param verbose: bool - :param time_stamp: datetime + """ + Creates atomic nodes in first abstraction layer for a SINGLE chunk + and all abstract nodes in the second for the same chunk. + All the edges (edge_ids) need to be from one chunk and no nodes should + exist for this chunk prior to calling this function. All cross edges + (cross_edge_ids) have to point out the chunk (first entry is the id + within the chunk) + + :param edge_id_dict: dict + :param edge_aff_dict: dict + :param edge_area_dict: dict + :param isolated_node_ids: list of uint64s + ids of nodes that have no edge in the chunked graph + :param verbose: bool + :param time_stamp: datetime """ + + time_start = time.time() + + # get all nodes and edges in the chunk + isolated_nodes_self_edges = np.vstack([isolated_node_ids, isolated_node_ids]).T + node_ids = [isolated_node_ids] + edge_ids = [isolated_nodes_self_edges] + for edge_type in EDGE_TYPES: + edges = chunk_edges[edge_type] + node_ids.append(edges.node_ids1) + node_ids.append(edges.node_ids2) + edge_ids.append(np.vstack([edges.node_ids1, edges.node_ids2]).T) + + chunk_node_ids = np.unique(np.concatenate(node_ids)) + chunk_edge_ids = np.unique(np.concatenate(edge_ids)) + + if not chunk_node_ids: + return 0 + if time_stamp is None: time_stamp = datetime.datetime.utcnow() @@ -56,73 +79,21 @@ def 
add_atomic_edges_in_chunks( # Comply to resolution of BigTables TimeRange time_stamp = get_google_compatible_time_stamp(time_stamp, round_up=False) - edge_aff_keys = [ - "in_connected", - "in_disconnected", - "between_connected", - "between_disconnected", - ] - edge_id_keys = edge_aff_keys[:].insert(2, "cross") - - # Check if keys exist and include an empty array if not - n_edge_ids = 0 - empty_edges_array = np.array([], dtype=np.uint64).reshape(0, 2) - - for key in edge_id_keys: - edge_id_dict[key] = np.concatenate( - edge_id_dict.get(key, empty_edges_array.copy(), empty_edges_array.copy()) - ) - n_edge_ids += len(edge_id_dict[key]) + chunk_id = cg_instance.get_chunk_id(layer=1, *chunk_coord) + node_chunk_ids = cg_instance.get_chunk_ids_from_node_ids(chunk_node_ids) - for key in edge_aff_keys: - edge_aff_dict[key] = np.concatenate( - edge_aff_dict.get(key, empty_edges_array.copy(), empty_edges_array.copy()) - ) - - time_start = time.time() - - # Get connected component within the chunk - chunk_node_ids = np.concatenate( - [ - isolated_node_ids.astype(np.uint64), - np.unique(edge_id_dict["in_connected"]), - np.unique(edge_id_dict["in_disconnected"]), - np.unique(edge_id_dict["cross"][:, 0]), - np.unique(edge_id_dict["between_connected"][:, 0]), - np.unique(edge_id_dict["between_disconnected"][:, 0]), - ] - ) - - # nothing to do - if not len(chunk_node_ids): - return 0 - - chunk_node_ids = np.unique(chunk_node_ids) - - node_chunk_ids = np.array( - [cg.get_chunk_id(c) for c in chunk_node_ids], dtype=np.uint64 - ) - - u_node_chunk_ids, c_node_chunk_ids = np.unique(node_chunk_ids, return_counts=True) + u_node_chunk_ids = np.unique(node_chunk_ids) if len(u_node_chunk_ids) > 1: - raise Exception( - "%d: %d chunk ids found in node id list. " - "Some edges might be in the wrong order. 
" - "Number of occurences:" % (chunk_id, len(u_node_chunk_ids)), - c_node_chunk_ids, - ) - - add_edge_ids = np.vstack([chunk_node_ids, chunk_node_ids]).T - edge_ids = np.concatenate([edge_id_dict["in_connected"].copy(), add_edge_ids]) + raise Exception(f"{len(u_node_chunk_ids)} chunk ids found in chunk: {chunk_id}") graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( - edge_ids, make_directed=True + chunk_edge_ids, make_directed=True ) ccs = flatgraph_utils.connected_components(graph) if verbose: - cg.logger.debug("CC in chunk: %.3fs" % (time.time() - time_start)) + cg_instance.logger.debug("CC in chunk: %.3fs" % (time.time() - time_start)) # Add rows for nodes that are in this chunk # a connected component at a time @@ -130,11 +101,8 @@ def add_atomic_edges_in_chunks( n_ccs = len(ccs) - # Make parent id creation easier - chunk_id = u_node_chunk_ids[0] - parent_chunk_id = cg.get_chunk_id(layer=2, *cg.get_chunk_coordinates(chunk_id)) - - parent_ids = cg.get_unique_node_id_range(parent_chunk_id, step=n_ccs) + parent_chunk_id = cg_instance.get_chunk_id(layer=2, *chunk_coord) + parent_ids = cg_instance.get_unique_node_id_range(parent_chunk_id, step=n_ccs) time_start = time.time() time_dict = collections.defaultdict(list) @@ -159,10 +127,10 @@ def add_atomic_edges_in_chunks( for i_cc, cc in enumerate(ccs): node_ids = unique_graph_ids[cc] - u_chunk_ids = np.unique([cg.get_chunk_id(n) for n in node_ids]) + u_chunk_ids = np.unique([cg_instance.get_chunk_id(n) for n in node_ids]) if len(u_chunk_ids) > 1: - cg.logger.error(f"Found multiple chunk ids: {u_chunk_ids}") + cg_instance.logger.error(f"Found multiple chunk ids: {u_chunk_ids}") raise Exception() # Create parent id @@ -318,7 +286,7 @@ def add_atomic_edges_in_chunks( } rows.append( - cg.mutate_row( + cg_instance.mutate_row( serializers.serialize_uint64(node_id), val_dict, time_stamp=time_stamp, @@ -330,7 +298,7 @@ def add_atomic_edges_in_chunks( time_start_1 = time.time() # Create parent node rows.append( - 
cg.mutate_row( + cg_instance.mutate_row( serializers.serialize_uint64(parent_id), {column_keys.Hierarchy.Child: node_ids}, time_stamp=time_stamp, @@ -340,21 +308,21 @@ def add_atomic_edges_in_chunks( time_dict["creating_lv2_row"].append(time.time() - time_start_1) time_start_1 = time.time() - cce_layers = cg.get_cross_chunk_edges_layer(parent_cross_edges) + cce_layers = cg_instance.get_cross_chunk_edges_layer(parent_cross_edges) u_cce_layers = np.unique(cce_layers) val_dict = {} for cc_layer in u_cce_layers: layer_cross_edges = parent_cross_edges[cce_layers == cc_layer] - if len(layer_cross_edges) > 0: + if layer_cross_edges: val_dict[ column_keys.Connectivity.CrossChunkEdge[cc_layer] ] = layer_cross_edges - if len(val_dict) > 0: + if val_dict > 0: rows.append( - cg.mutate_row( + cg_instance.mutate_row( serializers.serialize_uint64(parent_id), val_dict, time_stamp=time_stamp, @@ -366,28 +334,10 @@ def add_atomic_edges_in_chunks( if len(rows) > 100000: time_start_1 = time.time() - cg.bulk_write(rows) + cg_instance.bulk_write(rows) time_dict["writing"].append(time.time() - time_start_1) - if len(rows) > 0: + if rows: time_start_1 = time.time() - cg.bulk_write(rows) + cg_instance.bulk_write(rows) time_dict["writing"].append(time.time() - time_start_1) - - if verbose: - cg.logger.debug( - "Time creating rows: %.3fs for %d ccs with %d nodes" - % (time.time() - time_start, len(ccs), node_c) - ) - - for k in time_dict.keys(): - cg.logger.debug( - "%s -- %.3fms for %d instances -- avg = %.3fms" - % ( - k, - np.sum(time_dict[k]) * 1000, - len(time_dict[k]), - np.mean(time_dict[k]) * 1000, - ) - ) - diff --git a/pychunkedgraph/backend/utils/edges.py b/pychunkedgraph/backend/utils/edges.py index b24394397..e40bccd30 100644 --- a/pychunkedgraph/backend/utils/edges.py +++ b/pychunkedgraph/backend/utils/edges.py @@ -18,6 +18,6 @@ def __init__( areas: np.ndarray, ): self.node_ids1 = node_ids1 - self.node_ids1 = node_ids2 + self.node_ids2 = node_ids2 self.affinities = affinities 
self.areas = areas diff --git a/pychunkedgraph/ingest/ran_ingestion.py b/pychunkedgraph/ingest/ran_ingestion.py index a5ac738d4..2f383ad60 100644 --- a/pychunkedgraph/ingest/ran_ingestion.py +++ b/pychunkedgraph/ingest/ran_ingestion.py @@ -11,7 +11,9 @@ from multiwrapper import multiprocessing_utils as mu from pychunkedgraph.ingest import ingestionmanager, ingestion_utils as iu +from ..backend.chunkedgraph_init import add_atomic_edges_in_chunks from ..backend.utils.edges import TYPES as EDGE_TYPES, Edges +from ..backend.utils import basetypes def ingest_into_chunkedgraph(storage_path, ws_cv_path, cg_table_id, @@ -222,21 +224,20 @@ def _create_atomic_chunk(args): (time.time() - time_start)) -def create_atomic_chunk(im, chunk_coord, aff_dtype=np.float32, verbose=True): +def create_atomic_chunk(imanager, chunk_coord, aff_dtype=basetypes.EDGE_AFFINITY): """ Creates single atomic chunk - :param im: IngestionManager + :param imanager: IngestionManager :param chunk_coord: np.ndarray array of three ints :param aff_dtype: np.dtype np.float64 or np.float32 - :param verbose: bool :return: """ chunk_coord = np.array(list(chunk_coord), dtype=np.int) - edge_dict = collect_edge_data(im, chunk_coord, aff_dtype=aff_dtype) - mapping = collect_agglomeration_data(im, chunk_coord) + edge_dict = collect_edge_data(imanager, chunk_coord, aff_dtype=aff_dtype) + mapping = collect_agglomeration_data(imanager, chunk_coord) _, isolated_ids = define_active_edges(edge_dict, mapping) chunk_edges = {} @@ -251,11 +252,10 @@ def create_atomic_chunk(im, chunk_coord, aff_dtype=np.float32, verbose=True): supervoxel_ids1, supervoxel_ids2, affinities, areas ) - im.cg.add_atomic_edges_in_chunks( - edge_ids, edge_affs, edge_areas, isolated_node_ids=isolated_ids - ) + add_atomic_edges_in_chunks(imanager.cg, chunk_coord, chunk_edges, isolated_node_ids=isolated_ids) - return edge_ids, edge_affs, edge_areas + # to track workers completion + return chunk_coord def _get_cont_chunk_coords(im, chunk_coord_a, 
chunk_coord_b): @@ -535,5 +535,5 @@ def _mapping_default(key): if k == "in": isolated.append(edge_dict[k]["sv2"][agg_2_m]) - return active, np.unique(np.concatenate(isolated).astype(np.uint64)) + return active, np.unique(np.concatenate(isolated).astype(basetypes.NODE_ID)) From 76e35a543eb7bfb789aa2bb7f8155781fd6db9d6 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 5 Aug 2019 19:38:08 -0400 Subject: [PATCH 0085/1097] move edges def --- pychunkedgraph/backend/chunkedgraph_init.py | 48 ++++++++++++------- pychunkedgraph/edges/__init__.py | 0 .../utils/edges.py => edges/definitions.py} | 8 ++-- pychunkedgraph/edges/utils.py | 0 pychunkedgraph/ingest/ran_ingestion.py | 2 +- 5 files changed, 37 insertions(+), 21 deletions(-) create mode 100644 pychunkedgraph/edges/__init__.py rename pychunkedgraph/{backend/utils/edges.py => edges/definitions.py} (73%) create mode 100644 pychunkedgraph/edges/utils.py diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index 57b3c7816..5ad0a15ea 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -17,12 +17,12 @@ from .utils import basetypes from .utils.edges import Edges from ..backend.utils.edges import TYPES as EDGE_TYPES, Edges -from pychunkedgraph.backend.chunkedgraph_utils import ( +from .chunkedgraph_utils import ( compute_indices_pandas, get_google_compatible_time_stamp, ) -from pychunkedgraph.backend import flatgraph_utils -from pychunkedgraph.backend.utils import serializers, column_keys +from . 
import flatgraph_utils +from .utils import serializers, column_keys UTC = pytz.UTC @@ -55,7 +55,8 @@ def add_atomic_edges_in_chunks( time_start = time.time() # get all nodes and edges in the chunk - isolated_nodes_self_edges = np.vstack([isolated_node_ids, isolated_node_ids]).T + isolated_nodes_self_edges = np.vstack( + [isolated_node_ids, isolated_node_ids]).T node_ids = [isolated_node_ids] edge_ids = [isolated_nodes_self_edges] for edge_type in EDGE_TYPES: @@ -84,7 +85,8 @@ def add_atomic_edges_in_chunks( u_node_chunk_ids = np.unique(node_chunk_ids) if len(u_node_chunk_ids) > 1: - raise Exception(f"{len(u_node_chunk_ids)} chunk ids found in chunk: {chunk_id}") + raise Exception( + f"{len(u_node_chunk_ids)} chunk ids found in chunk: {chunk_id}") graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( chunk_edge_ids, make_directed=True @@ -93,7 +95,8 @@ def add_atomic_edges_in_chunks( ccs = flatgraph_utils.connected_components(graph) if verbose: - cg_instance.logger.debug("CC in chunk: %.3fs" % (time.time() - time_start)) + cg_instance.logger.debug("CC in chunk: %.3fs" % + (time.time() - time_start)) # Add rows for nodes that are in this chunk # a connected component at a time @@ -102,7 +105,8 @@ def add_atomic_edges_in_chunks( n_ccs = len(ccs) parent_chunk_id = cg_instance.get_chunk_id(layer=2, *chunk_coord) - parent_ids = cg_instance.get_unique_node_id_range(parent_chunk_id, step=n_ccs) + parent_ids = cg_instance.get_unique_node_id_range( + parent_chunk_id, step=n_ccs) time_start = time.time() time_dict = collections.defaultdict(list) @@ -127,10 +131,12 @@ def add_atomic_edges_in_chunks( for i_cc, cc in enumerate(ccs): node_ids = unique_graph_ids[cc] - u_chunk_ids = np.unique([cg_instance.get_chunk_id(n) for n in node_ids]) + u_chunk_ids = np.unique([cg_instance.get_chunk_id(n) + for n in node_ids]) if len(u_chunk_ids) > 1: - cg_instance.logger.error(f"Found multiple chunk ids: {u_chunk_ids}") + cg_instance.logger.error( + f"Found multiple chunk ids: 
{u_chunk_ids}") raise Exception() # Create parent id @@ -151,7 +157,8 @@ def add_atomic_edges_in_chunks( inv_column_ids = (column_ids + 1) % 2 - connected_ids = edge_id_dict["in_connected"][row_ids, inv_column_ids] + connected_ids = edge_id_dict["in_connected"][row_ids, + inv_column_ids] connected_affs = edge_aff_dict["in_connected"][row_ids] connected_areas = edge_area_dict["in_connected"][row_ids] time_dict["in_connected"].append(time.time() - time_start_2) @@ -189,13 +196,15 @@ def add_atomic_edges_in_chunks( row_ids = row_ids[column_ids == 0] column_ids = column_ids[column_ids == 0] inv_column_ids = (column_ids + 1) % 2 - time_dict["out_connected_mask"].append(time.time() - time_start_2) + time_dict["out_connected_mask"].append( + time.time() - time_start_2) time_start_2 = time.time() connected_ids = np.concatenate( [ connected_ids, - edge_id_dict["between_connected"][row_ids, inv_column_ids], + edge_id_dict["between_connected"][row_ids, + inv_column_ids], ] ) connected_affs = np.concatenate( @@ -221,13 +230,15 @@ def add_atomic_edges_in_chunks( row_ids = row_ids[column_ids == 0] column_ids = column_ids[column_ids == 0] inv_column_ids = (column_ids + 1) % 2 - time_dict["out_disconnected_mask"].append(time.time() - time_start_2) + time_dict["out_disconnected_mask"].append( + time.time() - time_start_2) time_start_2 = time.time() disconnected_ids = np.concatenate( [ disconnected_ids, - edge_id_dict["between_disconnected"][row_ids, inv_column_ids], + edge_id_dict["between_disconnected"][row_ids, + inv_column_ids], ] ) disconnected_affs = np.concatenate( @@ -240,7 +251,8 @@ def add_atomic_edges_in_chunks( ] ) - time_dict["out_disconnected"].append(time.time() - time_start_2) + time_dict["out_disconnected"].append( + time.time() - time_start_2) time_start_2 = time.time() # cross @@ -259,7 +271,8 @@ def add_atomic_edges_in_chunks( [connected_ids, edge_id_dict["cross"][row_ids, inv_column_ids]] ) connected_affs = np.concatenate( - [connected_affs, 
np.full((len(row_ids)), np.inf, dtype=np.float32)] + [connected_affs, np.full( + (len(row_ids)), np.inf, dtype=np.float32)] ) connected_areas = np.concatenate( [connected_areas, np.ones((len(row_ids)), dtype=np.uint64)] @@ -308,7 +321,8 @@ def add_atomic_edges_in_chunks( time_dict["creating_lv2_row"].append(time.time() - time_start_1) time_start_1 = time.time() - cce_layers = cg_instance.get_cross_chunk_edges_layer(parent_cross_edges) + cce_layers = cg_instance.get_cross_chunk_edges_layer( + parent_cross_edges) u_cce_layers = np.unique(cce_layers) val_dict = {} diff --git a/pychunkedgraph/edges/__init__.py b/pychunkedgraph/edges/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pychunkedgraph/backend/utils/edges.py b/pychunkedgraph/edges/definitions.py similarity index 73% rename from pychunkedgraph/backend/utils/edges.py rename to pychunkedgraph/edges/definitions.py index e40bccd30..a6008fd3f 100644 --- a/pychunkedgraph/backend/utils/edges.py +++ b/pychunkedgraph/edges/definitions.py @@ -1,12 +1,14 @@ """ -Utils for edges +Classes and types for edges """ import numpy as np -from basetypes import NODE_ID, EDGE_AFFINITY, EDGE_AREA -TYPES = ["in", "between", "cross"] +IN_CHUNK = "in" +BT_CHUNK = "between" +CX_CHUNK = "cross" +TYPES = [IN_CHUNK, BT_CHUNK, CX_CHUNK] class Edges: diff --git a/pychunkedgraph/edges/utils.py b/pychunkedgraph/edges/utils.py new file mode 100644 index 000000000..e69de29bb diff --git a/pychunkedgraph/ingest/ran_ingestion.py b/pychunkedgraph/ingest/ran_ingestion.py index 2f383ad60..f3f3daa3a 100644 --- a/pychunkedgraph/ingest/ran_ingestion.py +++ b/pychunkedgraph/ingest/ran_ingestion.py @@ -12,7 +12,7 @@ from pychunkedgraph.ingest import ingestionmanager, ingestion_utils as iu from ..backend.chunkedgraph_init import add_atomic_edges_in_chunks -from ..backend.utils.edges import TYPES as EDGE_TYPES, Edges +from ..edges.definitions import TYPES as EDGE_TYPES, Edges from ..backend.utils import basetypes From 
f1b16bc7d91dc135b01897ff518c9c2d1b757e94 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 5 Aug 2019 19:44:19 -0400 Subject: [PATCH 0086/1097] helper function to parse nodes and edges --- pychunkedgraph/backend/chunkedgraph_init.py | 39 ++++++++++++--------- 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index 5ad0a15ea..3c83b4257 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -54,21 +54,10 @@ def add_atomic_edges_in_chunks( time_start = time.time() - # get all nodes and edges in the chunk - isolated_nodes_self_edges = np.vstack( - [isolated_node_ids, isolated_node_ids]).T - node_ids = [isolated_node_ids] - edge_ids = [isolated_nodes_self_edges] - for edge_type in EDGE_TYPES: - edges = chunk_edges[edge_type] - node_ids.append(edges.node_ids1) - node_ids.append(edges.node_ids2) - edge_ids.append(np.vstack([edges.node_ids1, edges.node_ids2]).T) + chunk_nodes, chunk_edges = _get_chunk_nodes_and_edges( + chunk_edges, isolated_node_ids) - chunk_node_ids = np.unique(np.concatenate(node_ids)) - chunk_edge_ids = np.unique(np.concatenate(edge_ids)) - - if not chunk_node_ids: + if not chunk_nodes: return 0 if time_stamp is None: @@ -81,7 +70,7 @@ def add_atomic_edges_in_chunks( time_stamp = get_google_compatible_time_stamp(time_stamp, round_up=False) chunk_id = cg_instance.get_chunk_id(layer=1, *chunk_coord) - node_chunk_ids = cg_instance.get_chunk_ids_from_node_ids(chunk_node_ids) + node_chunk_ids = cg_instance.get_chunk_ids_from_node_ids(chunk_nodes) u_node_chunk_ids = np.unique(node_chunk_ids) if len(u_node_chunk_ids) > 1: @@ -89,7 +78,7 @@ def add_atomic_edges_in_chunks( f"{len(u_node_chunk_ids)} chunk ids found in chunk: {chunk_id}") graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( - chunk_edge_ids, make_directed=True + chunk_edges, make_directed=True ) ccs = 
flatgraph_utils.connected_components(graph) @@ -355,3 +344,21 @@ def add_atomic_edges_in_chunks( time_start_1 = time.time() cg_instance.bulk_write(rows) time_dict["writing"].append(time.time() - time_start_1) + + +def _get_chunk_nodes_and_edges(chunk_edges: dict, isolated_node_ids: Sequence[np.uint64]): + """get all nodes and edges in the chunk""" + isolated_nodes_self_edges = np.vstack( + [isolated_node_ids, isolated_node_ids]).T + node_ids = [isolated_node_ids] + edge_ids = [isolated_nodes_self_edges] + for edge_type in EDGE_TYPES: + edges = chunk_edges[edge_type] + node_ids.append(edges.node_ids1) + node_ids.append(edges.node_ids2) + edge_ids.append(np.vstack([edges.node_ids1, edges.node_ids2]).T) + + chunk_node_ids = np.unique(np.concatenate(node_ids)) + chunk_edge_ids = np.unique(np.concatenate(edge_ids)) + + return (chunk_node_ids, chunk_edge_ids) From 7cdb06b3dcdf416f03e6f57163c7756a991f6166 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 5 Aug 2019 19:56:04 -0400 Subject: [PATCH 0087/1097] format with black --- pychunkedgraph/backend/chunkedgraph_init.py | 74 ++++++++------------- 1 file changed, 27 insertions(+), 47 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index 3c83b4257..f0c63ff65 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -17,11 +17,8 @@ from .utils import basetypes from .utils.edges import Edges from ..backend.utils.edges import TYPES as EDGE_TYPES, Edges -from .chunkedgraph_utils import ( - compute_indices_pandas, - get_google_compatible_time_stamp, -) -from . 
import flatgraph_utils +from .chunkedgraph_utils import compute_indices_pandas, get_google_compatible_time_stamp +from .flatgraph_utils import build_gt_graph, connected_components from .utils import serializers, column_keys UTC = pytz.UTC @@ -54,10 +51,11 @@ def add_atomic_edges_in_chunks( time_start = time.time() - chunk_nodes, chunk_edges = _get_chunk_nodes_and_edges( - chunk_edges, isolated_node_ids) + chunk_node_ids, chunk_edge_ids = _get_chunk_nodes_and_edges( + chunk_edges, isolated_node_ids + ) - if not chunk_nodes: + if not chunk_node_ids: return 0 if time_stamp is None: @@ -69,23 +67,16 @@ def add_atomic_edges_in_chunks( # Comply to resolution of BigTables TimeRange time_stamp = get_google_compatible_time_stamp(time_stamp, round_up=False) - chunk_id = cg_instance.get_chunk_id(layer=1, *chunk_coord) - node_chunk_ids = cg_instance.get_chunk_ids_from_node_ids(chunk_nodes) - + node_chunk_ids = cg_instance.get_chunk_ids_from_node_ids(chunk_node_ids) u_node_chunk_ids = np.unique(node_chunk_ids) - if len(u_node_chunk_ids) > 1: - raise Exception( - f"{len(u_node_chunk_ids)} chunk ids found in chunk: {chunk_id}") + assert len(u_node_chunk_ids) == 1 - graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( - chunk_edges, make_directed=True - ) + graph, _, _, unique_ids = build_gt_graph(chunk_edge_ids, make_directed=True) - ccs = flatgraph_utils.connected_components(graph) + ccs = connected_components(graph) if verbose: - cg_instance.logger.debug("CC in chunk: %.3fs" % - (time.time() - time_start)) + cg_instance.logger.debug("CC in chunk: %.3fs" % (time.time() - time_start)) # Add rows for nodes that are in this chunk # a connected component at a time @@ -94,8 +85,7 @@ def add_atomic_edges_in_chunks( n_ccs = len(ccs) parent_chunk_id = cg_instance.get_chunk_id(layer=2, *chunk_coord) - parent_ids = cg_instance.get_unique_node_id_range( - parent_chunk_id, step=n_ccs) + parent_ids = cg_instance.get_unique_node_id_range(parent_chunk_id, step=n_ccs) time_start = 
time.time() time_dict = collections.defaultdict(list) @@ -105,7 +95,6 @@ def add_atomic_edges_in_chunks( remapping = {} for k in edge_id_dict.keys(): # Circumvent datatype issues - u_ids, inv_ids = np.unique(edge_id_dict[k], return_inverse=True) mapped_ids = np.arange(len(u_ids), dtype=np.int32) remapped_arr = mapped_ids[inv_ids].reshape(edge_id_dict[k].shape) @@ -118,14 +107,12 @@ def add_atomic_edges_in_chunks( rows = [] for i_cc, cc in enumerate(ccs): - node_ids = unique_graph_ids[cc] + node_ids = unique_ids[cc] - u_chunk_ids = np.unique([cg_instance.get_chunk_id(n) - for n in node_ids]) + u_chunk_ids = np.unique([cg_instance.get_chunk_id(n) for n in node_ids]) if len(u_chunk_ids) > 1: - cg_instance.logger.error( - f"Found multiple chunk ids: {u_chunk_ids}") + cg_instance.logger.error(f"Found multiple chunk ids: {u_chunk_ids}") raise Exception() # Create parent id @@ -146,8 +133,7 @@ def add_atomic_edges_in_chunks( inv_column_ids = (column_ids + 1) % 2 - connected_ids = edge_id_dict["in_connected"][row_ids, - inv_column_ids] + connected_ids = edge_id_dict["in_connected"][row_ids, inv_column_ids] connected_affs = edge_aff_dict["in_connected"][row_ids] connected_areas = edge_area_dict["in_connected"][row_ids] time_dict["in_connected"].append(time.time() - time_start_2) @@ -185,15 +171,13 @@ def add_atomic_edges_in_chunks( row_ids = row_ids[column_ids == 0] column_ids = column_ids[column_ids == 0] inv_column_ids = (column_ids + 1) % 2 - time_dict["out_connected_mask"].append( - time.time() - time_start_2) + time_dict["out_connected_mask"].append(time.time() - time_start_2) time_start_2 = time.time() connected_ids = np.concatenate( [ connected_ids, - edge_id_dict["between_connected"][row_ids, - inv_column_ids], + edge_id_dict["between_connected"][row_ids, inv_column_ids], ] ) connected_affs = np.concatenate( @@ -219,15 +203,13 @@ def add_atomic_edges_in_chunks( row_ids = row_ids[column_ids == 0] column_ids = column_ids[column_ids == 0] inv_column_ids = (column_ids + 
1) % 2 - time_dict["out_disconnected_mask"].append( - time.time() - time_start_2) + time_dict["out_disconnected_mask"].append(time.time() - time_start_2) time_start_2 = time.time() disconnected_ids = np.concatenate( [ disconnected_ids, - edge_id_dict["between_disconnected"][row_ids, - inv_column_ids], + edge_id_dict["between_disconnected"][row_ids, inv_column_ids], ] ) disconnected_affs = np.concatenate( @@ -240,8 +222,7 @@ def add_atomic_edges_in_chunks( ] ) - time_dict["out_disconnected"].append( - time.time() - time_start_2) + time_dict["out_disconnected"].append(time.time() - time_start_2) time_start_2 = time.time() # cross @@ -260,8 +241,7 @@ def add_atomic_edges_in_chunks( [connected_ids, edge_id_dict["cross"][row_ids, inv_column_ids]] ) connected_affs = np.concatenate( - [connected_affs, np.full( - (len(row_ids)), np.inf, dtype=np.float32)] + [connected_affs, np.full((len(row_ids)), np.inf, dtype=np.float32)] ) connected_areas = np.concatenate( [connected_areas, np.ones((len(row_ids)), dtype=np.uint64)] @@ -310,8 +290,7 @@ def add_atomic_edges_in_chunks( time_dict["creating_lv2_row"].append(time.time() - time_start_1) time_start_1 = time.time() - cce_layers = cg_instance.get_cross_chunk_edges_layer( - parent_cross_edges) + cce_layers = cg_instance.get_cross_chunk_edges_layer(parent_cross_edges) u_cce_layers = np.unique(cce_layers) val_dict = {} @@ -346,10 +325,11 @@ def add_atomic_edges_in_chunks( time_dict["writing"].append(time.time() - time_start_1) -def _get_chunk_nodes_and_edges(chunk_edges: dict, isolated_node_ids: Sequence[np.uint64]): +def _get_chunk_nodes_and_edges( + chunk_edges: dict, isolated_node_ids: Sequence[np.uint64] +): """get all nodes and edges in the chunk""" - isolated_nodes_self_edges = np.vstack( - [isolated_node_ids, isolated_node_ids]).T + isolated_nodes_self_edges = np.vstack([isolated_node_ids, isolated_node_ids]).T node_ids = [isolated_node_ids] edge_ids = [isolated_nodes_self_edges] for edge_type in EDGE_TYPES: From 
b1d864d5a9d00572f2a2456effba6bcc9bec93e9 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 5 Aug 2019 20:16:08 -0400 Subject: [PATCH 0088/1097] helper function to get timestamp --- pychunkedgraph/backend/chunkedgraph_init.py | 30 +++++++++------------ 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index f0c63ff65..538bd75c1 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -21,8 +21,6 @@ from .flatgraph_utils import build_gt_graph, connected_components from .utils import serializers, column_keys -UTC = pytz.UTC - def add_atomic_edges_in_chunks( cg_instance, @@ -47,7 +45,7 @@ def add_atomic_edges_in_chunks( ids of nodes that have no edge in the chunked graph :param verbose: bool :param time_stamp: datetime - """ + """ time_start = time.time() @@ -58,30 +56,17 @@ def add_atomic_edges_in_chunks( if not chunk_node_ids: return 0 - if time_stamp is None: - time_stamp = datetime.datetime.utcnow() - - if time_stamp.tzinfo is None: - time_stamp = UTC.localize(time_stamp) - - # Comply to resolution of BigTables TimeRange - time_stamp = get_google_compatible_time_stamp(time_stamp, round_up=False) - node_chunk_ids = cg_instance.get_chunk_ids_from_node_ids(chunk_node_ids) u_node_chunk_ids = np.unique(node_chunk_ids) assert len(u_node_chunk_ids) == 1 graph, _, _, unique_ids = build_gt_graph(chunk_edge_ids, make_directed=True) - ccs = connected_components(graph) if verbose: cg_instance.logger.debug("CC in chunk: %.3fs" % (time.time() - time_start)) - # Add rows for nodes that are in this chunk - # a connected component at a time node_c = 0 # Just a counter for the log / speed measurement - n_ccs = len(ccs) parent_chunk_id = cg_instance.get_chunk_id(layer=2, *chunk_coord) @@ -104,8 +89,8 @@ def add_atomic_edges_in_chunks( time_dict["sparse_indices"].append(time.time() - time_start_1) + time_stamp = 
_get_valid_timestamp(time_stamp) rows = [] - for i_cc, cc in enumerate(ccs): node_ids = unique_ids[cc] @@ -342,3 +327,14 @@ def _get_chunk_nodes_and_edges( chunk_edge_ids = np.unique(np.concatenate(edge_ids)) return (chunk_node_ids, chunk_edge_ids) + + +def _get_valid_timestamp(timestamp): + if timestamp is None: + timestamp = datetime.datetime.utcnow() + + if timestamp.tzinfo is None: + timestamp = pytz.UTC.localize(timestamp) + + # Comply to resolution of BigTables TimeRange + return get_google_compatible_time_stamp(timestamp, round_up=False) From ae9daef6cd4b06971a51d36eb232acf9373f52fb Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 5 Aug 2019 20:28:04 -0400 Subject: [PATCH 0089/1097] use f string --- pychunkedgraph/backend/chunkedgraph_init.py | 24 ++++++++++----------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index 538bd75c1..3a2924fac 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -64,7 +64,7 @@ def add_atomic_edges_in_chunks( ccs = connected_components(graph) if verbose: - cg_instance.logger.debug("CC in chunk: %.3fs" % (time.time() - time_start)) + cg_instance.logger.debug(f"CC in chunk: {(time.time() - time_start):.3f}s") node_c = 0 # Just a counter for the log / speed measurement n_ccs = len(ccs) @@ -72,10 +72,8 @@ def add_atomic_edges_in_chunks( parent_chunk_id = cg_instance.get_chunk_id(layer=2, *chunk_coord) parent_ids = cg_instance.get_unique_node_id_range(parent_chunk_id, step=n_ccs) - time_start = time.time() time_dict = collections.defaultdict(list) - - time_start_1 = time.time() + time_start = time.time() sparse_indices = {} remapping = {} for k in edge_id_dict.keys(): @@ -87,7 +85,7 @@ def add_atomic_edges_in_chunks( sparse_indices[k] = compute_indices_pandas(remapped_arr) remapping[k] = dict(zip(u_ids, mapped_ids)) - 
time_dict["sparse_indices"].append(time.time() - time_start_1) + time_dict["sparse_indices"].append(time.time() - time_start) time_stamp = _get_valid_timestamp(time_stamp) rows = [] @@ -262,7 +260,7 @@ def add_atomic_edges_in_chunks( node_c += 1 time_dict["creating_lv1_row"].append(time.time() - time_start_2) - time_start_1 = time.time() + time_start = time.time() # Create parent node rows.append( cg_instance.mutate_row( @@ -272,8 +270,8 @@ def add_atomic_edges_in_chunks( ) ) - time_dict["creating_lv2_row"].append(time.time() - time_start_1) - time_start_1 = time.time() + time_dict["creating_lv2_row"].append(time.time() - time_start) + time_start = time.time() cce_layers = cg_instance.get_cross_chunk_edges_layer(parent_cross_edges) u_cce_layers = np.unique(cce_layers) @@ -297,17 +295,17 @@ def add_atomic_edges_in_chunks( ) node_c += 1 - time_dict["adding_cross_edges"].append(time.time() - time_start_1) + time_dict["adding_cross_edges"].append(time.time() - time_start) if len(rows) > 100000: - time_start_1 = time.time() + time_start = time.time() cg_instance.bulk_write(rows) - time_dict["writing"].append(time.time() - time_start_1) + time_dict["writing"].append(time.time() - time_start) if rows: - time_start_1 = time.time() + time_start = time.time() cg_instance.bulk_write(rows) - time_dict["writing"].append(time.time() - time_start_1) + time_dict["writing"].append(time.time() - time_start) def _get_chunk_nodes_and_edges( From 88d15837aa7104e0e303e3f99b60e6ccbca58b86 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 5 Aug 2019 21:01:10 -0400 Subject: [PATCH 0090/1097] reorder some stuff --- pychunkedgraph/backend/chunkedgraph_init.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index 3a2924fac..6947325a3 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -67,10 +67,6 @@ def 
add_atomic_edges_in_chunks( cg_instance.logger.debug(f"CC in chunk: {(time.time() - time_start):.3f}s") node_c = 0 # Just a counter for the log / speed measurement - n_ccs = len(ccs) - - parent_chunk_id = cg_instance.get_chunk_id(layer=2, *chunk_coord) - parent_ids = cg_instance.get_unique_node_id_range(parent_chunk_id, step=n_ccs) time_dict = collections.defaultdict(list) time_start = time.time() @@ -87,6 +83,9 @@ def add_atomic_edges_in_chunks( time_dict["sparse_indices"].append(time.time() - time_start) + parent_chunk_id = cg_instance.get_chunk_id(layer=2, *chunk_coord) + parent_ids = cg_instance.get_unique_node_id_range(parent_chunk_id, step=len(ccs)) + time_stamp = _get_valid_timestamp(time_stamp) rows = [] for i_cc, cc in enumerate(ccs): From ea0a9304036aac580727483f20d89056d8ab596b Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 6 Aug 2019 10:02:46 -0400 Subject: [PATCH 0091/1097] change parameter name, more formatting --- pychunkedgraph/backend/chunkedgraph_init.py | 18 +++++------------- pychunkedgraph/ingest/ran_ingestion.py | 2 +- 2 files changed, 6 insertions(+), 14 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index 6947325a3..570327bd9 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -26,7 +26,7 @@ def add_atomic_edges_in_chunks( cg_instance, chunk_coord, chunk_edges: dict, - isolated_node_ids: Sequence[np.uint64], + isolated: Sequence[np.uint64], verbose: bool = True, time_stamp: Optional[datetime.datetime] = None, ): @@ -48,11 +48,7 @@ def add_atomic_edges_in_chunks( """ time_start = time.time() - - chunk_node_ids, chunk_edge_ids = _get_chunk_nodes_and_edges( - chunk_edges, isolated_node_ids - ) - + chunk_node_ids, chunk_edge_ids = _get_chunk_nodes_and_edges(chunk_edges, isolated) if not chunk_node_ids: return 0 @@ -62,12 +58,10 @@ def add_atomic_edges_in_chunks( graph, _, _, unique_ids = 
build_gt_graph(chunk_edge_ids, make_directed=True) ccs = connected_components(graph) - if verbose: cg_instance.logger.debug(f"CC in chunk: {(time.time() - time_start):.3f}s") node_c = 0 # Just a counter for the log / speed measurement - time_dict = collections.defaultdict(list) time_start = time.time() sparse_indices = {} @@ -307,12 +301,10 @@ def add_atomic_edges_in_chunks( time_dict["writing"].append(time.time() - time_start) -def _get_chunk_nodes_and_edges( - chunk_edges: dict, isolated_node_ids: Sequence[np.uint64] -): +def _get_chunk_nodes_and_edges(chunk_edges: dict, isolated_ids: Sequence[np.uint64]): """get all nodes and edges in the chunk""" - isolated_nodes_self_edges = np.vstack([isolated_node_ids, isolated_node_ids]).T - node_ids = [isolated_node_ids] + isolated_nodes_self_edges = np.vstack([isolated_ids, isolated_ids]).T + node_ids = [isolated_ids] edge_ids = [isolated_nodes_self_edges] for edge_type in EDGE_TYPES: edges = chunk_edges[edge_type] diff --git a/pychunkedgraph/ingest/ran_ingestion.py b/pychunkedgraph/ingest/ran_ingestion.py index f3f3daa3a..3a135a64d 100644 --- a/pychunkedgraph/ingest/ran_ingestion.py +++ b/pychunkedgraph/ingest/ran_ingestion.py @@ -252,7 +252,7 @@ def create_atomic_chunk(imanager, chunk_coord, aff_dtype=basetypes.EDGE_AFFINITY supervoxel_ids1, supervoxel_ids2, affinities, areas ) - add_atomic_edges_in_chunks(imanager.cg, chunk_coord, chunk_edges, isolated_node_ids=isolated_ids) + add_atomic_edges_in_chunks(imanager.cg, chunk_coord, chunk_edges, isolated=isolated_ids) # to track workers completion return chunk_coord From c46e001a997765593f8125e029e58f60dbc3a78e Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 6 Aug 2019 11:21:45 -0400 Subject: [PATCH 0092/1097] move new format code to new module, revert old ran_ingestion --- pychunkedgraph/backend/chunkedgraph_init.py | 19 +- pychunkedgraph/ingest/ran_ingestion.py | 61 ++- pychunkedgraph/ingest/ran_ingestion_v2.py | 577 ++++++++++++++++++++ 3 files changed, 619 
insertions(+), 38 deletions(-) create mode 100644 pychunkedgraph/ingest/ran_ingestion_v2.py diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index 570327bd9..51366d98f 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -22,7 +22,7 @@ from .utils import serializers, column_keys -def add_atomic_edges_in_chunks( +def add_atomic_edges( cg_instance, chunk_coord, chunk_edges: dict, @@ -52,8 +52,8 @@ def add_atomic_edges_in_chunks( if not chunk_node_ids: return 0 - node_chunk_ids = cg_instance.get_chunk_ids_from_node_ids(chunk_node_ids) - u_node_chunk_ids = np.unique(node_chunk_ids) + chunk_ids = cg_instance.get_chunk_ids_from_node_ids(chunk_node_ids) + u_node_chunk_ids = np.unique(chunk_ids) assert len(u_node_chunk_ids) == 1 graph, _, _, unique_ids = build_gt_graph(chunk_edge_ids, make_directed=True) @@ -63,19 +63,6 @@ def add_atomic_edges_in_chunks( node_c = 0 # Just a counter for the log / speed measurement time_dict = collections.defaultdict(list) - time_start = time.time() - sparse_indices = {} - remapping = {} - for k in edge_id_dict.keys(): - # Circumvent datatype issues - u_ids, inv_ids = np.unique(edge_id_dict[k], return_inverse=True) - mapped_ids = np.arange(len(u_ids), dtype=np.int32) - remapped_arr = mapped_ids[inv_ids].reshape(edge_id_dict[k].shape) - - sparse_indices[k] = compute_indices_pandas(remapped_arr) - remapping[k] = dict(zip(u_ids, mapped_ids)) - - time_dict["sparse_indices"].append(time.time() - time_start) parent_chunk_id = cg_instance.get_chunk_id(layer=2, *chunk_coord) parent_ids = cg_instance.get_unique_node_id_range(parent_chunk_id, step=len(ccs)) diff --git a/pychunkedgraph/ingest/ran_ingestion.py b/pychunkedgraph/ingest/ran_ingestion.py index 3a135a64d..7a742d1bf 100644 --- a/pychunkedgraph/ingest/ran_ingestion.py +++ b/pychunkedgraph/ingest/ran_ingestion.py @@ -11,9 +11,6 @@ from multiwrapper import multiprocessing_utils as mu 
from pychunkedgraph.ingest import ingestionmanager, ingestion_utils as iu -from ..backend.chunkedgraph_init import add_atomic_edges_in_chunks -from ..edges.definitions import TYPES as EDGE_TYPES, Edges -from ..backend.utils import basetypes def ingest_into_chunkedgraph(storage_path, ws_cv_path, cg_table_id, @@ -224,38 +221,58 @@ def _create_atomic_chunk(args): (time.time() - time_start)) -def create_atomic_chunk(imanager, chunk_coord, aff_dtype=basetypes.EDGE_AFFINITY): +def create_atomic_chunk(im, chunk_coord, aff_dtype=np.float32, verbose=True): """ Creates single atomic chunk - :param imanager: IngestionManager + :param im: IngestionManager :param chunk_coord: np.ndarray array of three ints :param aff_dtype: np.dtype np.float64 or np.float32 + :param verbose: bool :return: """ chunk_coord = np.array(list(chunk_coord), dtype=np.int) - edge_dict = collect_edge_data(imanager, chunk_coord, aff_dtype=aff_dtype) - mapping = collect_agglomeration_data(imanager, chunk_coord) - _, isolated_ids = define_active_edges(edge_dict, mapping) + edge_dict = collect_edge_data(im, chunk_coord, aff_dtype=aff_dtype) + mapping = collect_agglomeration_data(im, chunk_coord) + active_edge_dict, isolated_ids = define_active_edges(edge_dict, mapping) + + edge_ids = {} + edge_affs = {} + edge_areas = {} + + for k in edge_dict.keys(): + if k == "cross": + edge_ids[k] = np.concatenate([edge_dict[k]["sv1"][:, None], + edge_dict[k]["sv2"][:, None]], + axis=1) + continue - chunk_edges = {} - for edge_type in EDGE_TYPES: - supervoxel_ids1 = edge_dict[edge_type]["sv1"] - supervoxel_ids2 = edge_dict[edge_type]["sv2"] + sv1_conn = edge_dict[k]["sv1"][active_edge_dict[k]] + sv2_conn = edge_dict[k]["sv2"][active_edge_dict[k]] + aff_conn = edge_dict[k]["aff"][active_edge_dict[k]] + area_conn = edge_dict[k]["area"][active_edge_dict[k]] + edge_ids[f"{k}_connected"] = np.concatenate([sv1_conn[:, None], + sv2_conn[:, None]], + axis=1) + edge_affs[f"{k}_connected"] = aff_conn.astype(np.float32) + 
edge_areas[f"{k}_connected"] = area_conn - ones = np.ones(len(supervoxel_ids1)) - affinities = edge_dict[edge_type].get("aff", float("inf") * ones) - areas = edge_dict[edge_type].get("area", ones) - chunk_edges[edge_type] = Edges( - supervoxel_ids1, supervoxel_ids2, affinities, areas - ) + sv1_disconn = edge_dict[k]["sv1"][~active_edge_dict[k]] + sv2_disconn = edge_dict[k]["sv2"][~active_edge_dict[k]] + aff_disconn = edge_dict[k]["aff"][~active_edge_dict[k]] + area_disconn = edge_dict[k]["area"][~active_edge_dict[k]] + edge_ids[f"{k}_disconnected"] = np.concatenate([sv1_disconn[:, None], + sv2_disconn[:, None]], + axis=1) + edge_affs[f"{k}_disconnected"] = aff_disconn.astype(np.float32) + edge_areas[f"{k}_disconnected"] = area_disconn - add_atomic_edges_in_chunks(imanager.cg, chunk_coord, chunk_edges, isolated=isolated_ids) + im.cg.add_atomic_edges_in_chunks(edge_ids, edge_affs, edge_areas, + isolated_node_ids=isolated_ids) - # to track workers completion - return chunk_coord + return edge_ids, edge_affs, edge_areas def _get_cont_chunk_coords(im, chunk_coord_a, chunk_coord_b): @@ -535,5 +552,5 @@ def _mapping_default(key): if k == "in": isolated.append(edge_dict[k]["sv2"][agg_2_m]) - return active, np.unique(np.concatenate(isolated).astype(basetypes.NODE_ID)) + return active, np.unique(np.concatenate(isolated).astype(np.uint64)) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py new file mode 100644 index 000000000..e7436adea --- /dev/null +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -0,0 +1,577 @@ +""" +Module for ingesting in chunkedgraph format with edges stored outside bigtable +""" + +import collections +import time +import random + +import pandas as pd +import cloudvolume +import networkx as nx +import numpy as np +import numpy.lib.recfunctions as rfn +import zstandard as zstd +from multiwrapper import multiprocessing_utils as mu + +from pychunkedgraph.ingest import ingestionmanager, ingestion_utils as iu 
+from ..backend.chunkedgraph_init import add_atomic_edges +from ..edges.definitions import TYPES as EDGE_TYPES, Edges +from ..backend.utils import basetypes + + +def ingest_into_chunkedgraph( + storage_path, + ws_cv_path, + cg_table_id, + chunk_size=[256, 256, 512], + use_skip_connections=True, + s_bits_atomic_layer=None, + fan_out=2, + aff_dtype=np.float32, + size=None, + instance_id=None, + project_id=None, + start_layer=1, + n_threads=[64, 64], +): + """ Creates a chunkedgraph from a Ran Agglomerattion + + :param storage_path: str + Google cloud bucket path (agglomeration) + example: gs://ranl-scratch/minnie_test_2 + :param ws_cv_path: str + Google cloud bucket path (watershed segmentation) + example: gs://microns-seunglab/minnie_v0/minnie10/ws_minnie_test_2/agg + :param cg_table_id: str + chunkedgraph table name + :param fan_out: int + fan out of chunked graph (2 == Octree) + :param aff_dtype: np.dtype + affinity datatype (np.float32 or np.float64) + :param instance_id: str + Google instance id + :param project_id: str + Google project id + :param start_layer: int + :param n_threads: list of ints + number of threads to use + :return: + """ + storage_path = storage_path.strip("/") + ws_cv_path = ws_cv_path.strip("/") + + cg_mesh_dir = f"{cg_table_id}_meshes" + chunk_size = np.array(chunk_size, dtype=np.uint64) + + _, n_layers_agg = iu.initialize_chunkedgraph( + cg_table_id=cg_table_id, + ws_cv_path=ws_cv_path, + chunk_size=chunk_size, + size=size, + use_skip_connections=use_skip_connections, + s_bits_atomic_layer=s_bits_atomic_layer, + cg_mesh_dir=cg_mesh_dir, + fan_out=fan_out, + instance_id=instance_id, + project_id=project_id, + ) + + im = ingestionmanager.IngestionManager( + storage_path=storage_path, + cg_table_id=cg_table_id, + n_layers=n_layers_agg, + instance_id=instance_id, + project_id=project_id, + ) + + # #TODO: Remove later: + # logging.basicConfig(level=logging.DEBUG) + # im.cg.logger = logging.getLogger(__name__) + # 
------------------------------------------ + if start_layer < 3: + create_atomic_chunks(im, aff_dtype=aff_dtype, n_threads=n_threads[0]) + + create_abstract_layers(im, n_threads=n_threads[1], start_layer=start_layer) + + return im + + +def create_abstract_layers(im, start_layer=3, n_threads=1): + """ Creates abstract of chunkedgraph (> 2) + + :param im: IngestionManager + :param n_threads: int + number of threads to use + :return: + """ + if start_layer < 3: + start_layer = 3 + + assert start_layer < int(im.cg.n_layers + 1) + + for layer_id in range(start_layer, int(im.cg.n_layers + 1)): + create_layer(im, layer_id, n_threads=n_threads) + + +def create_layer(im, layer_id, block_size=100, n_threads=1): + """ Creates abstract layer of chunkedgraph + + Abstract layers have to be build in sequence. Abstract layers are all layers + above the first layer (1). `create_atomic_chunks` creates layer 2 as well. + Hence, this function is responsible for every creating layers > 2. + + :param im: IngestionManager + :param layer_id: int + > 2 + :param n_threads: int + number of threads to use + :return: + """ + assert layer_id > 2 + + child_chunk_coords = im.chunk_coords // im.cg.fan_out ** (layer_id - 3) + child_chunk_coords = child_chunk_coords.astype(np.int) + child_chunk_coords = np.unique(child_chunk_coords, axis=0) + + parent_chunk_coords = child_chunk_coords // im.cg.fan_out + parent_chunk_coords = parent_chunk_coords.astype(np.int) + parent_chunk_coords, inds = np.unique( + parent_chunk_coords, axis=0, return_inverse=True + ) + + im_info = im.get_serialized_info() + multi_args = [] + + # Randomize chunks + order = np.arange(len(parent_chunk_coords), dtype=np.int) + np.random.shuffle(order) + + # Block chunks + block_size = min(block_size, int(np.ceil(len(order) / n_threads / 3))) + n_blocks = int(len(order) / block_size) + blocks = np.array_split(order, n_blocks) + + for i_block, block in enumerate(blocks): + chunks = [] + for idx in block: + 
chunks.append(child_chunk_coords[inds == idx]) + + multi_args.append([im_info, layer_id, len(order), n_blocks, i_block, chunks]) + + if n_threads == 1: + mu.multiprocess_func( + _create_layers, + multi_args, + n_threads=n_threads, + verbose=True, + debug=n_threads == 1, + ) + else: + mu.multisubprocess_func( + _create_layers, multi_args, n_threads=n_threads, suffix=f"{layer_id}" + ) + + +def _create_layers(args): + """ Multiprocessing helper for create_layer """ + im_info, layer_id, n_chunks, n_blocks, i_block, chunks = args + im = ingestionmanager.IngestionManager(**im_info) + + for i_chunk, child_chunk_coords in enumerate(chunks): + time_start = time.time() + + im.cg.add_layer(layer_id, child_chunk_coords, n_threads=8, verbose=True) + + print( + f"Layer {layer_id} - Job {i_block + 1} / {n_blocks} - " + f"{i_chunk + 1} / {len(chunks)} -- %.3fs" % (time.time() - time_start) + ) + + +def create_atomic_chunks(im, aff_dtype=np.float32, n_threads=1, block_size=100): + """ Creates all atomic chunks + + :param im: IngestionManager + :param aff_dtype: np.dtype + affinity datatype (np.float32 or np.float64) + :param n_threads: int + number of threads to use + :return: + """ + + im_info = im.get_serialized_info() + + multi_args = [] + + # Randomize chunk order + chunk_coords = list(im.chunk_coord_gen) + order = np.arange(len(chunk_coords), dtype=np.int) + np.random.shuffle(order) + + # Block chunks + block_size = min(block_size, int(np.ceil(len(chunk_coords) / n_threads / 3))) + n_blocks = int(len(chunk_coords) / block_size) + blocks = np.array_split(order, n_blocks) + + for i_block, block in enumerate(blocks): + chunks = [] + for b_idx in block: + chunks.append(chunk_coords[b_idx]) + + multi_args.append([im_info, aff_dtype, n_blocks, i_block, chunks]) + + if n_threads == 1: + mu.multiprocess_func( + _create_atomic_chunk, + multi_args, + n_threads=n_threads, + verbose=True, + debug=n_threads == 1, + ) + else: + mu.multisubprocess_func(_create_atomic_chunk, multi_args, 
n_threads=n_threads) + + +def _create_atomic_chunk(args): + """ Multiprocessing helper for create_atomic_chunks """ + im_info, aff_dtype, n_blocks, i_block, chunks = args + im = ingestionmanager.IngestionManager(**im_info) + + for i_chunk, chunk_coord in enumerate(chunks): + time_start = time.time() + + create_atomic_chunk(im, chunk_coord, aff_dtype=aff_dtype, verbose=True) + + print( + f"Layer 1 - {chunk_coord} - Job {i_block + 1} / {n_blocks} - " + f"{i_chunk + 1} / {len(chunks)} -- %.3fs" % (time.time() - time_start) + ) + + +def create_atomic_chunk(imanager, chunk_coord, aff_dtype=basetypes.EDGE_AFFINITY): + """ Creates single atomic chunk + + :param imanager: IngestionManager + :param chunk_coord: np.ndarray + array of three ints + :param aff_dtype: np.dtype + np.float64 or np.float32 + :return: + """ + chunk_coord = np.array(list(chunk_coord), dtype=np.int) + + edge_dict = collect_edge_data(imanager, chunk_coord, aff_dtype=aff_dtype) + mapping = collect_agglomeration_data(imanager, chunk_coord) + _, isolated_ids = define_active_edges(edge_dict, mapping) + + chunk_edges = {} + for edge_type in EDGE_TYPES: + sv_ids1 = edge_dict[edge_type]["sv1"] + sv_ids2 = edge_dict[edge_type]["sv2"] + + ones = np.ones(len(supervoxel_ids1)) + affinities = edge_dict[edge_type].get("aff", float("inf") * ones) + areas = edge_dict[edge_type].get("area", ones) + chunk_edges[edge_type] = Edges(sv_ids1, sv_ids2, affinities, areas) + + add_atomic_edges(imanager.cg, chunk_coord, chunk_edges, isolated=isolated_ids) + # to track workers completion + return chunk_coord + + +def _get_cont_chunk_coords(im, chunk_coord_a, chunk_coord_b): + """ Computes chunk coordinates that compute data between the named chunks + + :param im: IngestionManagaer + :param chunk_coord_a: np.ndarray + array of three ints + :param chunk_coord_b: np.ndarray + array of three ints + :return: np.ndarray + """ + + diff = chunk_coord_a - chunk_coord_b + + dir_dim = np.where(diff != 0)[0] + assert len(dir_dim) == 1 + 
dir_dim = dir_dim[0] + + if diff[dir_dim] > 0: + chunk_coord_l = chunk_coord_a + else: + chunk_coord_l = chunk_coord_b + + c_chunk_coords = [] + for dx in [-1, 0]: + for dy in [-1, 0]: + for dz in [-1, 0]: + if dz == dy == dx == 0: + continue + + c_chunk_coord = chunk_coord_l + np.array([dx, dy, dz]) + + if [dx, dy, dz][dir_dim] == 0: + continue + + if im.is_out_of_bounce(c_chunk_coord): + continue + + c_chunk_coords.append(c_chunk_coord) + + return c_chunk_coords + + +def collect_edge_data(im, chunk_coord, aff_dtype=np.float32): + """ Loads edge for single chunk + + :param im: IngestionManager + :param chunk_coord: np.ndarray + array of three ints + :param aff_dtype: np.dtype + :return: dict of np.ndarrays + """ + subfolder = "chunked_rg" + + base_path = f"{im.storage_path}/{subfolder}/" + + chunk_coord = np.array(chunk_coord) + + chunk_id = im.cg.get_chunk_id( + layer=1, x=chunk_coord[0], y=chunk_coord[1], z=chunk_coord[2] + ) + + filenames = collections.defaultdict(list) + swap = collections.defaultdict(list) + for x in [chunk_coord[0] - 1, chunk_coord[0]]: + for y in [chunk_coord[1] - 1, chunk_coord[1]]: + for z in [chunk_coord[2] - 1, chunk_coord[2]]: + + if im.is_out_of_bounce(np.array([x, y, z])): + continue + + # EDGES WITHIN CHUNKS + filename = f"in_chunk_0_{x}_{y}_{z}_{chunk_id}.data" + filenames["in"].append(filename) + + for d in [-1, 1]: + for dim in range(3): + diff = np.zeros([3], dtype=np.int) + diff[dim] = d + + adjacent_chunk_coord = chunk_coord + diff + adjacent_chunk_id = im.cg.get_chunk_id( + layer=1, + x=adjacent_chunk_coord[0], + y=adjacent_chunk_coord[1], + z=adjacent_chunk_coord[2], + ) + + if im.is_out_of_bounce(adjacent_chunk_coord): + continue + + c_chunk_coords = _get_cont_chunk_coords( + im, chunk_coord, adjacent_chunk_coord + ) + + larger_id = np.max([chunk_id, adjacent_chunk_id]) + smaller_id = np.min([chunk_id, adjacent_chunk_id]) + chunk_id_string = f"{smaller_id}_{larger_id}" + + for c_chunk_coord in c_chunk_coords: + x, y, z = 
c_chunk_coord + + # EDGES BETWEEN CHUNKS + filename = f"between_chunks_0_{x}_{y}_{z}_{chunk_id_string}.data" + filenames["between"].append(filename) + + swap[filename] = larger_id == chunk_id + + # EDGES FROM CUTS OF SVS + filename = f"fake_0_{x}_{y}_{z}_{chunk_id_string}.data" + filenames["cross"].append(filename) + + swap[filename] = larger_id == chunk_id + + edge_data = {} + read_counter = collections.Counter() + + dtype = [ + ("sv1", np.uint64), + ("sv2", np.uint64), + ("aff", aff_dtype), + ("area", np.uint64), + ] + for k in filenames: + # print(k, len(filenames[k])) + + with cloudvolume.Storage(base_path, n_threads=10) as stor: + files = stor.get_files(filenames[k]) + + data = [] + for file in files: + if file["content"] is None: + # print(f"{file['filename']} not created or empty") + continue + + if file["error"] is not None: + # print(f"error reading {file['filename']}") + continue + + if swap[file["filename"]]: + this_dtype = [dtype[1], dtype[0], dtype[2], dtype[3]] + content = np.frombuffer(file["content"], dtype=this_dtype) + else: + content = np.frombuffer(file["content"], dtype=dtype) + + data.append(content) + + read_counter[k] += 1 + + try: + edge_data[k] = rfn.stack_arrays(data, usemask=False) + except: + raise () + + edge_data_df = pd.DataFrame(edge_data[k]) + edge_data_dfg = ( + edge_data_df.groupby(["sv1", "sv2"]).aggregate(np.sum).reset_index() + ) + edge_data[k] = edge_data_dfg.to_records() + + # # TEST + # with cloudvolume.Storage(base_path, n_threads=10) as stor: + # files = list(stor.list_files()) + # + # true_counter = collections.Counter() + # for file in files: + # if str(chunk_id) in file: + # true_counter[file.split("_")[0]] += 1 + # + # print("Truth", true_counter) + # print("Reality", read_counter) + + return edge_data + + +def _read_agg_files(filenames, base_path): + with cloudvolume.Storage(base_path, n_threads=10) as stor: + files = stor.get_files(filenames) + + edge_list = [] + for file in files: + if file["content"] is None: + 
continue + + if file["error"] is not None: + continue + + content = zstd.ZstdDecompressor().decompressobj().decompress(file["content"]) + edge_list.append(np.frombuffer(content, dtype=np.uint64).reshape(-1, 2)) + + return edge_list + + +def collect_agglomeration_data(im, chunk_coord): + """ Collects agglomeration information & builds connected component mapping + + :param im: IngestionManager + :param chunk_coord: np.ndarray + array of three ints + :return: dictionary + """ + subfolder = "remap" + base_path = f"{im.storage_path}/{subfolder}/" + + chunk_coord = np.array(chunk_coord) + + chunk_id = im.cg.get_chunk_id( + layer=1, x=chunk_coord[0], y=chunk_coord[1], z=chunk_coord[2] + ) + + filenames = [] + for mip_level in range(0, int(im.n_layers - 1)): + x, y, z = np.array(chunk_coord / 2 ** mip_level, dtype=np.int) + filenames.append(f"done_{mip_level}_{x}_{y}_{z}_{chunk_id}.data.zst") + + for d in [-1, 1]: + for dim in range(3): + diff = np.zeros([3], dtype=np.int) + diff[dim] = d + + adjacent_chunk_coord = chunk_coord + diff + + adjacent_chunk_id = im.cg.get_chunk_id( + layer=1, + x=adjacent_chunk_coord[0], + y=adjacent_chunk_coord[1], + z=adjacent_chunk_coord[2], + ) + + for mip_level in range(0, int(im.n_layers - 1)): + x, y, z = np.array(adjacent_chunk_coord / 2 ** mip_level, dtype=np.int) + filenames.append( + f"done_{mip_level}_{x}_{y}_{z}_{adjacent_chunk_id}.data.zst" + ) + + # print(filenames) + edge_list = _read_agg_files(filenames, base_path) + + edges = np.concatenate(edge_list) + + G = nx.Graph() + G.add_edges_from(edges) + ccs = nx.connected_components(G) + + mapping = {} + for i_cc, cc in enumerate(ccs): + cc = list(cc) + mapping.update(dict(zip(cc, [i_cc] * len(cc)))) + + return mapping + + +def define_active_edges(edge_dict, mapping): + """ Labels edges as within or across segments and extracts isolated ids + + :param edge_dict: dict of np.ndarrays + :param mapping: dict + :return: dict of np.ndarrays, np.ndarray + bool arrays; True: connected 
(within same segment) + isolated node ids + """ + + def _mapping_default(key): + if key in mapping: + return mapping[key] + else: + return -1 + + mapping_vec = np.vectorize(_mapping_default) + + active = {} + isolated = [[]] + for k in edge_dict: + if len(edge_dict[k]["sv1"]) > 0: + agg_id_1 = mapping_vec(edge_dict[k]["sv1"]) + else: + assert len(edge_dict[k]["sv2"]) == 0 + active[k] = np.array([], dtype=np.bool) + continue + + agg_id_2 = mapping_vec(edge_dict[k]["sv2"]) + + active[k] = agg_id_1 == agg_id_2 + + # Set those with two -1 to False + agg_1_m = agg_id_1 == -1 + agg_2_m = agg_id_2 == -1 + active[k][agg_1_m] = False + + isolated.append(edge_dict[k]["sv1"][agg_1_m]) + + if k == "in": + isolated.append(edge_dict[k]["sv2"][agg_2_m]) + + return active, np.unique(np.concatenate(isolated).astype(basetypes.NODE_ID)) + From 00613b0dc4d516782fe36b5fe5a89affd1d6f287 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 6 Aug 2019 11:46:15 -0400 Subject: [PATCH 0093/1097] wip remove stuff not used in new format --- pychunkedgraph/backend/chunkedgraph_init.py | 136 +------------------- pychunkedgraph/ingest/ran_ingestion_v2.py | 2 +- 2 files changed, 4 insertions(+), 134 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index 51366d98f..dfd1191dd 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -47,23 +47,17 @@ def add_atomic_edges( :param time_stamp: datetime """ - time_start = time.time() chunk_node_ids, chunk_edge_ids = _get_chunk_nodes_and_edges(chunk_edges, isolated) if not chunk_node_ids: return 0 chunk_ids = cg_instance.get_chunk_ids_from_node_ids(chunk_node_ids) - u_node_chunk_ids = np.unique(chunk_ids) - assert len(u_node_chunk_ids) == 1 + assert len(np.unique(chunk_ids)) == 1 graph, _, _, unique_ids = build_gt_graph(chunk_edge_ids, make_directed=True) ccs = connected_components(graph) - if verbose: - 
cg_instance.logger.debug(f"CC in chunk: {(time.time() - time_start):.3f}s") node_c = 0 # Just a counter for the log / speed measurement - time_dict = collections.defaultdict(list) - parent_chunk_id = cg_instance.get_chunk_id(layer=2, *chunk_coord) parent_ids = cg_instance.get_unique_node_id_range(parent_chunk_id, step=len(ccs)) @@ -71,59 +65,10 @@ def add_atomic_edges( rows = [] for i_cc, cc in enumerate(ccs): node_ids = unique_ids[cc] - - u_chunk_ids = np.unique([cg_instance.get_chunk_id(n) for n in node_ids]) - - if len(u_chunk_ids) > 1: - cg_instance.logger.error(f"Found multiple chunk ids: {u_chunk_ids}") - raise Exception() - - # Create parent id parent_id = parent_ids[i_cc] - parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2) - # Add rows for nodes that are in this chunk for i_node_id, node_id in enumerate(node_ids): - # Extract edges relevant to this node - - # in chunk + connected - time_start_2 = time.time() - if node_id in remapping["in_connected"]: - row_ids, column_ids = sparse_indices["in_connected"][ - remapping["in_connected"][node_id] - ] - - inv_column_ids = (column_ids + 1) % 2 - - connected_ids = edge_id_dict["in_connected"][row_ids, inv_column_ids] - connected_affs = edge_aff_dict["in_connected"][row_ids] - connected_areas = edge_area_dict["in_connected"][row_ids] - time_dict["in_connected"].append(time.time() - time_start_2) - time_start_2 = time.time() - else: - connected_ids = np.array([], dtype=np.uint64) - connected_affs = np.array([], dtype=np.float32) - connected_areas = np.array([], dtype=np.uint64) - - # in chunk + disconnected - if node_id in remapping["in_disconnected"]: - row_ids, column_ids = sparse_indices["in_disconnected"][ - remapping["in_disconnected"][node_id] - ] - inv_column_ids = (column_ids + 1) % 2 - - disconnected_ids = edge_id_dict["in_disconnected"][ - row_ids, inv_column_ids - ] - disconnected_affs = edge_aff_dict["in_disconnected"][row_ids] - disconnected_areas = 
edge_area_dict["in_disconnected"][row_ids] - time_dict["in_disconnected"].append(time.time() - time_start_2) - time_start_2 = time.time() - else: - disconnected_ids = np.array([], dtype=np.uint64) - disconnected_affs = np.array([], dtype=np.float32) - disconnected_areas = np.array([], dtype=np.uint64) # out chunk + connected if node_id in remapping["between_connected"]: @@ -134,8 +79,6 @@ def add_atomic_edges( row_ids = row_ids[column_ids == 0] column_ids = column_ids[column_ids == 0] inv_column_ids = (column_ids + 1) % 2 - time_dict["out_connected_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() connected_ids = np.concatenate( [ @@ -154,40 +97,6 @@ def add_atomic_edges( [parent_cross_edges, edge_id_dict["between_connected"][row_ids]] ) - time_dict["out_connected"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # out chunk + disconnected - if node_id in remapping["between_disconnected"]: - row_ids, column_ids = sparse_indices["between_disconnected"][ - remapping["between_disconnected"][node_id] - ] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["out_disconnected_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - disconnected_ids = np.concatenate( - [ - disconnected_ids, - edge_id_dict["between_disconnected"][row_ids, inv_column_ids], - ] - ) - disconnected_affs = np.concatenate( - [disconnected_affs, edge_aff_dict["between_disconnected"][row_ids]] - ) - disconnected_areas = np.concatenate( - [ - disconnected_areas, - edge_area_dict["between_disconnected"][row_ids], - ] - ) - - time_dict["out_disconnected"].append(time.time() - time_start_2) - time_start_2 = time.time() - # cross if node_id in remapping["cross"]: row_ids, column_ids = sparse_indices["cross"][ @@ -195,40 +104,12 @@ def add_atomic_edges( ] row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 
1) % 2 - time_dict["cross_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - connected_ids = np.concatenate( - [connected_ids, edge_id_dict["cross"][row_ids, inv_column_ids]] - ) - connected_affs = np.concatenate( - [connected_affs, np.full((len(row_ids)), np.inf, dtype=np.float32)] - ) - connected_areas = np.concatenate( - [connected_areas, np.ones((len(row_ids)), dtype=np.uint64)] - ) parent_cross_edges = np.concatenate( [parent_cross_edges, edge_id_dict["cross"][row_ids]] ) - time_dict["cross"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # Create node - partners = np.concatenate([connected_ids, disconnected_ids]) - affinities = np.concatenate([connected_affs, disconnected_affs]) - areas = np.concatenate([connected_areas, disconnected_areas]) - connected = np.arange(len(connected_ids), dtype=np.int) - - val_dict = { - column_keys.Connectivity.Partner: partners, - column_keys.Connectivity.Affinity: affinities, - column_keys.Connectivity.Area: areas, - column_keys.Connectivity.Connected: connected, - column_keys.Hierarchy.Parent: parent_id, - } + + val_dict = {column_keys.Hierarchy.Parent: parent_id} rows.append( cg_instance.mutate_row( @@ -238,9 +119,7 @@ def add_atomic_edges( ) ) node_c += 1 - time_dict["creating_lv1_row"].append(time.time() - time_start_2) - time_start = time.time() # Create parent node rows.append( cg_instance.mutate_row( @@ -250,9 +129,6 @@ def add_atomic_edges( ) ) - time_dict["creating_lv2_row"].append(time.time() - time_start) - time_start = time.time() - cce_layers = cg_instance.get_cross_chunk_edges_layer(parent_cross_edges) u_cce_layers = np.unique(cce_layers) @@ -275,17 +151,11 @@ def add_atomic_edges( ) node_c += 1 - time_dict["adding_cross_edges"].append(time.time() - time_start) - if len(rows) > 100000: - time_start = time.time() cg_instance.bulk_write(rows) - time_dict["writing"].append(time.time() - time_start) if rows: - time_start = time.time() cg_instance.bulk_write(rows) - 
time_dict["writing"].append(time.time() - time_start) def _get_chunk_nodes_and_edges(chunk_edges: dict, isolated_ids: Sequence[np.uint64]): diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index e7436adea..485d9355b 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -270,7 +270,7 @@ def create_atomic_chunk(imanager, chunk_coord, aff_dtype=basetypes.EDGE_AFFINITY sv_ids1 = edge_dict[edge_type]["sv1"] sv_ids2 = edge_dict[edge_type]["sv2"] - ones = np.ones(len(supervoxel_ids1)) + ones = np.ones(len(sv_ids1)) affinities = edge_dict[edge_type].get("aff", float("inf") * ones) areas = edge_dict[edge_type].get("area", ones) chunk_edges[edge_type] = Edges(sv_ids1, sv_ids2, affinities, areas) From 0b0ba9c7940811254dc0e19e150a26712fe552b4 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 6 Aug 2019 14:19:40 -0400 Subject: [PATCH 0094/1097] remove unused param, change var name pep8 --- pychunkedgraph/backend/chunkedgraph_init.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index dfd1191dd..c255c7bfc 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -27,7 +27,6 @@ def add_atomic_edges( chunk_coord, chunk_edges: dict, isolated: Sequence[np.uint64], - verbose: bool = True, time_stamp: Optional[datetime.datetime] = None, ): """ @@ -63,12 +62,12 @@ def add_atomic_edges( time_stamp = _get_valid_timestamp(time_stamp) rows = [] - for i_cc, cc in enumerate(ccs): - node_ids = unique_ids[cc] + for i_cc, component in enumerate(ccs): + node_ids = unique_ids[component] parent_id = parent_ids[i_cc] parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2) - for i_node_id, node_id in enumerate(node_ids): + for node_id in node_ids: # out chunk + connected if node_id in 
remapping["between_connected"]: @@ -141,7 +140,7 @@ def add_atomic_edges( column_keys.Connectivity.CrossChunkEdge[cc_layer] ] = layer_cross_edges - if val_dict > 0: + if val_dict: rows.append( cg_instance.mutate_row( serializers.serialize_uint64(parent_id), From 0a9a2f20297cf07a0d714684e490ab895cc88898 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 7 Aug 2019 11:10:40 -0400 Subject: [PATCH 0095/1097] use only in-chunk edges for connected components --- pychunkedgraph/backend/chunkedgraph_init.py | 43 ++++++++------------- 1 file changed, 16 insertions(+), 27 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index c255c7bfc..dd7d568e6 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -15,8 +15,7 @@ from .utils import basetypes -from .utils.edges import Edges -from ..backend.utils.edges import TYPES as EDGE_TYPES, Edges +from ..edges.definitions import Edges, IN_CHUNK, TYPES as EDGE_TYPES from .chunkedgraph_utils import compute_indices_pandas, get_google_compatible_time_stamp from .flatgraph_utils import build_gt_graph, connected_components from .utils import serializers, column_keys @@ -37,12 +36,8 @@ def add_atomic_edges( (cross_edge_ids) have to point out the chunk (first entry is the id within the chunk) - :param edge_id_dict: dict - :param edge_aff_dict: dict - :param edge_area_dict: dict - :param isolated_node_ids: list of uint64s - ids of nodes that have no edge in the chunked graph - :param verbose: bool + :param chunk_edges: dict + :param isolated: list of isolated node ids :param time_stamp: datetime """ @@ -60,6 +55,15 @@ def add_atomic_edges( parent_chunk_id = cg_instance.get_chunk_id(layer=2, *chunk_coord) parent_ids = cg_instance.get_unique_node_id_range(parent_chunk_id, step=len(ccs)) + sparse_indices = {} + remapping = {} + for k in edge_id_dict.keys(): + u_ids, inv_ids = np.unique(edge_id_dict[k], 
return_inverse=True) + mapped_ids = np.arange(len(u_ids), dtype=np.int32) + remapped_arr = mapped_ids[inv_ids].reshape(edge_id_dict[k].shape) + sparse_indices[k] = compute_indices_pandas(remapped_arr) + remapping[k] = dict(zip(u_ids, mapped_ids)) + time_stamp = _get_valid_timestamp(time_stamp) rows = [] for i_cc, component in enumerate(ccs): @@ -76,22 +80,6 @@ def add_atomic_edges( ] row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - - connected_ids = np.concatenate( - [ - connected_ids, - edge_id_dict["between_connected"][row_ids, inv_column_ids], - ] - ) - connected_affs = np.concatenate( - [connected_affs, edge_aff_dict["between_connected"][row_ids]] - ) - connected_areas = np.concatenate( - [connected_areas, edge_area_dict["between_connected"][row_ids]] - ) - parent_cross_edges = np.concatenate( [parent_cross_edges, edge_id_dict["between_connected"][row_ids]] ) @@ -165,11 +153,12 @@ def _get_chunk_nodes_and_edges(chunk_edges: dict, isolated_ids: Sequence[np.uint for edge_type in EDGE_TYPES: edges = chunk_edges[edge_type] node_ids.append(edges.node_ids1) - node_ids.append(edges.node_ids2) - edge_ids.append(np.vstack([edges.node_ids1, edges.node_ids2]).T) + if edge_type == IN_CHUNK: + node_ids.append(edges.node_ids2) + edge_ids.append(np.vstack([edges.node_ids1, edges.node_ids2]).T) chunk_node_ids = np.unique(np.concatenate(node_ids)) - chunk_edge_ids = np.unique(np.concatenate(edge_ids)) + chunk_edge_ids = np.concatenate(edge_ids) return (chunk_node_ids, chunk_edge_ids) From 9831932de26c5c0829c65ed66fe92269f78f1d04 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 7 Aug 2019 12:01:47 -0400 Subject: [PATCH 0096/1097] helper functions for parent cross edges and get edges as pairs --- pychunkedgraph/backend/chunkedgraph_init.py | 105 ++++++++++++-------- pychunkedgraph/edges/definitions.py | 10 ++ 2 files changed, 71 insertions(+), 44 deletions(-) diff --git 
a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index dd7d568e6..1d5925744 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -14,17 +14,18 @@ from google.cloud.bigtable.column_family import MaxVersionsGCRule +from .chunkedgraph import ChunkedGraph from .utils import basetypes -from ..edges.definitions import Edges, IN_CHUNK, TYPES as EDGE_TYPES +from ..edges.definitions import Edges, IN_CHUNK, BT_CHUNK, CX_CHUNK, TYPES as EDGE_TYPES from .chunkedgraph_utils import compute_indices_pandas, get_google_compatible_time_stamp from .flatgraph_utils import build_gt_graph, connected_components from .utils import serializers, column_keys def add_atomic_edges( - cg_instance, - chunk_coord, - chunk_edges: dict, + cg_instance: ChunkedGraph, + chunk_coord: np.ndarray, + chunk_edges_d: dict, isolated: Sequence[np.uint64], time_stamp: Optional[datetime.datetime] = None, ): @@ -36,12 +37,14 @@ def add_atomic_edges( (cross_edge_ids) have to point out the chunk (first entry is the id within the chunk) - :param chunk_edges: dict + :param cg_instance: + :param chunk_coord: [x,y,z] + :param chunk_edges_d: dict of {"edge_type": Edges} :param isolated: list of isolated node ids :param time_stamp: datetime """ - chunk_node_ids, chunk_edge_ids = _get_chunk_nodes_and_edges(chunk_edges, isolated) + chunk_node_ids, chunk_edge_ids = _get_chunk_nodes_and_edges(chunk_edges_d, isolated) if not chunk_node_ids: return 0 @@ -55,47 +58,19 @@ def add_atomic_edges( parent_chunk_id = cg_instance.get_chunk_id(layer=2, *chunk_coord) parent_ids = cg_instance.get_unique_node_id_range(parent_chunk_id, step=len(ccs)) - sparse_indices = {} - remapping = {} - for k in edge_id_dict.keys(): - u_ids, inv_ids = np.unique(edge_id_dict[k], return_inverse=True) - mapped_ids = np.arange(len(u_ids), dtype=np.int32) - remapped_arr = mapped_ids[inv_ids].reshape(edge_id_dict[k].shape) - sparse_indices[k] = 
compute_indices_pandas(remapped_arr) - remapping[k] = dict(zip(u_ids, mapped_ids)) - + sparse_indices, remapping = _get_remapping(chunk_edges_d) time_stamp = _get_valid_timestamp(time_stamp) rows = [] for i_cc, component in enumerate(ccs): node_ids = unique_ids[component] parent_id = parent_ids[i_cc] - parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2) + parent_cross_edges = [] for node_id in node_ids: - - # out chunk + connected - if node_id in remapping["between_connected"]: - row_ids, column_ids = sparse_indices["between_connected"][ - remapping["between_connected"][node_id] - ] - - row_ids = row_ids[column_ids == 0] - parent_cross_edges = np.concatenate( - [parent_cross_edges, edge_id_dict["between_connected"][row_ids]] - ) - - # cross - if node_id in remapping["cross"]: - row_ids, column_ids = sparse_indices["cross"][ - remapping["cross"][node_id] - ] - - row_ids = row_ids[column_ids == 0] - - parent_cross_edges = np.concatenate( - [parent_cross_edges, edge_id_dict["cross"][row_ids]] - ) - + _edges = _get_parent_cross_edges( + node_id, chunk_edges_d, sparse_indices, remapping + ) + parent_cross_edges.append(_edges) val_dict = {column_keys.Hierarchy.Parent: parent_id} rows.append( @@ -116,6 +91,7 @@ def add_atomic_edges( ) ) + parent_cross_edges = np.concatenate(parent_cross_edges) cce_layers = cg_instance.get_cross_chunk_edges_layer(parent_cross_edges) u_cce_layers = np.unique(cce_layers) @@ -145,17 +121,19 @@ def add_atomic_edges( cg_instance.bulk_write(rows) -def _get_chunk_nodes_and_edges(chunk_edges: dict, isolated_ids: Sequence[np.uint64]): - """get all nodes and edges in the chunk""" +def _get_chunk_nodes_and_edges(chunk_edges_d: dict, isolated_ids: Sequence[np.uint64]): + """ + returns IN_CHUNK edges and nodes_ids + """ isolated_nodes_self_edges = np.vstack([isolated_ids, isolated_ids]).T node_ids = [isolated_ids] edge_ids = [isolated_nodes_self_edges] for edge_type in EDGE_TYPES: - edges = chunk_edges[edge_type] + edges = 
chunk_edges_d[edge_type] node_ids.append(edges.node_ids1) if edge_type == IN_CHUNK: node_ids.append(edges.node_ids2) - edge_ids.append(np.vstack([edges.node_ids1, edges.node_ids2]).T) + edge_ids.append(edges.get_pairs()) chunk_node_ids = np.unique(np.concatenate(node_ids)) chunk_edge_ids = np.concatenate(edge_ids) @@ -163,6 +141,22 @@ def _get_chunk_nodes_and_edges(chunk_edges: dict, isolated_ids: Sequence[np.uint return (chunk_node_ids, chunk_edge_ids) +def _get_remapping(chunk_edges_d: dict): + """ + TODO add logic + """ + sparse_indices = {} + remapping = {} + for edge_type in EDGE_TYPES: + edges = chunk_edges_d[edge_type].get_pairs() + u_ids, inv_ids = np.unique(edges, return_inverse=True) + mapped_ids = np.arange(len(u_ids), dtype=np.int32) + remapped_arr = mapped_ids[inv_ids].reshape(edges.shape) + sparse_indices[edge_type] = compute_indices_pandas(remapped_arr) + remapping[edge_type] = dict(zip(u_ids, mapped_ids)) + return sparse_indices, remapping + + def _get_valid_timestamp(timestamp): if timestamp is None: timestamp = datetime.datetime.utcnow() @@ -172,3 +166,26 @@ def _get_valid_timestamp(timestamp): # Comply to resolution of BigTables TimeRange return get_google_compatible_time_stamp(timestamp, round_up=False) + + +def _get_parent_cross_edges(node_id, chunk_edges_d, sparse_indices, remapping): + """ + TODO add docs + """ + parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2) + if node_id in remapping[BT_CHUNK]: + row_ids, column_ids = sparse_indices[BT_CHUNK][remapping[BT_CHUNK][node_id]] + row_ids = row_ids[column_ids == 0] + participating_edges = chunk_edges_d[BT_CHUNK][row_ids] + parent_cross_edges = np.concatenate( + [parent_cross_edges, participating_edges.get_pairs()] + ) + if node_id in remapping[CX_CHUNK]: + row_ids, column_ids = sparse_indices[CX_CHUNK][remapping[CX_CHUNK][node_id]] + row_ids = row_ids[column_ids == 0] + participating_edges = chunk_edges_d[CX_CHUNK][row_ids] + parent_cross_edges = np.concatenate( + 
[parent_cross_edges, participating_edges.get_pairs()] + ) + return parent_cross_edges + diff --git a/pychunkedgraph/edges/definitions.py b/pychunkedgraph/edges/definitions.py index a6008fd3f..c3be7f30f 100644 --- a/pychunkedgraph/edges/definitions.py +++ b/pychunkedgraph/edges/definitions.py @@ -23,3 +23,13 @@ def __init__( self.node_ids2 = node_ids2 self.affinities = affinities self.areas = areas + self._as_pairs = None + + def get_pairs(self): + """ + return numpy array of edge pairs [[sv1, sv2] ... ] + """ + if self._as_pairs: + return self._as_pairs + self._as_pairs = np.vstack([self.node_ids1, self.node_ids2]).T + return self._as_pairs From 43fda636f27a341a7c04fefc46881d98c3938a7c Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 7 Aug 2019 13:31:57 -0400 Subject: [PATCH 0097/1097] remove redundant line --- pychunkedgraph/backend/chunkedgraph_init.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index 1d5925744..917f9e654 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -54,7 +54,6 @@ def add_atomic_edges( graph, _, _, unique_ids = build_gt_graph(chunk_edge_ids, make_directed=True) ccs = connected_components(graph) - node_c = 0 # Just a counter for the log / speed measurement parent_chunk_id = cg_instance.get_chunk_id(layer=2, *chunk_coord) parent_ids = cg_instance.get_unique_node_id_range(parent_chunk_id, step=len(ccs)) @@ -80,7 +79,6 @@ def add_atomic_edges( time_stamp=time_stamp, ) ) - node_c += 1 # Create parent node rows.append( @@ -112,13 +110,11 @@ def add_atomic_edges( time_stamp=time_stamp, ) ) - node_c += 1 if len(rows) > 100000: cg_instance.bulk_write(rows) - if rows: - cg_instance.bulk_write(rows) + cg_instance.bulk_write(rows) def _get_chunk_nodes_and_edges(chunk_edges_d: dict, isolated_ids: Sequence[np.uint64]): @@ -188,4 +184,3 @@ def 
_get_parent_cross_edges(node_id, chunk_edges_d, sparse_indices, remapping): [parent_cross_edges, participating_edges.get_pairs()] ) return parent_cross_edges - From 5188e4eb7db1d811f63bf9ea27a6464ba313b092 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 7 Aug 2019 13:55:22 -0400 Subject: [PATCH 0098/1097] reset rows list after write --- pychunkedgraph/backend/chunkedgraph_init.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index 917f9e654..dededf32b 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -113,6 +113,7 @@ def add_atomic_edges( if len(rows) > 100000: cg_instance.bulk_write(rows) + rows = [] cg_instance.bulk_write(rows) From 11c2142a42eae19b428676190dc8e4d233ac7fa6 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 7 Aug 2019 14:14:04 -0400 Subject: [PATCH 0099/1097] remove more redundant code --- pychunkedgraph/backend/chunkedgraph_init.py | 22 +++++---------------- 1 file changed, 5 insertions(+), 17 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index dededf32b..7d45810f8 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -80,20 +80,11 @@ def add_atomic_edges( ) ) - # Create parent node - rows.append( - cg_instance.mutate_row( - serializers.serialize_uint64(parent_id), - {column_keys.Hierarchy.Child: node_ids}, - time_stamp=time_stamp, - ) - ) - parent_cross_edges = np.concatenate(parent_cross_edges) cce_layers = cg_instance.get_cross_chunk_edges_layer(parent_cross_edges) u_cce_layers = np.unique(cce_layers) - val_dict = {} + val_dict = {column_keys.Hierarchy.Child: node_ids} for cc_layer in u_cce_layers: layer_cross_edges = parent_cross_edges[cce_layers == cc_layer] @@ -102,14 +93,11 @@ def add_atomic_edges( 
column_keys.Connectivity.CrossChunkEdge[cc_layer] ] = layer_cross_edges - if val_dict: - rows.append( - cg_instance.mutate_row( - serializers.serialize_uint64(parent_id), - val_dict, - time_stamp=time_stamp, - ) + rows.append( + cg_instance.mutate_row( + serializers.serialize_uint64(parent_id), val_dict, time_stamp=time_stamp ) + ) if len(rows) > 100000: cg_instance.bulk_write(rows) From 8eee4ec90650627392d3d335b171a7fe8a82ec4a Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 7 Aug 2019 14:42:45 -0400 Subject: [PATCH 0100/1097] use predefined types in basetypes --- pychunkedgraph/backend/chunkedgraph_init.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index 7d45810f8..278ab2d75 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -2,9 +2,7 @@ Module for stuff related to creating the initial chunkedgraph """ -import time import datetime -import collections from typing import Optional, Sequence import pytz @@ -26,7 +24,7 @@ def add_atomic_edges( cg_instance: ChunkedGraph, chunk_coord: np.ndarray, chunk_edges_d: dict, - isolated: Sequence[np.uint64], + isolated: Sequence[basetypes.NODE_ID], time_stamp: Optional[datetime.datetime] = None, ): """ @@ -106,7 +104,9 @@ def add_atomic_edges( cg_instance.bulk_write(rows) -def _get_chunk_nodes_and_edges(chunk_edges_d: dict, isolated_ids: Sequence[np.uint64]): +def _get_chunk_nodes_and_edges( + chunk_edges_d: dict, isolated_ids: Sequence[basetypes.NODE_ID] +): """ returns IN_CHUNK edges and nodes_ids """ @@ -128,7 +128,7 @@ def _get_chunk_nodes_and_edges(chunk_edges_d: dict, isolated_ids: Sequence[np.ui def _get_remapping(chunk_edges_d: dict): """ - TODO add logic + TODO add logic explanation """ sparse_indices = {} remapping = {} @@ -157,7 +157,7 @@ def _get_parent_cross_edges(node_id, chunk_edges_d, sparse_indices, remapping): """ TODO 
add docs """ - parent_cross_edges = np.array([], dtype=np.uint64).reshape(0, 2) + parent_cross_edges = np.array([], dtype=basetypes.NODE_ID).reshape(0, 2) if node_id in remapping[BT_CHUNK]: row_ids, column_ids = sparse_indices[BT_CHUNK][remapping[BT_CHUNK][node_id]] row_ids = row_ids[column_ids == 0] From 13d1513652418ffe7034a9e835512e6275758cb1 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 7 Aug 2019 16:05:26 -0400 Subject: [PATCH 0101/1097] pinky changes --- pychunkedgraph/ingest/ingestion_utils.py | 26 +++++++++++++++++++ pychunkedgraph/ingest/ingestionmanager.py | 31 +++++++++++++++++++++-- 2 files changed, 55 insertions(+), 2 deletions(-) diff --git a/pychunkedgraph/ingest/ingestion_utils.py b/pychunkedgraph/ingest/ingestion_utils.py index 3a3897940..fdf3c3f4d 100644 --- a/pychunkedgraph/ingest/ingestion_utils.py +++ b/pychunkedgraph/ingest/ingestion_utils.py @@ -2,6 +2,7 @@ from pychunkedgraph.backend import chunkedgraph, chunkedgraph_utils import cloudvolume +import collections def calc_n_layers(ws_cv, chunk_size, fan_out): @@ -79,3 +80,28 @@ def initialize_chunkedgraph(cg_table_id, ws_cv_path, chunk_size, size, cg = chunkedgraph.ChunkedGraph(**kwargs) return cg, n_layers_agg + + +def postprocess_edge_data(im, edge_dict): + if im.data_version == 2: + return edge_dict + elif im.data_version in [3, 4]: + new_edge_dict = {} + for k in edge_dict: + areas = edge_dict[k]["area_x"] * im.cg.cv.resolution[0] + \ + edge_dict[k]["area_y"] * im.cg.cv.resolution[1] + \ + edge_dict[k]["area_z"] * im.cg.cv.resolution[2] + + affs = edge_dict[k]["aff_x"] * im.cg.cv.resolution[0] + \ + edge_dict[k]["aff_y"] * im.cg.cv.resolution[1] + \ + edge_dict[k]["aff_z"] * im.cg.cv.resolution[2] + + new_edge_dict[k] = {} + new_edge_dict[k]["sv1"] = edge_dict[k]["sv1"] + new_edge_dict[k]["sv2"] = edge_dict[k]["sv2"] + new_edge_dict[k]["area"] = areas + new_edge_dict[k]["aff"] = affs + + return new_edge_dict + else: + raise Exception(f"Unknown data_version: {data_version}") \ 
No newline at end of file diff --git a/pychunkedgraph/ingest/ingestionmanager.py b/pychunkedgraph/ingest/ingestionmanager.py index 55b132960..8b5eb2e63 100644 --- a/pychunkedgraph/ingest/ingestionmanager.py +++ b/pychunkedgraph/ingest/ingestionmanager.py @@ -6,18 +6,44 @@ class IngestionManager(object): def __init__(self, storage_path, cg_table_id=None, n_layers=None, - instance_id=None, project_id=None): + instance_id=None, project_id=None, data_version=2): self._storage_path = storage_path self._cg_table_id = cg_table_id self._instance_id = instance_id self._project_id = project_id self._cg = None self._n_layers = n_layers + self._data_version = data_version @property def storage_path(self): return self._storage_path + @property + def data_version(self): + assert self._data_version in [2, 3, 4] + return self._data_version + + @property + def edge_dtype(self): + if self.data_version == 4: + dtype = [("sv1", np.uint64), ("sv2", np.uint64), + ("aff_x", np.float32), ("area_x", np.uint64), + ("aff_y", np.float32), ("area_y", np.uint64), + ("aff_z", np.float32), ("area_z", np.uint64)] + elif self.data_version == 3: + dtype = [("sv1", np.uint64), ("sv2", np.uint64), + ("aff_x", np.float64), ("area_x", np.uint64), + ("aff_y", np.float64), ("area_y", np.uint64), + ("aff_z", np.float64), ("area_z", np.uint64)] + elif self.data_version == 2: + dtype = [("sv1", np.uint64), ("sv2", np.uint64), + ("aff", np.float32), ("area", np.uint64)] + else: + raise Exception() + + return dtype + @property def cg(self): if self._cg is None: @@ -64,7 +90,8 @@ def get_serialized_info(self): "cg_table_id": self._cg_table_id, "n_layers": self.n_layers, "instance_id": self._instance_id, - "project_id": self._project_id} + "project_id": self._project_id, + "data_version": self.data_version} return info From ed9b0933a0d44d3d28153958ac41347935b14078 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 8 Aug 2019 10:02:43 -0400 Subject: [PATCH 0102/1097] use only between and cross edges to 
get remapping --- pychunkedgraph/backend/chunkedgraph_init.py | 24 ++++++++------------- pychunkedgraph/ingest/ran_ingestion_v2.py | 4 ---- 2 files changed, 9 insertions(+), 19 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index 278ab2d75..be3b9bba0 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -132,7 +132,7 @@ def _get_remapping(chunk_edges_d: dict): """ sparse_indices = {} remapping = {} - for edge_type in EDGE_TYPES: + for edge_type in [BT_CHUNK, CX_CHUNK]: edges = chunk_edges_d[edge_type].get_pairs() u_ids, inv_ids = np.unique(edges, return_inverse=True) mapped_ids = np.arange(len(u_ids), dtype=np.int32) @@ -158,18 +158,12 @@ def _get_parent_cross_edges(node_id, chunk_edges_d, sparse_indices, remapping): TODO add docs """ parent_cross_edges = np.array([], dtype=basetypes.NODE_ID).reshape(0, 2) - if node_id in remapping[BT_CHUNK]: - row_ids, column_ids = sparse_indices[BT_CHUNK][remapping[BT_CHUNK][node_id]] - row_ids = row_ids[column_ids == 0] - participating_edges = chunk_edges_d[BT_CHUNK][row_ids] - parent_cross_edges = np.concatenate( - [parent_cross_edges, participating_edges.get_pairs()] - ) - if node_id in remapping[CX_CHUNK]: - row_ids, column_ids = sparse_indices[CX_CHUNK][remapping[CX_CHUNK][node_id]] - row_ids = row_ids[column_ids == 0] - participating_edges = chunk_edges_d[CX_CHUNK][row_ids] - parent_cross_edges = np.concatenate( - [parent_cross_edges, participating_edges.get_pairs()] - ) + for edge_type in remapping: + if node_id in remapping[edge_type]: + row_ids, column_ids = sparse_indices[edge_type][remapping[edge_type][node_id]] + row_ids = row_ids[column_ids == 0] + participating_edges = chunk_edges_d[edge_type][row_ids] + parent_cross_edges = np.concatenate( + [parent_cross_edges, participating_edges.get_pairs()] + ) return parent_cross_edges diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py 
b/pychunkedgraph/ingest/ran_ingestion_v2.py index 485d9355b..a0bea04ea 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -85,10 +85,6 @@ def ingest_into_chunkedgraph( project_id=project_id, ) - # #TODO: Remove later: - # logging.basicConfig(level=logging.DEBUG) - # im.cg.logger = logging.getLogger(__name__) - # ------------------------------------------ if start_layer < 3: create_atomic_chunks(im, aff_dtype=aff_dtype, n_threads=n_threads[0]) From 083e2795d4492cd876b9e5066a064e80d09522b4 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 8 Aug 2019 10:12:29 -0400 Subject: [PATCH 0103/1097] shorter variables names for better formatting --- pychunkedgraph/backend/chunkedgraph_init.py | 33 +++++++-------------- 1 file changed, 11 insertions(+), 22 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index be3b9bba0..285fd36fe 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -64,19 +64,12 @@ def add_atomic_edges( parent_cross_edges = [] for node_id in node_ids: - _edges = _get_parent_cross_edges( - node_id, chunk_edges_d, sparse_indices, remapping - ) + _edges = _get_out_edges(node_id, chunk_edges_d, sparse_indices, remapping) parent_cross_edges.append(_edges) val_dict = {column_keys.Hierarchy.Parent: parent_id} - rows.append( - cg_instance.mutate_row( - serializers.serialize_uint64(node_id), - val_dict, - time_stamp=time_stamp, - ) - ) + r_key = serializers.serialize_uint64(node_id) + rows.append(cg_instance.mutate_row(r_key, val_dict, time_stamp=time_stamp)) parent_cross_edges = np.concatenate(parent_cross_edges) cce_layers = cg_instance.get_cross_chunk_edges_layer(parent_cross_edges) @@ -85,22 +78,16 @@ def add_atomic_edges( val_dict = {column_keys.Hierarchy.Child: node_ids} for cc_layer in u_cce_layers: layer_cross_edges = parent_cross_edges[cce_layers == cc_layer] - if 
layer_cross_edges: - val_dict[ - column_keys.Connectivity.CrossChunkEdge[cc_layer] - ] = layer_cross_edges + col_key = column_keys.Connectivity.CrossChunkEdge[cc_layer] + val_dict[col_key] = layer_cross_edges - rows.append( - cg_instance.mutate_row( - serializers.serialize_uint64(parent_id), val_dict, time_stamp=time_stamp - ) - ) + r_key = serializers.serialize_uint64(parent_id) + rows.append(cg_instance.mutate_row(r_key, val_dict, time_stamp=time_stamp)) if len(rows) > 100000: cg_instance.bulk_write(rows) rows = [] - cg_instance.bulk_write(rows) @@ -153,14 +140,16 @@ def _get_valid_timestamp(timestamp): return get_google_compatible_time_stamp(timestamp, round_up=False) -def _get_parent_cross_edges(node_id, chunk_edges_d, sparse_indices, remapping): +def _get_out_edges(node_id, chunk_edges_d, sparse_indices, remapping): """ TODO add docs """ parent_cross_edges = np.array([], dtype=basetypes.NODE_ID).reshape(0, 2) for edge_type in remapping: if node_id in remapping[edge_type]: - row_ids, column_ids = sparse_indices[edge_type][remapping[edge_type][node_id]] + row_ids, column_ids = sparse_indices[edge_type][ + remapping[edge_type][node_id] + ] row_ids = row_ids[column_ids == 0] participating_edges = chunk_edges_d[edge_type][row_ids] parent_cross_edges = np.concatenate( From 85eb07f5f57af8d899791078b0d1221fe1f9305b Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 8 Aug 2019 10:15:53 -0400 Subject: [PATCH 0104/1097] add some documentation --- pychunkedgraph/backend/chunkedgraph_init.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index 285fd36fe..43f4fa4c1 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -61,23 +61,23 @@ def add_atomic_edges( for i_cc, component in enumerate(ccs): node_ids = unique_ids[component] parent_id = parent_ids[i_cc] - parent_cross_edges = [] 
+ chunk_out_edges = [] for node_id in node_ids: _edges = _get_out_edges(node_id, chunk_edges_d, sparse_indices, remapping) - parent_cross_edges.append(_edges) + chunk_out_edges.append(_edges) val_dict = {column_keys.Hierarchy.Parent: parent_id} r_key = serializers.serialize_uint64(node_id) rows.append(cg_instance.mutate_row(r_key, val_dict, time_stamp=time_stamp)) - parent_cross_edges = np.concatenate(parent_cross_edges) - cce_layers = cg_instance.get_cross_chunk_edges_layer(parent_cross_edges) + chunk_out_edges = np.concatenate(chunk_out_edges) + cce_layers = cg_instance.get_cross_chunk_edges_layer(chunk_out_edges) u_cce_layers = np.unique(cce_layers) val_dict = {column_keys.Hierarchy.Child: node_ids} for cc_layer in u_cce_layers: - layer_cross_edges = parent_cross_edges[cce_layers == cc_layer] + layer_cross_edges = chunk_out_edges[cce_layers == cc_layer] if layer_cross_edges: col_key = column_keys.Connectivity.CrossChunkEdge[cc_layer] val_dict[col_key] = layer_cross_edges @@ -143,16 +143,18 @@ def _get_valid_timestamp(timestamp): def _get_out_edges(node_id, chunk_edges_d, sparse_indices, remapping): """ TODO add docs + returns edges pointing outside the chunk """ - parent_cross_edges = np.array([], dtype=basetypes.NODE_ID).reshape(0, 2) + chunk_out_edges = np.array([], dtype=basetypes.NODE_ID).reshape(0, 2) for edge_type in remapping: if node_id in remapping[edge_type]: row_ids, column_ids = sparse_indices[edge_type][ remapping[edge_type][node_id] ] row_ids = row_ids[column_ids == 0] + # edges that this node is part of participating_edges = chunk_edges_d[edge_type][row_ids] - parent_cross_edges = np.concatenate( - [parent_cross_edges, participating_edges.get_pairs()] + chunk_out_edges = np.concatenate( + [chunk_out_edges, participating_edges.get_pairs()] ) - return parent_cross_edges + return chunk_out_edges From 08ac22d6900932e8ce25afb0544e0ae64a9b458d Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 8 Aug 2019 10:31:59 -0400 Subject: [PATCH 
0105/1097] refactor: move componenet logic to its own function --- pychunkedgraph/backend/chunkedgraph_init.py | 70 +++++++++++++-------- 1 file changed, 45 insertions(+), 25 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index 43f4fa4c1..1f7b85442 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -59,31 +59,16 @@ def add_atomic_edges( time_stamp = _get_valid_timestamp(time_stamp) rows = [] for i_cc, component in enumerate(ccs): - node_ids = unique_ids[component] - parent_id = parent_ids[i_cc] - chunk_out_edges = [] - - for node_id in node_ids: - _edges = _get_out_edges(node_id, chunk_edges_d, sparse_indices, remapping) - chunk_out_edges.append(_edges) - val_dict = {column_keys.Hierarchy.Parent: parent_id} - - r_key = serializers.serialize_uint64(node_id) - rows.append(cg_instance.mutate_row(r_key, val_dict, time_stamp=time_stamp)) - - chunk_out_edges = np.concatenate(chunk_out_edges) - cce_layers = cg_instance.get_cross_chunk_edges_layer(chunk_out_edges) - u_cce_layers = np.unique(cce_layers) - - val_dict = {column_keys.Hierarchy.Child: node_ids} - for cc_layer in u_cce_layers: - layer_cross_edges = chunk_out_edges[cce_layers == cc_layer] - if layer_cross_edges: - col_key = column_keys.Connectivity.CrossChunkEdge[cc_layer] - val_dict[col_key] = layer_cross_edges - - r_key = serializers.serialize_uint64(parent_id) - rows.append(cg_instance.mutate_row(r_key, val_dict, time_stamp=time_stamp)) + _rows = _process_component( + cg_instance, + chunk_edges_d, + parent_ids[i_cc], + unique_ids[component], + sparse_indices, + remapping, + time_stamp, + ) + rows.extend(_rows) if len(rows) > 100000: cg_instance.bulk_write(rows) @@ -140,6 +125,41 @@ def _get_valid_timestamp(timestamp): return get_google_compatible_time_stamp(timestamp, round_up=False) +def _process_component( + cg_instance, + chunk_edges_d, + parent_id, + node_ids, + sparse_indices, + 
remapping, + time_stamp, +): + rows = [] + chunk_out_edges = [] # out = between + cross + for node_id in node_ids: + _edges = _get_out_edges(node_id, chunk_edges_d, sparse_indices, remapping) + chunk_out_edges.append(_edges) + val_dict = {column_keys.Hierarchy.Parent: parent_id} + + r_key = serializers.serialize_uint64(node_id) + rows.append(cg_instance.mutate_row(r_key, val_dict, time_stamp=time_stamp)) + + chunk_out_edges = np.concatenate(chunk_out_edges) + cce_layers = cg_instance.get_cross_chunk_edges_layer(chunk_out_edges) + u_cce_layers = np.unique(cce_layers) + + val_dict = {column_keys.Hierarchy.Child: node_ids} + for cc_layer in u_cce_layers: + layer_out_edges = chunk_out_edges[cce_layers == cc_layer] + if layer_out_edges: + col_key = column_keys.Connectivity.CrossChunkEdge[cc_layer] + val_dict[col_key] = layer_out_edges + + r_key = serializers.serialize_uint64(parent_id) + rows.append(cg_instance.mutate_row(r_key, val_dict, time_stamp=time_stamp)) + return rows + + def _get_out_edges(node_id, chunk_edges_d, sparse_indices, remapping): """ TODO add docs From d574181d7b015984298a4ae7d36ea71e51d768a3 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 8 Aug 2019 14:59:32 -0400 Subject: [PATCH 0106/1097] command to test ingestion --- pychunkedgraph/backend/chunkedgraph_init.py | 2 +- pychunkedgraph/ingest/ran_ingestion_v2.py | 99 +++++++-------------- 2 files changed, 31 insertions(+), 70 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index 1f7b85442..ba56098db 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -163,7 +163,7 @@ def _process_component( def _get_out_edges(node_id, chunk_edges_d, sparse_indices, remapping): """ TODO add docs - returns edges pointing outside the chunk + returns edges of node_id pointing outside the chunk (between and cross) """ chunk_out_edges = np.array([], dtype=basetypes.NODE_ID).reshape(0, 2) 
for edge_type in remapping: diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index a0bea04ea..e4a636404 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -4,8 +4,8 @@ import collections import time -import random +import click import pandas as pd import cloudvolume import networkx as nx @@ -14,26 +14,37 @@ import zstandard as zstd from multiwrapper import multiprocessing_utils as mu +from flask import current_app +from flask.cli import AppGroup + from pychunkedgraph.ingest import ingestionmanager, ingestion_utils as iu from ..backend.chunkedgraph_init import add_atomic_edges from ..edges.definitions import TYPES as EDGE_TYPES, Edges from ..backend.utils import basetypes +ingest_cli = AppGroup("ingest") + +@ingest_cli.command("atomic") +@click.argument("storage_path", type=str) +@click.argument("ws_cv_path", type=str) +@click.argument("cg_table_id", type=str) +@click.argument("bits", type=int) +@click.argument("edge_dir", type=str) def ingest_into_chunkedgraph( storage_path, ws_cv_path, cg_table_id, - chunk_size=[256, 256, 512], + chunk_size=[512, 512, 128], use_skip_connections=True, - s_bits_atomic_layer=None, + bits=None, fan_out=2, aff_dtype=np.float32, size=None, instance_id=None, project_id=None, start_layer=1, - n_threads=[64, 64], + edge_dir=None, ): """ Creates a chunkedgraph from a Ran Agglomerattion @@ -70,7 +81,7 @@ def ingest_into_chunkedgraph( chunk_size=chunk_size, size=size, use_skip_connections=use_skip_connections, - s_bits_atomic_layer=s_bits_atomic_layer, + s_bits_atomic_layer=bits, cg_mesh_dir=cg_mesh_dir, fan_out=fan_out, instance_id=instance_id, @@ -85,10 +96,9 @@ def ingest_into_chunkedgraph( project_id=project_id, ) - if start_layer < 3: - create_atomic_chunks(im, aff_dtype=aff_dtype, n_threads=n_threads[0]) - - create_abstract_layers(im, n_threads=n_threads[1], start_layer=start_layer) + # if start_layer < 3: + create_atomic_chunks(im, 
edge_dir) + # create_abstract_layers(im, n_threads=n_threads[1], start_layer=start_layer) return im @@ -185,76 +195,27 @@ def _create_layers(args): ) -def create_atomic_chunks(im, aff_dtype=np.float32, n_threads=1, block_size=100): - """ Creates all atomic chunks - - :param im: IngestionManager - :param aff_dtype: np.dtype - affinity datatype (np.float32 or np.float64) - :param n_threads: int - number of threads to use - :return: - """ - - im_info = im.get_serialized_info() - - multi_args = [] - - # Randomize chunk order +def create_atomic_chunks(im, edge_dir): + """ Creates all atomic chunks""" chunk_coords = list(im.chunk_coord_gen) - order = np.arange(len(chunk_coords), dtype=np.int) - np.random.shuffle(order) - - # Block chunks - block_size = min(block_size, int(np.ceil(len(chunk_coords) / n_threads / 3))) - n_blocks = int(len(chunk_coords) / block_size) - blocks = np.array_split(order, n_blocks) - - for i_block, block in enumerate(blocks): - chunks = [] - for b_idx in block: - chunks.append(chunk_coords[b_idx]) - - multi_args.append([im_info, aff_dtype, n_blocks, i_block, chunks]) + np.random.shuffle(chunk_coords) - if n_threads == 1: - mu.multiprocess_func( + for chunk_coord in chunk_coords[:5]: + current_app.test_q.enqueue( _create_atomic_chunk, - multi_args, - n_threads=n_threads, - verbose=True, - debug=n_threads == 1, + job_timeout="59m", + args=(im.get_serialized_info(), chunk_coord, edge_dir), ) - else: - mu.multisubprocess_func(_create_atomic_chunk, multi_args, n_threads=n_threads) -def _create_atomic_chunk(args): +def _create_atomic_chunk(im_info, chunk_coord, edge_dir): """ Multiprocessing helper for create_atomic_chunks """ - im_info, aff_dtype, n_blocks, i_block, chunks = args - im = ingestionmanager.IngestionManager(**im_info) - - for i_chunk, chunk_coord in enumerate(chunks): - time_start = time.time() - - create_atomic_chunk(im, chunk_coord, aff_dtype=aff_dtype, verbose=True) - - print( - f"Layer 1 - {chunk_coord} - Job {i_block + 1} / 
{n_blocks} - " - f"{i_chunk + 1} / {len(chunks)} -- %.3fs" % (time.time() - time_start) - ) + imanager = ingestionmanager.IngestionManager(**im_info) + create_atomic_chunk(imanager, chunk_coord) def create_atomic_chunk(imanager, chunk_coord, aff_dtype=basetypes.EDGE_AFFINITY): - """ Creates single atomic chunk - - :param imanager: IngestionManager - :param chunk_coord: np.ndarray - array of three ints - :param aff_dtype: np.dtype - np.float64 or np.float32 - :return: - """ + """ Creates single atomic chunk""" chunk_coord = np.array(list(chunk_coord), dtype=np.int) edge_dict = collect_edge_data(imanager, chunk_coord, aff_dtype=aff_dtype) From a1f80a37e9bb08b673c42682aaa35a8bfec45134 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 8 Aug 2019 15:16:10 -0400 Subject: [PATCH 0107/1097] wip --- pychunkedgraph/ingest/ran_ingestion_v2.py | 21 +++++++++++---- pychunkedgraph/io/edge_storage.py | 33 ++++++++++------------- 2 files changed, 30 insertions(+), 24 deletions(-) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index e4a636404..973fc46a5 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -21,6 +21,7 @@ from ..backend.chunkedgraph_init import add_atomic_edges from ..edges.definitions import TYPES as EDGE_TYPES, Edges from ..backend.utils import basetypes +from ..io.edge_storage import put_chunk_edges ingest_cli = AppGroup("ingest") @@ -37,7 +38,6 @@ def ingest_into_chunkedgraph( cg_table_id, chunk_size=[512, 512, 128], use_skip_connections=True, - bits=None, fan_out=2, aff_dtype=np.float32, size=None, @@ -81,7 +81,7 @@ def ingest_into_chunkedgraph( chunk_size=chunk_size, size=size, use_skip_connections=use_skip_connections, - s_bits_atomic_layer=bits, + s_bits_atomic_layer=10, cg_mesh_dir=cg_mesh_dir, fan_out=fan_out, instance_id=instance_id, @@ -94,6 +94,7 @@ def ingest_into_chunkedgraph( n_layers=n_layers_agg, instance_id=instance_id, 
project_id=project_id, + data_version=4, ) # if start_layer < 3: @@ -211,14 +212,16 @@ def create_atomic_chunks(im, edge_dir): def _create_atomic_chunk(im_info, chunk_coord, edge_dir): """ Multiprocessing helper for create_atomic_chunks """ imanager = ingestionmanager.IngestionManager(**im_info) - create_atomic_chunk(imanager, chunk_coord) + create_atomic_chunk(imanager, chunk_coord, edge_dir) -def create_atomic_chunk(imanager, chunk_coord, aff_dtype=basetypes.EDGE_AFFINITY): +def create_atomic_chunk(imanager, chunk_coord, edge_dir): """ Creates single atomic chunk""" chunk_coord = np.array(list(chunk_coord), dtype=np.int) - edge_dict = collect_edge_data(imanager, chunk_coord, aff_dtype=aff_dtype) + edge_dict = collect_edge_data( + imanager, chunk_coord, aff_dtype=basetypes.EDGE_AFFINITY + ) mapping = collect_agglomeration_data(imanager, chunk_coord) _, isolated_ids = define_active_edges(edge_dict, mapping) @@ -232,7 +235,15 @@ def create_atomic_chunk(imanager, chunk_coord, aff_dtype=basetypes.EDGE_AFFINITY areas = edge_dict[edge_type].get("area", ones) chunk_edges[edge_type] = Edges(sv_ids1, sv_ids2, affinities, areas) + print(chunk_coord) + start = time.time() + put_chunk_edges(edge_dir, chunk_coord, chunk_edges, 17) + print(f"cloud storage time: {time.time() - start}") + + start = time.time() add_atomic_edges(imanager.cg, chunk_coord, chunk_edges, isolated=isolated_ids) + print(f"big table time: {time.time() - start}") + # to track workers completion return chunk_coord diff --git a/pychunkedgraph/io/edge_storage.py b/pychunkedgraph/io/edge_storage.py index 35339ac7c..b2becb8f4 100644 --- a/pychunkedgraph/io/edge_storage.py +++ b/pychunkedgraph/io/edge_storage.py @@ -99,12 +99,12 @@ def get_chunk_edges( def put_chunk_edges( edges_dir: str, chunk_coordinates: np.ndarray, - chunk_edges_raw: dict, + chunk_edges_raw, compression_level: int, ) -> None: """ :param edges_dir: cloudvolume storage path - :type str: + :type str: :param chunk_coordinates: chunk coords x,y,z 
:type np.ndarray: :param chunk_edges_raw: chunk_edges_raw with keys "in", "cross", "between" @@ -115,23 +115,18 @@ def put_chunk_edges( """ def _get_edges(edge_type: str) -> Edges: - - edges_raw = chunk_edges_raw[edge_type] - - supervoxel_ids1 = edges_raw["sv1"] - supervoxel_ids2 = edges_raw["sv2"] - - ones = np.ones(len(supervoxel_ids1)) - affinities = edges_raw.get("aff", float("inf") * ones) - areas = edges_raw.get("area", ones) - - edges = Edges() - edges.node_ids1 = supervoxel_ids1.astype(basetypes.NODE_ID).tobytes() - edges.node_ids2 = supervoxel_ids2.astype(basetypes.NODE_ID).tobytes() - edges.affinities = affinities.astype(basetypes.EDGE_AFFINITY).tobytes() - edges.areas = areas.astype(basetypes.EDGE_AREA).tobytes() - - return edges + # TODO change protobuf class name + edges = chunk_edges_raw[edge_type] + + edges_proto = Edges() + edges_proto.node_ids1 = edges.node_ids1.astype(basetypes.NODE_ID).tobytes() + edges_proto.node_ids2 = edges.node_ids2.astype(basetypes.NODE_ID).tobytes() + edges_proto.affinities = edges.affinities.astype( + basetypes.EDGE_AFFINITY + ).tobytes() + edges_proto.areas = edges.areas.astype(basetypes.EDGE_AREA).tobytes() + + return edges_proto chunk_edges = ChunkEdges() chunk_edges.in_chunk.CopyFrom(_get_edges("in")) From ef4b90a6f335a599c0b5083587d67ebe65432e1e Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 8 Aug 2019 16:29:11 -0400 Subject: [PATCH 0108/1097] wip test run --- pychunkedgraph/app/__init__.py | 7 ++++++- pychunkedgraph/ingest/ran_ingestion_v2.py | 7 ++++--- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/pychunkedgraph/app/__init__.py b/pychunkedgraph/app/__init__.py index e5c35e48c..b7983219a 100644 --- a/pychunkedgraph/app/__init__.py +++ b/pychunkedgraph/app/__init__.py @@ -16,6 +16,9 @@ from pychunkedgraph.app import cg_app_blueprint, meshing_app_blueprint from pychunkedgraph.logging import jsonformatter # from pychunkedgraph.app import manifest_app_blueprint + +from 
..ingest.ran_ingestion_v2 import init_ingest_cmds + os.environ['TRAVIS_BRANCH'] = "IDONTKNOWWHYINEEDTHIS" @@ -73,4 +76,6 @@ def configure_app(app): if app.config['USE_REDIS_JOBS']: app.redis = redis.Redis.from_url(app.config['REDIS_URL']) - app.test_q = Queue('test' ,connection=app.redis) \ No newline at end of file + app.test_q = Queue('test', connection=app.redis) + with app.app_context(): + init_ingest_cmds(app) \ No newline at end of file diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 973fc46a5..5c56144fa 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -30,7 +30,6 @@ @click.argument("storage_path", type=str) @click.argument("ws_cv_path", type=str) @click.argument("cg_table_id", type=str) -@click.argument("bits", type=int) @click.argument("edge_dir", type=str) def ingest_into_chunkedgraph( storage_path, @@ -222,6 +221,7 @@ def create_atomic_chunk(imanager, chunk_coord, edge_dir): edge_dict = collect_edge_data( imanager, chunk_coord, aff_dtype=basetypes.EDGE_AFFINITY ) + edge_dict = iu.postprocess_edge_data(imanager, edge_dict) mapping = collect_agglomeration_data(imanager, chunk_coord) _, isolated_ids = define_active_edges(edge_dict, mapping) @@ -371,8 +371,6 @@ def collect_edge_data(im, chunk_coord, aff_dtype=np.float32): ("area", np.uint64), ] for k in filenames: - # print(k, len(filenames[k])) - with cloudvolume.Storage(base_path, n_threads=10) as stor: files = stor.get_files(filenames[k]) @@ -543,3 +541,6 @@ def _mapping_default(key): return active, np.unique(np.concatenate(isolated).astype(basetypes.NODE_ID)) + +def init_ingest_cmds(app): + app.cli.add_command(ingest_cli) \ No newline at end of file From 0af8e5fa16f485ff3773284fbb58d32b6a38aa14 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 8 Aug 2019 22:18:34 -0400 Subject: [PATCH 0109/1097] fix: use Edges object --- pychunkedgraph/backend/chunkedgraph_init.py | 23 ++++---- 
pychunkedgraph/edges/definitions.py | 2 +- pychunkedgraph/ingest/ran_ingestion_v2.py | 58 +++++++-------------- 3 files changed, 32 insertions(+), 51 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/chunkedgraph_init.py index ba56098db..e009290d0 100644 --- a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/chunkedgraph_init.py @@ -24,7 +24,7 @@ def add_atomic_edges( cg_instance: ChunkedGraph, chunk_coord: np.ndarray, chunk_edges_d: dict, - isolated: Sequence[basetypes.NODE_ID], + isolated: Sequence[int], time_stamp: Optional[datetime.datetime] = None, ): """ @@ -43,7 +43,7 @@ def add_atomic_edges( """ chunk_node_ids, chunk_edge_ids = _get_chunk_nodes_and_edges(chunk_edges_d, isolated) - if not chunk_node_ids: + if not chunk_node_ids.size: return 0 chunk_ids = cg_instance.get_chunk_ids_from_node_ids(chunk_node_ids) @@ -52,7 +52,10 @@ def add_atomic_edges( graph, _, _, unique_ids = build_gt_graph(chunk_edge_ids, make_directed=True) ccs = connected_components(graph) - parent_chunk_id = cg_instance.get_chunk_id(layer=2, *chunk_coord) + print(chunk_coord) + parent_chunk_id = cg_instance.get_chunk_id( + layer=2, x=chunk_coord[0], y=chunk_coord[1], z=chunk_coord[2] + ) parent_ids = cg_instance.get_unique_node_id_range(parent_chunk_id, step=len(ccs)) sparse_indices, remapping = _get_remapping(chunk_edges_d) @@ -76,9 +79,7 @@ def add_atomic_edges( cg_instance.bulk_write(rows) -def _get_chunk_nodes_and_edges( - chunk_edges_d: dict, isolated_ids: Sequence[basetypes.NODE_ID] -): +def _get_chunk_nodes_and_edges(chunk_edges_d: dict, isolated_ids: Sequence[int]): """ returns IN_CHUNK edges and nodes_ids """ @@ -151,7 +152,7 @@ def _process_component( val_dict = {column_keys.Hierarchy.Child: node_ids} for cc_layer in u_cce_layers: layer_out_edges = chunk_out_edges[cce_layers == cc_layer] - if layer_out_edges: + if layer_out_edges.size: col_key = column_keys.Connectivity.CrossChunkEdge[cc_layer] 
val_dict[col_key] = layer_out_edges @@ -168,13 +169,13 @@ def _get_out_edges(node_id, chunk_edges_d, sparse_indices, remapping): chunk_out_edges = np.array([], dtype=basetypes.NODE_ID).reshape(0, 2) for edge_type in remapping: if node_id in remapping[edge_type]: + edges_obj = chunk_edges_d[edge_type] + edges = edges_obj.get_pairs() + row_ids, column_ids = sparse_indices[edge_type][ remapping[edge_type][node_id] ] row_ids = row_ids[column_ids == 0] # edges that this node is part of - participating_edges = chunk_edges_d[edge_type][row_ids] - chunk_out_edges = np.concatenate( - [chunk_out_edges, participating_edges.get_pairs()] - ) + chunk_out_edges = np.concatenate([chunk_out_edges, edges[row_ids]]) return chunk_out_edges diff --git a/pychunkedgraph/edges/definitions.py b/pychunkedgraph/edges/definitions.py index c3be7f30f..f6481a15e 100644 --- a/pychunkedgraph/edges/definitions.py +++ b/pychunkedgraph/edges/definitions.py @@ -29,7 +29,7 @@ def get_pairs(self): """ return numpy array of edge pairs [[sv1, sv2] ... 
] """ - if self._as_pairs: + if not self._as_pairs is None: return self._as_pairs self._as_pairs = np.vstack([self.node_ids1, self.node_ids2]).T return self._as_pairs diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 5c56144fa..e5ae09933 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -219,7 +219,7 @@ def create_atomic_chunk(imanager, chunk_coord, edge_dir): chunk_coord = np.array(list(chunk_coord), dtype=np.int) edge_dict = collect_edge_data( - imanager, chunk_coord, aff_dtype=basetypes.EDGE_AFFINITY + imanager, chunk_coord ) edge_dict = iu.postprocess_edge_data(imanager, edge_dict) mapping = collect_agglomeration_data(imanager, chunk_coord) @@ -290,13 +290,14 @@ def _get_cont_chunk_coords(im, chunk_coord_a, chunk_coord_b): return c_chunk_coords -def collect_edge_data(im, chunk_coord, aff_dtype=np.float32): +def collect_edge_data(im, chunk_coord): """ Loads edge for single chunk :param im: IngestionManager :param chunk_coord: np.ndarray array of three ints :param aff_dtype: np.dtype + :param v3_data: bool :return: dict of np.ndarrays """ subfolder = "chunked_rg" @@ -305,9 +306,8 @@ def collect_edge_data(im, chunk_coord, aff_dtype=np.float32): chunk_coord = np.array(chunk_coord) - chunk_id = im.cg.get_chunk_id( - layer=1, x=chunk_coord[0], y=chunk_coord[1], z=chunk_coord[2] - ) + chunk_id = im.cg.get_chunk_id(layer=1, x=chunk_coord[0], y=chunk_coord[1], + z=chunk_coord[2]) filenames = collections.defaultdict(list) swap = collections.defaultdict(list) @@ -328,19 +328,16 @@ def collect_edge_data(im, chunk_coord, aff_dtype=np.float32): diff[dim] = d adjacent_chunk_coord = chunk_coord + diff - adjacent_chunk_id = im.cg.get_chunk_id( - layer=1, - x=adjacent_chunk_coord[0], - y=adjacent_chunk_coord[1], - z=adjacent_chunk_coord[2], - ) + adjacent_chunk_id = im.cg.get_chunk_id(layer=1, + x=adjacent_chunk_coord[0], + y=adjacent_chunk_coord[1], + 
z=adjacent_chunk_coord[2]) if im.is_out_of_bounce(adjacent_chunk_coord): continue - c_chunk_coords = _get_cont_chunk_coords( - im, chunk_coord, adjacent_chunk_coord - ) + c_chunk_coords = _get_cont_chunk_coords(im, chunk_coord, + adjacent_chunk_coord) larger_id = np.max([chunk_id, adjacent_chunk_id]) smaller_id = np.min([chunk_id, adjacent_chunk_id]) @@ -364,13 +361,9 @@ def collect_edge_data(im, chunk_coord, aff_dtype=np.float32): edge_data = {} read_counter = collections.Counter() - dtype = [ - ("sv1", np.uint64), - ("sv2", np.uint64), - ("aff", aff_dtype), - ("area", np.uint64), - ] for k in filenames: + # print(k, len(filenames[k])) + with cloudvolume.Storage(base_path, n_threads=10) as stor: files = stor.get_files(filenames[k]) @@ -385,10 +378,11 @@ def collect_edge_data(im, chunk_coord, aff_dtype=np.float32): continue if swap[file["filename"]]: - this_dtype = [dtype[1], dtype[0], dtype[2], dtype[3]] + this_dtype = [im.edge_dtype[1], im.edge_dtype[0]] + \ + im.edge_dtype[2:] content = np.frombuffer(file["content"], dtype=this_dtype) else: - content = np.frombuffer(file["content"], dtype=dtype) + content = np.frombuffer(file["content"], dtype=im.edge_dtype) data.append(content) @@ -397,26 +391,12 @@ def collect_edge_data(im, chunk_coord, aff_dtype=np.float32): try: edge_data[k] = rfn.stack_arrays(data, usemask=False) except: - raise () + raise() edge_data_df = pd.DataFrame(edge_data[k]) - edge_data_dfg = ( - edge_data_df.groupby(["sv1", "sv2"]).aggregate(np.sum).reset_index() - ) + edge_data_dfg = edge_data_df.groupby(["sv1", "sv2"]).aggregate(np.sum).reset_index() edge_data[k] = edge_data_dfg.to_records() - # # TEST - # with cloudvolume.Storage(base_path, n_threads=10) as stor: - # files = list(stor.list_files()) - # - # true_counter = collections.Counter() - # for file in files: - # if str(chunk_id) in file: - # true_counter[file.split("_")[0]] += 1 - # - # print("Truth", true_counter) - # print("Reality", read_counter) - return edge_data @@ -539,7 +519,7 @@ 
def _mapping_default(key): if k == "in": isolated.append(edge_dict[k]["sv2"][agg_2_m]) - return active, np.unique(np.concatenate(isolated).astype(basetypes.NODE_ID)) + return active, np.unique(np.concatenate(isolated).astype(np.uint64)) def init_ingest_cmds(app): From f93d849b51cfcc375868f0cab130484443afa5b3 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 9 Aug 2019 10:00:42 -0400 Subject: [PATCH 0110/1097] rename protobuf classes to avoid conflict with python classes --- pychunkedgraph/io/protobuf/chunkEdges.proto | 10 ++-- pychunkedgraph/io/protobuf/chunkEdges_pb2.py | 60 ++++++++++---------- 2 files changed, 35 insertions(+), 35 deletions(-) diff --git a/pychunkedgraph/io/protobuf/chunkEdges.proto b/pychunkedgraph/io/protobuf/chunkEdges.proto index 7ac139dcb..377e182c0 100644 --- a/pychunkedgraph/io/protobuf/chunkEdges.proto +++ b/pychunkedgraph/io/protobuf/chunkEdges.proto @@ -2,15 +2,15 @@ syntax = "proto3"; package edges; -message Edges { +message EdgesMsg { bytes node_ids1 = 1; bytes node_ids2 = 2; bytes affinities = 3; bytes areas = 4; } -message ChunkEdges { - Edges in_chunk = 1; - Edges cross_chunk = 2; - Edges between_chunk = 3; +message ChunkEdgesMsg { + EdgesMsg in_chunk = 1; + EdgesMsg cross_chunk = 2; + EdgesMsg between_chunk = 3; } \ No newline at end of file diff --git a/pychunkedgraph/io/protobuf/chunkEdges_pb2.py b/pychunkedgraph/io/protobuf/chunkEdges_pb2.py index 3e7b1515f..929137e88 100644 --- a/pychunkedgraph/io/protobuf/chunkEdges_pb2.py +++ b/pychunkedgraph/io/protobuf/chunkEdges_pb2.py @@ -20,42 +20,42 @@ package='edges', syntax='proto3', serialized_options=None, - serialized_pb=_b('\n\x10\x63hunkEdges.proto\x12\x05\x65\x64ges\"P\n\x05\x45\x64ges\x12\x11\n\tnode_ids1\x18\x01 \x01(\x0c\x12\x11\n\tnode_ids2\x18\x02 \x01(\x0c\x12\x12\n\naffinities\x18\x03 \x01(\x0c\x12\r\n\x05\x61reas\x18\x04 \x01(\x0c\"t\n\nChunkEdges\x12\x1e\n\x08in_chunk\x18\x01 \x01(\x0b\x32\x0c.edges.Edges\x12!\n\x0b\x63ross_chunk\x18\x02 
\x01(\x0b\x32\x0c.edges.Edges\x12#\n\rbetween_chunk\x18\x03 \x01(\x0b\x32\x0c.edges.Edgesb\x06proto3') + serialized_pb=_b('\n\x10\x63hunkEdges.proto\x12\x05\x65\x64ges\"S\n\x08\x45\x64gesMsg\x12\x11\n\tnode_ids1\x18\x01 \x01(\x0c\x12\x11\n\tnode_ids2\x18\x02 \x01(\x0c\x12\x12\n\naffinities\x18\x03 \x01(\x0c\x12\r\n\x05\x61reas\x18\x04 \x01(\x0c\"\x80\x01\n\rChunkEdgesMsg\x12!\n\x08in_chunk\x18\x01 \x01(\x0b\x32\x0f.edges.EdgesMsg\x12$\n\x0b\x63ross_chunk\x18\x02 \x01(\x0b\x32\x0f.edges.EdgesMsg\x12&\n\rbetween_chunk\x18\x03 \x01(\x0b\x32\x0f.edges.EdgesMsgb\x06proto3') ) -_EDGES = _descriptor.Descriptor( - name='Edges', - full_name='edges.Edges', +_EDGESMSG = _descriptor.Descriptor( + name='EdgesMsg', + full_name='edges.EdgesMsg', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='node_ids1', full_name='edges.Edges.node_ids1', index=0, + name='node_ids1', full_name='edges.EdgesMsg.node_ids1', index=0, number=1, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='node_ids2', full_name='edges.Edges.node_ids2', index=1, + name='node_ids2', full_name='edges.EdgesMsg.node_ids2', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='affinities', full_name='edges.Edges.affinities', index=2, + name='affinities', full_name='edges.EdgesMsg.affinities', index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), 
_descriptor.FieldDescriptor( - name='areas', full_name='edges.Edges.areas', index=3, + name='areas', full_name='edges.EdgesMsg.areas', index=3, number=4, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, @@ -74,33 +74,33 @@ oneofs=[ ], serialized_start=27, - serialized_end=107, + serialized_end=110, ) -_CHUNKEDGES = _descriptor.Descriptor( - name='ChunkEdges', - full_name='edges.ChunkEdges', +_CHUNKEDGESMSG = _descriptor.Descriptor( + name='ChunkEdgesMsg', + full_name='edges.ChunkEdgesMsg', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='in_chunk', full_name='edges.ChunkEdges.in_chunk', index=0, + name='in_chunk', full_name='edges.ChunkEdgesMsg.in_chunk', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='cross_chunk', full_name='edges.ChunkEdges.cross_chunk', index=1, + name='cross_chunk', full_name='edges.ChunkEdgesMsg.cross_chunk', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='between_chunk', full_name='edges.ChunkEdges.between_chunk', index=2, + name='between_chunk', full_name='edges.ChunkEdgesMsg.between_chunk', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, @@ -118,30 +118,30 @@ extension_ranges=[], oneofs=[ ], - serialized_start=109, - serialized_end=225, + serialized_start=113, + serialized_end=241, ) -_CHUNKEDGES.fields_by_name['in_chunk'].message_type = _EDGES 
-_CHUNKEDGES.fields_by_name['cross_chunk'].message_type = _EDGES -_CHUNKEDGES.fields_by_name['between_chunk'].message_type = _EDGES -DESCRIPTOR.message_types_by_name['Edges'] = _EDGES -DESCRIPTOR.message_types_by_name['ChunkEdges'] = _CHUNKEDGES +_CHUNKEDGESMSG.fields_by_name['in_chunk'].message_type = _EDGESMSG +_CHUNKEDGESMSG.fields_by_name['cross_chunk'].message_type = _EDGESMSG +_CHUNKEDGESMSG.fields_by_name['between_chunk'].message_type = _EDGESMSG +DESCRIPTOR.message_types_by_name['EdgesMsg'] = _EDGESMSG +DESCRIPTOR.message_types_by_name['ChunkEdgesMsg'] = _CHUNKEDGESMSG _sym_db.RegisterFileDescriptor(DESCRIPTOR) -Edges = _reflection.GeneratedProtocolMessageType('Edges', (_message.Message,), { - 'DESCRIPTOR' : _EDGES, +EdgesMsg = _reflection.GeneratedProtocolMessageType('EdgesMsg', (_message.Message,), { + 'DESCRIPTOR' : _EDGESMSG, '__module__' : 'chunkEdges_pb2' - # @@protoc_insertion_point(class_scope:edges.Edges) + # @@protoc_insertion_point(class_scope:edges.EdgesMsg) }) -_sym_db.RegisterMessage(Edges) +_sym_db.RegisterMessage(EdgesMsg) -ChunkEdges = _reflection.GeneratedProtocolMessageType('ChunkEdges', (_message.Message,), { - 'DESCRIPTOR' : _CHUNKEDGES, +ChunkEdgesMsg = _reflection.GeneratedProtocolMessageType('ChunkEdgesMsg', (_message.Message,), { + 'DESCRIPTOR' : _CHUNKEDGESMSG, '__module__' : 'chunkEdges_pb2' - # @@protoc_insertion_point(class_scope:edges.ChunkEdges) + # @@protoc_insertion_point(class_scope:edges.ChunkEdgesMsg) }) -_sym_db.RegisterMessage(ChunkEdges) +_sym_db.RegisterMessage(ChunkEdgesMsg) # @@protoc_insertion_point(module_scope) From 070ea9093ec598593abb67285ee7ed8d213dba8d Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 9 Aug 2019 10:01:28 -0400 Subject: [PATCH 0111/1097] rename protobuf classes to avoid conflict with python classes --- pychunkedgraph/io/edge_storage.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/pychunkedgraph/io/edge_storage.py b/pychunkedgraph/io/edge_storage.py 
index b2becb8f4..30c88edac 100644 --- a/pychunkedgraph/io/edge_storage.py +++ b/pychunkedgraph/io/edge_storage.py @@ -12,7 +12,7 @@ from cloudvolume.storage import SimpleStorage from ..backend.utils import basetypes -from .protobuf.chunkEdges_pb2 import Edges, ChunkEdges +from .protobuf.chunkEdges_pb2 import EdgesMsg, ChunkEdgesMsg def _decompress_edges(content: bytes) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: @@ -23,7 +23,7 @@ def _decompress_edges(content: bytes) -> Tuple[np.ndarray, np.ndarray, np.ndarra :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray] """ - def _get_edges(edges_message: Edges) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + def _get_edges(edges_message: EdgesMsg) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: supervoxel_ids1 = np.frombuffer(edges_message.node_ids1, basetypes.NODE_ID) supervoxel_ids2 = np.frombuffer(edges_message.node_ids2, basetypes.NODE_ID) @@ -32,7 +32,7 @@ def _get_edges(edges_message: Edges) -> Tuple[np.ndarray, np.ndarray, np.ndarray areas = np.frombuffer(edges_message.areas, basetypes.EDGE_AREA) return edges, affinities, areas - chunk_edges = ChunkEdges() + chunk_edges = ChunkEdgesMsg() zstd_decompressor_obj = zstd.ZstdDecompressor().decompressobj() file_content = zstd_decompressor_obj.decompress(content) chunk_edges.ParseFromString(file_content) @@ -114,11 +114,9 @@ def put_chunk_edges( :return None: """ - def _get_edges(edge_type: str) -> Edges: - # TODO change protobuf class name + def _get_edges(edge_type: str) -> EdgesMsg: edges = chunk_edges_raw[edge_type] - - edges_proto = Edges() + edges_proto = EdgesMsg() edges_proto.node_ids1 = edges.node_ids1.astype(basetypes.NODE_ID).tobytes() edges_proto.node_ids2 = edges.node_ids2.astype(basetypes.NODE_ID).tobytes() edges_proto.affinities = edges.affinities.astype( @@ -128,7 +126,7 @@ def _get_edges(edge_type: str) -> Edges: return edges_proto - chunk_edges = ChunkEdges() + chunk_edges = ChunkEdgesMsg() chunk_edges.in_chunk.CopyFrom(_get_edges("in")) 
chunk_edges.between_chunk.CopyFrom(_get_edges("between")) chunk_edges.cross_chunk.CopyFrom(_get_edges("cross")) From 5ccc423868dca3aa950d294036881c449eb935ae Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 9 Aug 2019 10:20:54 -0400 Subject: [PATCH 0112/1097] more refactor --- .../definitions}/__init__.py | 0 .../definitions/edges.py} | 0 .../backend/initialization/__init__.py | 3 ++ .../create.py} | 20 ++++---- pychunkedgraph/edges/utils.py | 0 pychunkedgraph/ingest/ran_ingestion_v2.py | 47 ++++++++++--------- 6 files changed, 36 insertions(+), 34 deletions(-) rename pychunkedgraph/{edges => backend/definitions}/__init__.py (100%) rename pychunkedgraph/{edges/definitions.py => backend/definitions/edges.py} (100%) create mode 100644 pychunkedgraph/backend/initialization/__init__.py rename pychunkedgraph/backend/{chunkedgraph_init.py => initialization/create.py} (91%) delete mode 100644 pychunkedgraph/edges/utils.py diff --git a/pychunkedgraph/edges/__init__.py b/pychunkedgraph/backend/definitions/__init__.py similarity index 100% rename from pychunkedgraph/edges/__init__.py rename to pychunkedgraph/backend/definitions/__init__.py diff --git a/pychunkedgraph/edges/definitions.py b/pychunkedgraph/backend/definitions/edges.py similarity index 100% rename from pychunkedgraph/edges/definitions.py rename to pychunkedgraph/backend/definitions/edges.py diff --git a/pychunkedgraph/backend/initialization/__init__.py b/pychunkedgraph/backend/initialization/__init__.py new file mode 100644 index 000000000..009da30ed --- /dev/null +++ b/pychunkedgraph/backend/initialization/__init__.py @@ -0,0 +1,3 @@ +""" +modules for chunkedgraph initialization/creation +""" diff --git a/pychunkedgraph/backend/chunkedgraph_init.py b/pychunkedgraph/backend/initialization/create.py similarity index 91% rename from pychunkedgraph/backend/chunkedgraph_init.py rename to pychunkedgraph/backend/initialization/create.py index e009290d0..7914220d4 100644 --- 
a/pychunkedgraph/backend/chunkedgraph_init.py +++ b/pychunkedgraph/backend/initialization/create.py @@ -3,27 +3,23 @@ """ import datetime -from typing import Optional, Sequence +from typing import Optional, Sequence, Dict import pytz import numpy as np -from google.cloud.bigtable.row_set import RowSet -from google.cloud.bigtable.column_family import MaxVersionsGCRule - - -from .chunkedgraph import ChunkedGraph -from .utils import basetypes -from ..edges.definitions import Edges, IN_CHUNK, BT_CHUNK, CX_CHUNK, TYPES as EDGE_TYPES -from .chunkedgraph_utils import compute_indices_pandas, get_google_compatible_time_stamp -from .flatgraph_utils import build_gt_graph, connected_components -from .utils import serializers, column_keys +from ..chunkedgraph import ChunkedGraph +from ..utils import basetypes +from ..definitions.edges import Edges, IN_CHUNK, BT_CHUNK, CX_CHUNK, TYPES as EDGE_TYPES +from ..chunkedgraph_utils import compute_indices_pandas, get_google_compatible_time_stamp +from ..flatgraph_utils import build_gt_graph, connected_components +from ..utils import serializers, column_keys def add_atomic_edges( cg_instance: ChunkedGraph, chunk_coord: np.ndarray, - chunk_edges_d: dict, + chunk_edges_d: Dict[str, Edges], isolated: Sequence[int], time_stamp: Optional[datetime.datetime] = None, ): diff --git a/pychunkedgraph/edges/utils.py b/pychunkedgraph/edges/utils.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index e5ae09933..7d4bf8ea6 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -17,9 +17,9 @@ from flask import current_app from flask.cli import AppGroup -from pychunkedgraph.ingest import ingestionmanager, ingestion_utils as iu -from ..backend.chunkedgraph_init import add_atomic_edges -from ..edges.definitions import TYPES as EDGE_TYPES, Edges +from . 
import ingestionmanager, ingestion_utils as iu +from ..backend.initialization.create import add_atomic_edges +from ..backend.definitions.edges import Edges, TYPES as EDGE_TYPES from ..backend.utils import basetypes from ..io.edge_storage import put_chunk_edges @@ -72,7 +72,7 @@ def ingest_into_chunkedgraph( ws_cv_path = ws_cv_path.strip("/") cg_mesh_dir = f"{cg_table_id}_meshes" - chunk_size = np.array(chunk_size, dtype=np.uint64) + chunk_size = np.array(chunk_size) _, n_layers_agg = iu.initialize_chunkedgraph( cg_table_id=cg_table_id, @@ -218,9 +218,7 @@ def create_atomic_chunk(imanager, chunk_coord, edge_dir): """ Creates single atomic chunk""" chunk_coord = np.array(list(chunk_coord), dtype=np.int) - edge_dict = collect_edge_data( - imanager, chunk_coord - ) + edge_dict = collect_edge_data(imanager, chunk_coord) edge_dict = iu.postprocess_edge_data(imanager, edge_dict) mapping = collect_agglomeration_data(imanager, chunk_coord) _, isolated_ids = define_active_edges(edge_dict, mapping) @@ -306,8 +304,9 @@ def collect_edge_data(im, chunk_coord): chunk_coord = np.array(chunk_coord) - chunk_id = im.cg.get_chunk_id(layer=1, x=chunk_coord[0], y=chunk_coord[1], - z=chunk_coord[2]) + chunk_id = im.cg.get_chunk_id( + layer=1, x=chunk_coord[0], y=chunk_coord[1], z=chunk_coord[2] + ) filenames = collections.defaultdict(list) swap = collections.defaultdict(list) @@ -328,16 +327,19 @@ def collect_edge_data(im, chunk_coord): diff[dim] = d adjacent_chunk_coord = chunk_coord + diff - adjacent_chunk_id = im.cg.get_chunk_id(layer=1, - x=adjacent_chunk_coord[0], - y=adjacent_chunk_coord[1], - z=adjacent_chunk_coord[2]) + adjacent_chunk_id = im.cg.get_chunk_id( + layer=1, + x=adjacent_chunk_coord[0], + y=adjacent_chunk_coord[1], + z=adjacent_chunk_coord[2], + ) if im.is_out_of_bounce(adjacent_chunk_coord): continue - c_chunk_coords = _get_cont_chunk_coords(im, chunk_coord, - adjacent_chunk_coord) + c_chunk_coords = _get_cont_chunk_coords( + im, chunk_coord, adjacent_chunk_coord + ) 
larger_id = np.max([chunk_id, adjacent_chunk_id]) smaller_id = np.min([chunk_id, adjacent_chunk_id]) @@ -378,8 +380,7 @@ def collect_edge_data(im, chunk_coord): continue if swap[file["filename"]]: - this_dtype = [im.edge_dtype[1], im.edge_dtype[0]] + \ - im.edge_dtype[2:] + this_dtype = [im.edge_dtype[1], im.edge_dtype[0]] + im.edge_dtype[2:] content = np.frombuffer(file["content"], dtype=this_dtype) else: content = np.frombuffer(file["content"], dtype=im.edge_dtype) @@ -391,10 +392,12 @@ def collect_edge_data(im, chunk_coord): try: edge_data[k] = rfn.stack_arrays(data, usemask=False) except: - raise() + raise () edge_data_df = pd.DataFrame(edge_data[k]) - edge_data_dfg = edge_data_df.groupby(["sv1", "sv2"]).aggregate(np.sum).reset_index() + edge_data_dfg = ( + edge_data_df.groupby(["sv1", "sv2"]).aggregate(np.sum).reset_index() + ) edge_data[k] = edge_data_dfg.to_records() return edge_data @@ -413,7 +416,7 @@ def _read_agg_files(filenames, base_path): continue content = zstd.ZstdDecompressor().decompressobj().decompress(file["content"]) - edge_list.append(np.frombuffer(content, dtype=np.uint64).reshape(-1, 2)) + edge_list.append(np.frombuffer(content, dtype=basetypes.NODE_ID).reshape(-1, 2)) return edge_list @@ -519,8 +522,8 @@ def _mapping_default(key): if k == "in": isolated.append(edge_dict[k]["sv2"][agg_2_m]) - return active, np.unique(np.concatenate(isolated).astype(np.uint64)) + return active, np.unique(np.concatenate(isolated).astype(basetypes.NODE_ID)) def init_ingest_cmds(app): - app.cli.add_command(ingest_cli) \ No newline at end of file + app.cli.add_command(ingest_cli) From 9f78f51ffd5cc8b5944eef2159859b7d4602f883 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 9 Aug 2019 11:41:09 -0400 Subject: [PATCH 0113/1097] add support for pubsub to listen to worker result --- pychunkedgraph/app/__init__.py | 2 +- pychunkedgraph/ingest/cli.py | 42 +++++++++++++++++++++++ pychunkedgraph/ingest/ran_ingestion_v2.py | 34 +++++++++--------- 3 files 
changed, 59 insertions(+), 19 deletions(-) create mode 100644 pychunkedgraph/ingest/cli.py diff --git a/pychunkedgraph/app/__init__.py b/pychunkedgraph/app/__init__.py index b7983219a..88c769f5c 100644 --- a/pychunkedgraph/app/__init__.py +++ b/pychunkedgraph/app/__init__.py @@ -17,7 +17,7 @@ from pychunkedgraph.logging import jsonformatter # from pychunkedgraph.app import manifest_app_blueprint -from ..ingest.ran_ingestion_v2 import init_ingest_cmds +from ..ingest.cli import init_ingest_cmds os.environ['TRAVIS_BRANCH'] = "IDONTKNOWWHYINEEDTHIS" diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py new file mode 100644 index 000000000..d966cd95b --- /dev/null +++ b/pychunkedgraph/ingest/cli.py @@ -0,0 +1,42 @@ +""" +cli for running ingest +""" + +import click +from flask import current_app +from flask.cli import AppGroup + +from .ran_ingestion_v2 import ingest_into_chunkedgraph + +ingest_cli = AppGroup("ingest") + + +def handle_job_result(*args, **kwargs): + """handle worker return""" + with open("results.txt", "a") as results_f: + results_f.write(f"{str(args[0]['data'])}\n") + + +@ingest_cli.command("atomic") +@click.argument("storage_path", type=str) +@click.argument("ws_cv_path", type=str) +@click.argument("edge_dir", type=str) +@click.argument("cg_table_id", type=str) +@click.argument("n_chunks", type=int) +def run_ingest(storage_path, ws_cv_path, cg_table_id, edge_dir=None, n_chunks=-1): + + chunk_pubsub = current_app.redis.pubsub() + chunk_pubsub.subscribe(**{"ingest_channel": handle_job_result}) + chunk_pubsub.run_in_thread(sleep_time=0.1) + + ingest_into_chunkedgraph( + storage_path=storage_path, + ws_cv_path=ws_cv_path, + cg_table_id=cg_table_id, + edge_dir=edge_dir, + n_chunks=n_chunks, + ) + + +def init_ingest_cmds(app): + app.cli.add_command(ingest_cli) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 7d4bf8ea6..5ecee93a8 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ 
b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -2,6 +2,7 @@ Module for ingesting in chunkedgraph format with edges stored outside bigtable """ +import os import collections import time @@ -12,25 +13,22 @@ import numpy as np import numpy.lib.recfunctions as rfn import zstandard as zstd -from multiwrapper import multiprocessing_utils as mu - from flask import current_app -from flask.cli import AppGroup +from multiwrapper import multiprocessing_utils as mu +from ..utils.general import redis_job from . import ingestionmanager, ingestion_utils as iu from ..backend.initialization.create import add_atomic_edges from ..backend.definitions.edges import Edges, TYPES as EDGE_TYPES from ..backend.utils import basetypes from ..io.edge_storage import put_chunk_edges -ingest_cli = AppGroup("ingest") +REDIS_HOST = os.environ.get("REDIS_SERVICE_HOST", "localhost") +REDIS_PORT = os.environ.get("REDIS_SERVICE_PORT", "6379") +REDIS_PASSWORD = os.environ.get("REDIS_PASSWORD", "dev") +REDIS_URL = f"redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0" -@ingest_cli.command("atomic") -@click.argument("storage_path", type=str) -@click.argument("ws_cv_path", type=str) -@click.argument("cg_table_id", type=str) -@click.argument("edge_dir", type=str) def ingest_into_chunkedgraph( storage_path, ws_cv_path, @@ -44,6 +42,7 @@ def ingest_into_chunkedgraph( project_id=None, start_layer=1, edge_dir=None, + n_chunks=-1, ): """ Creates a chunkedgraph from a Ran Agglomerattion @@ -97,7 +96,7 @@ def ingest_into_chunkedgraph( ) # if start_layer < 3: - create_atomic_chunks(im, edge_dir) + create_atomic_chunks(im, edge_dir, n_chunks) # create_abstract_layers(im, n_threads=n_threads[1], start_layer=start_layer) return im @@ -195,12 +194,14 @@ def _create_layers(args): ) -def create_atomic_chunks(im, edge_dir): +def create_atomic_chunks(im, edge_dir, n_chunks): """ Creates all atomic chunks""" chunk_coords = list(im.chunk_coord_gen) np.random.shuffle(chunk_coords) - for chunk_coord in chunk_coords[:5]: + 
print(len(chunk_coords)) + + for chunk_coord in chunk_coords[:n_chunks]: current_app.test_q.enqueue( _create_atomic_chunk, job_timeout="59m", @@ -208,10 +209,11 @@ def create_atomic_chunks(im, edge_dir): ) +@redis_job(REDIS_URL, "ingest_channel") def _create_atomic_chunk(im_info, chunk_coord, edge_dir): """ Multiprocessing helper for create_atomic_chunks """ imanager = ingestionmanager.IngestionManager(**im_info) - create_atomic_chunk(imanager, chunk_coord, edge_dir) + return create_atomic_chunk(imanager, chunk_coord, edge_dir) def create_atomic_chunk(imanager, chunk_coord, edge_dir): @@ -243,7 +245,7 @@ def create_atomic_chunk(imanager, chunk_coord, edge_dir): print(f"big table time: {time.time() - start}") # to track workers completion - return chunk_coord + return str(chunk_coord) def _get_cont_chunk_coords(im, chunk_coord_a, chunk_coord_b): @@ -523,7 +525,3 @@ def _mapping_default(key): isolated.append(edge_dict[k]["sv2"][agg_2_m]) return active, np.unique(np.concatenate(isolated).astype(basetypes.NODE_ID)) - - -def init_ingest_cmds(app): - app.cli.add_command(ingest_cli) From 86e58e1900d5113bc025055ca6ade808a2f10311 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 9 Aug 2019 12:01:43 -0400 Subject: [PATCH 0114/1097] add docstring --- pychunkedgraph/ingest/cli.py | 10 +++++++++- pychunkedgraph/ingest/ran_ingestion_v2.py | 2 +- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index d966cd95b..a2ed00ad9 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -24,7 +24,15 @@ def handle_job_result(*args, **kwargs): @click.argument("cg_table_id", type=str) @click.argument("n_chunks", type=int) def run_ingest(storage_path, ws_cv_path, cg_table_id, edge_dir=None, n_chunks=-1): - + """ + run ingestion job + eg: flask ingest atomic \ + gs://ranl/scratch/pinky100_ca_com/agg \ + gs://neuroglancer/pinky100_v0/ws/pinky100_ca_com \ + 
gs://akhilesh-test/edges/pinky-ingest-test \ + akhilesh-ingest-test \ + 1 + """ chunk_pubsub = current_app.redis.pubsub() chunk_pubsub.subscribe(**{"ingest_channel": handle_job_result}) chunk_pubsub.run_in_thread(sleep_time=0.1) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 5ecee93a8..4c6cde558 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -42,7 +42,7 @@ def ingest_into_chunkedgraph( project_id=None, start_layer=1, edge_dir=None, - n_chunks=-1, + n_chunks=None, ): """ Creates a chunkedgraph from a Ran Agglomerattion From f28c55a1efc4dbabed5b77b2ef9cb29694ea0bea Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 12 Aug 2019 10:18:18 -0400 Subject: [PATCH 0115/1097] avoid file creation when chunk has no edges --- pychunkedgraph/ingest/ran_ingestion_v2.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 4c6cde558..ca43cd39c 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -27,6 +27,7 @@ REDIS_PORT = os.environ.get("REDIS_SERVICE_PORT", "6379") REDIS_PASSWORD = os.environ.get("REDIS_PASSWORD", "dev") REDIS_URL = f"redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0" +ZSTD_COMPRESSION_LEVEL = 17 def ingest_into_chunkedgraph( @@ -225,6 +226,10 @@ def create_atomic_chunk(imanager, chunk_coord, edge_dir): mapping = collect_agglomeration_data(imanager, chunk_coord) _, isolated_ids = define_active_edges(edge_dict, mapping) + # flag to check if chunk has edges + # avoid writing to cloud storage if there are no edges + # unnecessary write operation + no_edges = False chunk_edges = {} for edge_type in EDGE_TYPES: sv_ids1 = edge_dict[edge_type]["sv1"] @@ -234,15 +239,11 @@ def create_atomic_chunk(imanager, chunk_coord, edge_dir): affinities = 
edge_dict[edge_type].get("aff", float("inf") * ones) areas = edge_dict[edge_type].get("area", ones) chunk_edges[edge_type] = Edges(sv_ids1, sv_ids2, affinities, areas) + no_edges = no_edges and len(sv_ids1) - print(chunk_coord) - start = time.time() - put_chunk_edges(edge_dir, chunk_coord, chunk_edges, 17) - print(f"cloud storage time: {time.time() - start}") - - start = time.time() + if not no_edges: + put_chunk_edges(edge_dir, chunk_coord, chunk_edges, ZSTD_COMPRESSION_LEVEL) add_atomic_edges(imanager.cg, chunk_coord, chunk_edges, isolated=isolated_ids) - print(f"big table time: {time.time() - start}") # to track workers completion return str(chunk_coord) From 02b0359a4bd57ac32692e44f71f6f8fcba837e29 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 12 Aug 2019 10:46:45 -0400 Subject: [PATCH 0116/1097] big table crosschunk column compression --- .../backend/initialization/create.py | 4 ++-- pychunkedgraph/backend/utils/column_keys.py | 2 +- pychunkedgraph/backend/utils/serializers.py | 18 ++++++++++++++---- 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/pychunkedgraph/backend/initialization/create.py b/pychunkedgraph/backend/initialization/create.py index 7914220d4..76f626585 100644 --- a/pychunkedgraph/backend/initialization/create.py +++ b/pychunkedgraph/backend/initialization/create.py @@ -149,8 +149,8 @@ def _process_component( for cc_layer in u_cce_layers: layer_out_edges = chunk_out_edges[cce_layers == cc_layer] if layer_out_edges.size: - col_key = column_keys.Connectivity.CrossChunkEdge[cc_layer] - val_dict[col_key] = layer_out_edges + col = column_keys.Connectivity.CrossChunkEdge[cc_layer] + val_dict[col] = layer_out_edges r_key = serializers.serialize_uint64(parent_id) rows.append(cg_instance.mutate_row(r_key, val_dict, time_stamp=time_stamp)) diff --git a/pychunkedgraph/backend/utils/column_keys.py b/pychunkedgraph/backend/utils/column_keys.py index 2f952d4a0..fb83ec844 100644 --- a/pychunkedgraph/backend/utils/column_keys.py 
+++ b/pychunkedgraph/backend/utils/column_keys.py @@ -108,7 +108,7 @@ class Connectivity: CrossChunkEdge = _ColumnArray( pattern=b'atomic_cross_edges_%d', family_id='3', - serializer=serializers.NumPyArray(dtype=basetypes.NODE_ID, shape=(-1, 2))) + serializer=serializers.NumPyArray(dtype=basetypes.NODE_ID, shape=(-1, 2), compression_level=22)) class Hierarchy: diff --git a/pychunkedgraph/backend/utils/serializers.py b/pychunkedgraph/backend/utils/serializers.py index 401204642..c5b8ecac3 100644 --- a/pychunkedgraph/backend/utils/serializers.py +++ b/pychunkedgraph/backend/utils/serializers.py @@ -1,18 +1,27 @@ from typing import Any, Iterable import json import numpy as np +import zstandard as zstd class _Serializer(): - def __init__(self, serializer, deserializer, basetype=Any): + def __init__(self, serializer, deserializer, basetype=Any, compression_level=None): self._serializer = serializer self._deserializer = deserializer self._basetype = basetype + if compression_level: + self._compressor = zstd.ZstdCompressor(level=compression_level) + self._decompressor = zstd.ZstdDecompressor().decompressobj() def serialize(self, obj): - return self._serializer(obj) + content = self._serializer(obj) + if self._compressor: + return self._compressor.compress(content) + return content def deserialize(self, obj): + if self._decompressor: + obj = self._decompressor.decompress(obj) return self._deserializer(obj) @property @@ -30,11 +39,12 @@ def _deserialize(val, dtype, shape=None, order=None): return data.reshape(data.shape, order=order) return data - def __init__(self, dtype, shape=None, order=None): + def __init__(self, dtype, shape=None, order=None, compression_level=None): super().__init__( serializer=lambda x: x.newbyteorder(dtype.byteorder).tobytes(), deserializer=lambda x: NumPyArray._deserialize(x, dtype, shape=shape, order=order), - basetype=dtype.type + basetype=dtype.type, + compression_level=compression_level ) From cef923b982b2d82f33b7c3e6ba4911e0e76ad816 Mon Sep 
17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 12 Aug 2019 11:50:13 -0400 Subject: [PATCH 0117/1097] no_edges flag bug --- pychunkedgraph/backend/utils/serializers.py | 4 +++- pychunkedgraph/ingest/ran_ingestion_v2.py | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/pychunkedgraph/backend/utils/serializers.py b/pychunkedgraph/backend/utils/serializers.py index c5b8ecac3..287e9efce 100644 --- a/pychunkedgraph/backend/utils/serializers.py +++ b/pychunkedgraph/backend/utils/serializers.py @@ -9,6 +9,8 @@ def __init__(self, serializer, deserializer, basetype=Any, compression_level=Non self._serializer = serializer self._deserializer = deserializer self._basetype = basetype + self._compressor = None + self._decompressor = None if compression_level: self._compressor = zstd.ZstdCompressor(level=compression_level) self._decompressor = zstd.ZstdDecompressor().decompressobj() @@ -21,7 +23,7 @@ def serialize(self, obj): def deserialize(self, obj): if self._decompressor: - obj = self._decompressor.decompress(obj) + obj = self._decompressor.decompress(obj) return self._deserializer(obj) @property diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index ca43cd39c..d06a565bf 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -229,7 +229,7 @@ def create_atomic_chunk(imanager, chunk_coord, edge_dir): # flag to check if chunk has edges # avoid writing to cloud storage if there are no edges # unnecessary write operation - no_edges = False + no_edges = True chunk_edges = {} for edge_type in EDGE_TYPES: sv_ids1 = edge_dict[edge_type]["sv1"] @@ -239,7 +239,7 @@ def create_atomic_chunk(imanager, chunk_coord, edge_dir): affinities = edge_dict[edge_type].get("aff", float("inf") * ones) areas = edge_dict[edge_type].get("area", ones) chunk_edges[edge_type] = Edges(sv_ids1, sv_ids2, affinities, areas) - no_edges = no_edges and len(sv_ids1) + no_edges = no_edges and 
not sv_ids1.size if not no_edges: put_chunk_edges(edge_dir, chunk_coord, chunk_edges, ZSTD_COMPRESSION_LEVEL) From 7b43e41210a766026378911a4296553032a92f42 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 12 Aug 2019 13:57:40 -0400 Subject: [PATCH 0118/1097] compression for child_ids --- pychunkedgraph/backend/utils/column_keys.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pychunkedgraph/backend/utils/column_keys.py b/pychunkedgraph/backend/utils/column_keys.py index fb83ec844..2231e020d 100644 --- a/pychunkedgraph/backend/utils/column_keys.py +++ b/pychunkedgraph/backend/utils/column_keys.py @@ -115,7 +115,7 @@ class Hierarchy: Child = _Column( key=b'children', family_id='0', - serializer=serializers.NumPyArray(dtype=basetypes.NODE_ID)) + serializer=serializers.NumPyArray(dtype=basetypes.NODE_ID, compression_level=22)) FormerParent = _Column( key=b'former_parents', From e87fc1214e79e595a104c041f10dd09875546396 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 14 Aug 2019 13:52:21 -0400 Subject: [PATCH 0119/1097] wip --- pychunkedgraph/backend/chunkedgraph.py | 69 +++++++++---------- .../backend/initialization/create.py | 10 +-- pychunkedgraph/backend/utils/edge_utils.py | 47 +++++++++++++ pychunkedgraph/ingest/ran_ingestion_v2.py | 4 +- pychunkedgraph/io/edge_storage.py | 65 ++++++++--------- 5 files changed, 117 insertions(+), 78 deletions(-) create mode 100644 pychunkedgraph/backend/utils/edge_utils.py diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index ced7c5f04..7ef44f2d9 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -45,6 +45,7 @@ from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, NamedTuple +from .utils.edge_utils import concatenate_chunk_edges from pychunkedgraph.io.edge_storage import get_chunk_edges HOME = os.path.expanduser("~") @@ -3081,20 +3082,26 @@ def 
_get_subgraph_layer2_edges(node_ids) -> \ return edges, affinities, areas - def get_subgraph_edges_v2(self, edges_dir, - offset = np.array([105, 54, 6]), - this_n_threads = 4, - cv_threads = 1, - bounding_box: Optional[Sequence[Sequence[int]]] = None, - bb_is_coordinate: bool = False, - connected_edges=True, - verbose: bool = True - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - + def get_subgraph_edges_v2( + self, agglomeration_id: np.uint64, + bounding_box: Optional[Sequence[Sequence[int]]] = None, + bb_is_coordinate: bool = False, + connected_edges=True, + cv_threads = 1, + verbose: bool = True) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + 1. determine chunk ids + 2. read edges of those chunks + 3. determine node ids that are part of the given agglomeration + 4. filter those edges + 5. for each edge (v1,v2) - active if parent(v1) == parent(v2), inactive otherwise + 6. return the active edges + """ + # child_ids = self.get_children(node_ids, flatten=True) def _get_subgraph_layer2_edges(chunk_ids) -> \ Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: return get_chunk_edges( - edges_dir, + self.edges_dir, [self.get_chunk_coordinates(chunk_id) for chunk_id in chunk_ids], cv_threads) @@ -3102,39 +3109,31 @@ def _get_subgraph_layer2_edges(chunk_ids) -> \ timings['total'] = time.time() timings['determine_chunks_ids'] = time.time() - x_start, y_start, z_start = offset - x_end, y_end, z_end = map( - int, np.ceil( - np.array(self.dataset_info['scales'][0]['size']) / self.chunk_size) - offset) - - chunks = [] + bounding_box = self.normalize_bounding_box(bounding_box, bb_is_coordinate) + # Layer 3+ + child_ids = self._get_subgraph_higher_layer_nodes( + node_id=agglomeration_id, bounding_box=bounding_box, + return_layers=[2], verbose=verbose)[2] - for x in range(x_start,x_end): - for y in range(y_start, y_end): - for z in range(z_start, z_end): - chunks.append((x, y, z)) + child_chunk_ids = self.get_chunk_ids_from_node_ids(child_ids) + chunk_ids = 
np.unique(child_chunk_ids) - chunk_ids = np.array([self.get_chunk_id(None, 1, *chunk) for chunk in chunks]) + # chunk_ids = np.array([self.get_chunk_id(None, 1, *chunk) for chunk in chunks]) timings['determine_chunks_ids'] = time.time() - timings['determine_chunks_ids'] timings['reading_edges'] = time.time() - edge_infos = mu.multithread_func( + cg_threads = 4 + chunk_edge_dicts = mu.multithread_func( _get_subgraph_layer2_edges, - np.array_split(chunk_ids, this_n_threads), - n_threads=this_n_threads, debug=this_n_threads == 1) + np.array_split(chunk_ids, cg_threads), + n_threads=cg_threads, debug=False) timings['reading_edges'] = time.time() - timings['reading_edges'] - timings['collecting_edges'] = time.time() - edges = np.array([], dtype=np.uint64).reshape(0, 2) - affinities = np.array([], dtype=np.float32) - areas = np.array([], dtype=np.uint64) + edges_dict = concatenate_chunk_edges(chunk_edge_dicts) - for edge_info in edge_infos: - _edges, _affinities, _areas = edge_info - edges = np.concatenate([edges, _edges]) - affinities = np.concatenate([affinities, _affinities]) - areas = np.concatenate([areas, _areas]) - timings['collecting_edges'] = time.time() - timings['collecting_edges'] + timings['filtering_edges'] = time.time() + edges = filter_edges(, edges_dict) + timings['filtering_edges'] = time.time() - timings['filtering_edges'] timings['total'] = time.time() - timings['total'] diff --git a/pychunkedgraph/backend/initialization/create.py b/pychunkedgraph/backend/initialization/create.py index 76f626585..8e8d95826 100644 --- a/pychunkedgraph/backend/initialization/create.py +++ b/pychunkedgraph/backend/initialization/create.py @@ -146,11 +146,11 @@ def _process_component( u_cce_layers = np.unique(cce_layers) val_dict = {column_keys.Hierarchy.Child: node_ids} - for cc_layer in u_cce_layers: - layer_out_edges = chunk_out_edges[cce_layers == cc_layer] - if layer_out_edges.size: - col = column_keys.Connectivity.CrossChunkEdge[cc_layer] - val_dict[col] = 
layer_out_edges + # for cc_layer in u_cce_layers: + # layer_out_edges = chunk_out_edges[cce_layers == cc_layer] + # if layer_out_edges.size: + # col = column_keys.Connectivity.CrossChunkEdge[cc_layer] + # val_dict[col] = layer_out_edges r_key = serializers.serialize_uint64(parent_id) rows.append(cg_instance.mutate_row(r_key, val_dict, time_stamp=time_stamp)) diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py new file mode 100644 index 000000000..07327be47 --- /dev/null +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -0,0 +1,47 @@ +""" +helper functions for edge stuff +""" + +from typing import Tuple + +import numpy as np +from ..definitions.edges import Edges, IN_CHUNK, BT_CHUNK, CX_CHUNK + + +def concatenate_chunk_edges(chunk_edge_dicts: list) -> dict: + """combine edge_dicts of all chunks into one edge_dict""" + edges_dict = {} + for edge_type in [IN_CHUNK, BT_CHUNK, CX_CHUNK]: + sv_ids1 = [] + sv_ids2 = [] + affinities = [] + areas = [] + for edge_d in chunk_edge_dicts: + edges = edge_d[edge_type] + sv_ids1.append(edges.node_ids1) + sv_ids2.append(edges.node_ids2) + affinities.append(edges.affinities) + areas.append(edges.areas) + + sv_ids1 = np.concatenate(sv_ids1) + sv_ids2 = np.concatenate(sv_ids2) + affinities = np.concatenate(affinities) + areas = np.concatenate(areas) + edges_dict[edge_type] = Edges(sv_ids1, sv_ids2, affinities, areas) + return edges_dict + + +def filter_edges(node_ids: np.ndarray, edges_dict: dict) -> Edges: + """find edges for the given node_ids from the dict""" + ids1 = [] + ids2 = [] + affinities = [] + areas = [] + for edge_type in [IN_CHUNK, BT_CHUNK, CX_CHUNK]: + edges = edges_dict[edge_type] + filtered = edges.node_ids1 == node_ids + ids1.append(edges.node_ids1[filtered]) + ids2.append(edges.node_ids2[filtered]) + affinities.append(edges.affinities[filtered]) + areas.append(edges.areas[filtered]) + return Edges(ids1, ids2, affinities, areas) diff --git 
a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index d06a565bf..a91dafcab 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -241,8 +241,8 @@ def create_atomic_chunk(imanager, chunk_coord, edge_dir): chunk_edges[edge_type] = Edges(sv_ids1, sv_ids2, affinities, areas) no_edges = no_edges and not sv_ids1.size - if not no_edges: - put_chunk_edges(edge_dir, chunk_coord, chunk_edges, ZSTD_COMPRESSION_LEVEL) + # if not no_edges: + # put_chunk_edges(edge_dir, chunk_coord, chunk_edges, ZSTD_COMPRESSION_LEVEL) add_atomic_edges(imanager.cg, chunk_coord, chunk_edges, isolated=isolated_ids) # to track workers completion diff --git a/pychunkedgraph/io/edge_storage.py b/pychunkedgraph/io/edge_storage.py index 30c88edac..f49acd14f 100644 --- a/pychunkedgraph/io/edge_storage.py +++ b/pychunkedgraph/io/edge_storage.py @@ -11,26 +11,28 @@ from cloudvolume import Storage from cloudvolume.storage import SimpleStorage +from ..backend.utils.edge_utils import concatenate_chunk_edges +from ..backend.definitions.edges import Edges, IN_CHUNK, BT_CHUNK, CX_CHUNK from ..backend.utils import basetypes from .protobuf.chunkEdges_pb2 import EdgesMsg, ChunkEdgesMsg -def _decompress_edges(content: bytes) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: +def _decompress_edges(content: bytes) -> dict: """ :param content: zstd compressed bytes :type bytes: - :return: edges, affinities, areas - :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray] + :return: edges_dict with keys "in", "cross", "between" + :rtype: dict """ - def _get_edges(edges_message: EdgesMsg) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - supervoxel_ids1 = np.frombuffer(edges_message.node_ids1, basetypes.NODE_ID) - supervoxel_ids2 = np.frombuffer(edges_message.node_ids2, basetypes.NODE_ID) - - edges = np.column_stack((supervoxel_ids1, supervoxel_ids2)) + def _get_edges( + edges_message: EdgesMsg + ) -> Tuple[np.ndarray, np.ndarray, 
np.ndarray]: + sv_ids1 = np.frombuffer(edges_message.node_ids1, basetypes.NODE_ID) + sv_ids2 = np.frombuffer(edges_message.node_ids2, basetypes.NODE_ID) affinities = np.frombuffer(edges_message.affinities, basetypes.EDGE_AFFINITY) areas = np.frombuffer(edges_message.areas, basetypes.EDGE_AREA) - return edges, affinities, areas + return Edges(sv_ids1, sv_ids2, affinities, areas) chunk_edges = ChunkEdgesMsg() zstd_decompressor_obj = zstd.ZstdDecompressor().decompressobj() @@ -38,15 +40,12 @@ def _get_edges(edges_message: EdgesMsg) -> Tuple[np.ndarray, np.ndarray, np.ndar chunk_edges.ParseFromString(file_content) # in, between and cross - in_edges, in_affinities, in_areas = _get_edges(chunk_edges.in_chunk) - bt_edges, bt_affinities, bt_areas = _get_edges(chunk_edges.between_chunk) - cx_edges, cx_affinities, cx_areas = _get_edges(chunk_edges.cross_chunk) - - edges = np.concatenate([in_edges, bt_edges, cx_edges]) - affinities = np.concatenate([in_affinities, bt_affinities, cx_affinities]) - areas = np.concatenate([in_areas, bt_areas, cx_areas]) + edges_dict = {} + edges_dict[IN_CHUNK] = _get_edges(chunk_edges.in_chunk) + edges_dict[BT_CHUNK] = _get_edges(chunk_edges.between_chunk) + edges_dict[CX_CHUNK] = _get_edges(chunk_edges.cross_chunk) - return edges, affinities, areas + return edges_dict def get_chunk_edges( @@ -68,19 +67,16 @@ def get_chunk_edges( # filename format - edges_x_y_z.serialization.compression fnames.append(f"edges_{chunk_str}.proto.zst") - edges = np.array([], basetypes.NODE_ID).reshape(0, 2) - affinities = np.array([], basetypes.EDGE_AFFINITY) - areas = np.array([], basetypes.EDGE_AREA) - - st = ( + storage = ( Storage(edges_dir, n_threads=cv_threads) if cv_threads > 1 else SimpleStorage(edges_dir) ) - files = [] - with st: - files = st.get_files(fnames) + chunk_edge_dicts = [] + + with storage: + files = storage.get_files(fnames) for _file in files: # cv error if _file["error"]: @@ -88,12 +84,9 @@ def get_chunk_edges( # empty chunk if not 
_file["content"]: continue - _edges, _affinities, _areas = _decompress_edges(_file["content"]) - edges = np.concatenate([edges, _edges]) - affinities = np.concatenate([affinities, _affinities]) - areas = np.concatenate([areas, _areas]) - - return edges, affinities, areas + edges_dict = _decompress_edges(_file["content"]) + chunk_edge_dicts.append(edges_dict) + return concatenate_chunk_edges(chunk_edge_dicts) def put_chunk_edges( @@ -127,17 +120,17 @@ def _get_edges(edge_type: str) -> EdgesMsg: return edges_proto chunk_edges = ChunkEdgesMsg() - chunk_edges.in_chunk.CopyFrom(_get_edges("in")) - chunk_edges.between_chunk.CopyFrom(_get_edges("between")) - chunk_edges.cross_chunk.CopyFrom(_get_edges("cross")) + chunk_edges.in_chunk.CopyFrom(_get_edges(IN_CHUNK)) + chunk_edges.between_chunk.CopyFrom(_get_edges(BT_CHUNK)) + chunk_edges.cross_chunk.CopyFrom(_get_edges(CX_CHUNK)) cctx = zstd.ZstdCompressor(level=compression_level) chunk_str = "_".join(str(coord) for coord in chunk_coordinates) # filename format - edges_x_y_z.serialization.compression file = f"edges_{chunk_str}.proto.zst" - with Storage(edges_dir) as st: - st.put_file( + with Storage(edges_dir) as storage: + storage.put_file( file_path=file, content=cctx.compress(chunk_edges.SerializeToString()), compress=None, From 62cefcc4213f8d2abff26c6eb7d86d797f6f086f Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 14 Aug 2019 18:21:21 -0400 Subject: [PATCH 0120/1097] clearer documentation --- pychunkedgraph/backend/chunkedgraph.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 7ef44f2d9..3ab8b15e6 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3090,12 +3090,13 @@ def get_subgraph_edges_v2( cv_threads = 1, verbose: bool = True) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ - 1. determine chunk ids - 2. read edges of those chunks - 3. 
determine node ids that are part of the given agglomeration - 4. filter those edges - 5. for each edge (v1,v2) - active if parent(v1) == parent(v2), inactive otherwise - 6. return the active edges + 1. get level 2 children ids belonging to the agglomeration + 2. get relevant chunk ids from level 2 ids + 3. read edges from cloud storage + 4. get supervoxel ids from level 2 ids + 5. filter the edges with supervoxel ids + 6. for each edge (v1,v2) - active if parent(v1) == parent(v2), inactive otherwise + 7. return the active edges """ # child_ids = self.get_children(node_ids, flatten=True) def _get_subgraph_layer2_edges(chunk_ids) -> \ From e9649af2fc2ef71a6cedcc36aef006845070556f Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 14 Aug 2019 20:00:02 -0400 Subject: [PATCH 0121/1097] get active edges --- pychunkedgraph/backend/chunkedgraph.py | 67 ++++++++++++---------- pychunkedgraph/backend/utils/edge_utils.py | 41 ++++++++++++- 2 files changed, 76 insertions(+), 32 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 3ab8b15e6..4f5c86e39 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -45,7 +45,7 @@ from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, NamedTuple -from .utils.edge_utils import concatenate_chunk_edges +from .utils.edge_utils import concatenate_chunk_edges, filter_edges, flatten_parents_children from pychunkedgraph.io.edge_storage import get_chunk_edges HOME = os.path.expanduser("~") @@ -3083,12 +3083,14 @@ def _get_subgraph_layer2_edges(node_ids) -> \ def get_subgraph_edges_v2( - self, agglomeration_id: np.uint64, + self, + agglomeration_id: np.uint64, bounding_box: Optional[Sequence[Sequence[int]]] = None, bb_is_coordinate: bool = False, connected_edges=True, - cv_threads = 1, - verbose: bool = True) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + cv_threads=1, + verbose: bool = True, + ) -> 
Tuple[np.ndarray, np.ndarray, np.ndarray]: """ 1. get level 2 children ids belonging to the agglomeration 2. get relevant chunk ids from level 2 ids @@ -3098,47 +3100,50 @@ def get_subgraph_edges_v2( 6. for each edge (v1,v2) - active if parent(v1) == parent(v2), inactive otherwise 7. return the active edges """ - # child_ids = self.get_children(node_ids, flatten=True) - def _get_subgraph_layer2_edges(chunk_ids) -> \ - Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: + + def _read_edges( + chunk_ids + ) -> Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: return get_chunk_edges( self.edges_dir, [self.get_chunk_coordinates(chunk_id) for chunk_id in chunk_ids], - cv_threads) + cv_threads, + ) timings = {} - timings['total'] = time.time() + timings["total"] = time.time() - timings['determine_chunks_ids'] = time.time() + timings["determine_chunks_ids"] = time.time() bounding_box = self.normalize_bounding_box(bounding_box, bb_is_coordinate) - # Layer 3+ - child_ids = self._get_subgraph_higher_layer_nodes( - node_id=agglomeration_id, bounding_box=bounding_box, - return_layers=[2], verbose=verbose)[2] - - child_chunk_ids = self.get_chunk_ids_from_node_ids(child_ids) - chunk_ids = np.unique(child_chunk_ids) - - # chunk_ids = np.array([self.get_chunk_id(None, 1, *chunk) for chunk in chunks]) - timings['determine_chunks_ids'] = time.time() - timings['determine_chunks_ids'] - - timings['reading_edges'] = time.time() + layer_nodes_d = self._get_subgraph_higher_layer_nodes( + node_id=agglomeration_id, + bounding_box=bounding_box, + return_layers=[2], + verbose=verbose, + ) + chunk_ids = np.unique(self.get_chunk_ids_from_node_ids(layer_nodes_d[2])) + timings["determine_chunks_ids"] = time.time() - timings["determine_chunks_ids"] + + timings["reading_edges"] = time.time() cg_threads = 4 chunk_edge_dicts = mu.multithread_func( - _get_subgraph_layer2_edges, + _read_edges, np.array_split(chunk_ids, cg_threads), - n_threads=cg_threads, debug=False) - 
timings['reading_edges'] = time.time() - timings['reading_edges'] + n_threads=cg_threads, + debug=False, + ) + timings["reading_edges"] = time.time() - timings["reading_edges"] edges_dict = concatenate_chunk_edges(chunk_edge_dicts) + children_d = self.get_children(layer_nodes_d[2]) - timings['filtering_edges'] = time.time() - edges = filter_edges(, edges_dict) - timings['filtering_edges'] = time.time() - timings['filtering_edges'] - - timings['total'] = time.time() - timings['total'] + timings["filtering_edges"] = time.time() + edges = filter_edges(sv_ids, edges_dict) + edges = get_active_edges(edges, children_d) + timings["filtering_edges"] = time.time() - timings["filtering_edges"] - return timings, edges, affinities, areas + timings["total"] = time.time() - timings["total"] + return timings, edges.get_pairs(), edges.affinities, edges.areas def get_subgraph_nodes(self, agglomeration_id: np.uint64, diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index 07327be47..d78875310 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -9,7 +9,7 @@ def concatenate_chunk_edges(chunk_edge_dicts: list) -> dict: - """combine edge_dicts of all chunks into one edge_dict""" + """combine edge_dicts of multiple chunks into one edge_dict""" edges_dict = {} for edge_type in [IN_CHUNK, BT_CHUNK, CX_CHUNK]: sv_ids1 = [] @@ -45,3 +45,42 @@ def filter_edges(node_ids: np.ndarray, edges_dict: dict) -> Edges: affinities.append(edges.affinities[filtered]) areas.append(edges.areas[filtered]) return Edges(ids1, ids2, affinities, areas) + + +def flatten_parents_children(children_d: dict) -> [np.ndarray, np.ndarray]: + """ + given a dictionary - d["parent_id"] = [children] + return [[parent_id]*len(children), children] + """ + parent_ids = [] + child_ids = [] + for parent_id, children in children_d.items(): + parent_ids.append([parent_id] * children.size) + child_ids.append(children) + 
parent_ids = np.concatenate(parent_ids) + child_ids = np.concatenate(child_ids) + return parent_ids, child_ids + + +def get_active_edges(edges: Edges, parent_children_d: dict) -> Edges: + """ + get edges [(v1, v2) ...] where parent(v1) == parent(v2) + assume connected if v1 and v2 belong to same connected component + """ + l2_ids, sv_ids = flatten_parents_children(parent_children_d) + child_parent_d = {k: v for k, v in zip(sv_ids, l2_ids)} + + sv_ids1 = edges.node_ids1 + sv_ids2 = edges.node_ids2 + affinities = edges.affinities + areas = edges.areas + parent_ids1 = np.array([child_parent_d[sv_id] for sv_id in sv_ids1]) + parent_ids2 = np.array([child_parent_d[sv_id] for sv_id in sv_ids2]) + + sv_ids1 = sv_ids1[parent_ids1 == parent_ids2] + sv_ids2 = sv_ids2[parent_ids1 == parent_ids2] + affinities = affinities[parent_ids1 == parent_ids2] + areas = areas[parent_ids1 == parent_ids2] + + return Edges(sv_ids1, sv_ids2, affinities, areas) + From 553cd6915f0dae0a2b22759d7d568356443b12a6 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 14 Aug 2019 20:12:24 -0400 Subject: [PATCH 0122/1097] edges dir as arguemtn --- pychunkedgraph/backend/chunkedgraph.py | 3 ++- pychunkedgraph/backend/utils/edge_utils.py | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 4f5c86e39..de307d09a 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3085,6 +3085,7 @@ def _get_subgraph_layer2_edges(node_ids) -> \ def get_subgraph_edges_v2( self, agglomeration_id: np.uint64, + edges_dir: str, bounding_box: Optional[Sequence[Sequence[int]]] = None, bb_is_coordinate: bool = False, connected_edges=True, @@ -3105,7 +3106,7 @@ def _read_edges( chunk_ids ) -> Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: return get_chunk_edges( - self.edges_dir, + edges_dir, [self.get_chunk_coordinates(chunk_id) for chunk_id in chunk_ids], 
cv_threads, ) diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index d78875310..baa52537f 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -83,4 +83,3 @@ def get_active_edges(edges: Edges, parent_children_d: dict) -> Edges: areas = areas[parent_ids1 == parent_ids2] return Edges(sv_ids1, sv_ids2, affinities, areas) - From 4b332ca028e99f29d01df0ba673f41060c066fdc Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 14 Aug 2019 21:05:05 -0400 Subject: [PATCH 0123/1097] fix: convert view to list --- pychunkedgraph/backend/chunkedgraph.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index de307d09a..cde9bb805 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -45,7 +45,8 @@ from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, NamedTuple -from .utils.edge_utils import concatenate_chunk_edges, filter_edges, flatten_parents_children +from .utils.edge_utils import ( + concatenate_chunk_edges, filter_edges, flatten_parents_children, get_active_edges) from pychunkedgraph.io.edge_storage import get_chunk_edges HOME = os.path.expanduser("~") @@ -3126,7 +3127,7 @@ def _read_edges( timings["determine_chunks_ids"] = time.time() - timings["determine_chunks_ids"] timings["reading_edges"] = time.time() - cg_threads = 4 + cg_threads = 1 chunk_edge_dicts = mu.multithread_func( _read_edges, np.array_split(chunk_ids, cg_threads), @@ -3137,6 +3138,7 @@ def _read_edges( edges_dict = concatenate_chunk_edges(chunk_edge_dicts) children_d = self.get_children(layer_nodes_d[2]) + sv_ids = np.concatenate(list(children_d.values())) timings["filtering_edges"] = time.time() edges = filter_edges(sv_ids, edges_dict) From e3ddacff6726a90492ffb8a162a7561354ee62aa Mon Sep 17 00:00:00 2001 From: Akhilesh 
Halageri Date: Wed, 14 Aug 2019 21:59:56 -0400 Subject: [PATCH 0124/1097] use sv id if parent does not exist to make sure unique --- pychunkedgraph/backend/utils/edge_utils.py | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index baa52537f..3f0ad38a0 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -39,11 +39,18 @@ def filter_edges(node_ids: np.ndarray, edges_dict: dict) -> Edges: areas = [] for edge_type in [IN_CHUNK, BT_CHUNK, CX_CHUNK]: edges = edges_dict[edge_type] - filtered = edges.node_ids1 == node_ids - ids1.append(edges.node_ids1[filtered]) - ids2.append(edges.node_ids2[filtered]) - affinities.append(edges.affinities[filtered]) - areas.append(edges.areas[filtered]) + xsorted = np.argsort(edges.node_ids1) + indices = np.searchsorted(edges.node_ids1[xsorted], node_ids) + indices = indices[indices < xsorted.size] + + ids1.append(edges.node_ids1[indices]) + ids2.append(edges.node_ids2[indices]) + affinities.append(edges.affinities[indices]) + areas.append(edges.areas[indices]) + ids1 = np.concatenate(ids1) + ids2 = np.concatenate(ids2) + affinities = np.concatenate(affinities) + areas = np.concatenate(areas) return Edges(ids1, ids2, affinities, areas) @@ -74,8 +81,8 @@ def get_active_edges(edges: Edges, parent_children_d: dict) -> Edges: sv_ids2 = edges.node_ids2 affinities = edges.affinities areas = edges.areas - parent_ids1 = np.array([child_parent_d[sv_id] for sv_id in sv_ids1]) - parent_ids2 = np.array([child_parent_d[sv_id] for sv_id in sv_ids2]) + parent_ids1 = np.array([child_parent_d.get(sv_id, sv_id) for sv_id in sv_ids1]) + parent_ids2 = np.array([child_parent_d.get(sv_id, sv_id) for sv_id in sv_ids2]) sv_ids1 = sv_ids1[parent_ids1 == parent_ids2] sv_ids2 = sv_ids2[parent_ids1 == parent_ids2] From e74726bf9850423f5176c78384f3574d9cb52da2 Mon Sep 17 00:00:00 2001 
From: Akhilesh Halageri Date: Thu, 15 Aug 2019 09:54:02 -0400 Subject: [PATCH 0125/1097] utils refactor --- pychunkedgraph/backend/chunkedgraph.py | 2 +- pychunkedgraph/backend/utils/edge_utils.py | 5 ++-- pychunkedgraph/utils/general.py | 33 ++++++++++++++++++---- 3 files changed, 32 insertions(+), 8 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index cde9bb805..3b6c0e6a8 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -46,7 +46,7 @@ from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, NamedTuple from .utils.edge_utils import ( - concatenate_chunk_edges, filter_edges, flatten_parents_children, get_active_edges) + concatenate_chunk_edges, filter_edges, get_active_edges) from pychunkedgraph.io.edge_storage import get_chunk_edges HOME = os.path.expanduser("~") diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index 3f0ad38a0..bfa29627b 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -5,6 +5,8 @@ from typing import Tuple import numpy as np + +from pychunkedgraph.utils.general import reverse_dictionary from ..definitions.edges import Edges, IN_CHUNK, BT_CHUNK, CX_CHUNK @@ -74,8 +76,7 @@ def get_active_edges(edges: Edges, parent_children_d: dict) -> Edges: get edges [(v1, v2) ...] 
where parent(v1) == parent(v2) assume connected if v1 and v2 belong to same connected component """ - l2_ids, sv_ids = flatten_parents_children(parent_children_d) - child_parent_d = {k: v for k, v in zip(sv_ids, l2_ids)} + child_parent_d = reverse_dictionary(parent_children_d) sv_ids1 = edges.node_ids1 sv_ids2 = edges.node_ids2 diff --git a/pychunkedgraph/utils/general.py b/pychunkedgraph/utils/general.py index 108762a2b..ca8da04b2 100644 --- a/pychunkedgraph/utils/general.py +++ b/pychunkedgraph/utils/general.py @@ -1,12 +1,19 @@ +""" +generic helper funtions +""" + +import numpy as np import redis import functools + def redis_job(redis_url, redis_channel): - ''' + """ Decorator factory Returns a decorator that connects to a redis instance and publish a message (return value of the function) when the job is done. - ''' + """ + def redis_job_decorator(func): r = redis.Redis.from_url(redis_url) @@ -16,7 +23,23 @@ def wrapper(*args, **kwargs): if not job_result: job_result = str(job_result) r.publish(redis_channel, job_result) - + return wrapper - - return redis_job_decorator \ No newline at end of file + + return redis_job_decorator + + +def reverse_dictionary(dictionary): + """ + given a dictionary - {key1 : [item1, item2 ...], key2 : [ite3, item4 ...]} + return {item1: key1, item2: key1, item3: key2, item4: key2 } + """ + keys = [] + vals = [] + for key, values in dictionary.items(): + keys.append([key] * len(values)) + vals.append(values) + keys = np.concatenate(keys) + vals = np.concatenate(vals) + + return {k: v for k, v in zip(vals, keys)} From 766714dad953e284b2b0982e18917245340b1605 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 15 Aug 2019 10:23:56 -0400 Subject: [PATCH 0126/1097] fix cannot use decompressobj multiple times --- pychunkedgraph/backend/utils/serializers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pychunkedgraph/backend/utils/serializers.py b/pychunkedgraph/backend/utils/serializers.py index 
287e9efce..0ac2f13f0 100644 --- a/pychunkedgraph/backend/utils/serializers.py +++ b/pychunkedgraph/backend/utils/serializers.py @@ -13,7 +13,7 @@ def __init__(self, serializer, deserializer, basetype=Any, compression_level=Non self._decompressor = None if compression_level: self._compressor = zstd.ZstdCompressor(level=compression_level) - self._decompressor = zstd.ZstdDecompressor().decompressobj() + self._decompressor = zstd.ZstdDecompressor() def serialize(self, obj): content = self._serializer(obj) @@ -23,7 +23,7 @@ def serialize(self, obj): def deserialize(self, obj): if self._decompressor: - obj = self._decompressor.decompress(obj) + obj = self._decompressor.decompressobj().decompress(obj) return self._deserializer(obj) @property From 0f60676ec19f01d4c40232b2464b9adfd899b5c9 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 15 Aug 2019 10:30:31 -0400 Subject: [PATCH 0127/1097] remove redundant function --- pychunkedgraph/backend/utils/edge_utils.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index bfa29627b..1fd8ee509 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -56,21 +56,6 @@ def filter_edges(node_ids: np.ndarray, edges_dict: dict) -> Edges: return Edges(ids1, ids2, affinities, areas) -def flatten_parents_children(children_d: dict) -> [np.ndarray, np.ndarray]: - """ - given a dictionary - d["parent_id"] = [children] - return [[parent_id]*len(children), children] - """ - parent_ids = [] - child_ids = [] - for parent_id, children in children_d.items(): - parent_ids.append([parent_id] * children.size) - child_ids.append(children) - parent_ids = np.concatenate(parent_ids) - child_ids = np.concatenate(child_ids) - return parent_ids, child_ids - - def get_active_edges(edges: Edges, parent_children_d: dict) -> Edges: """ get edges [(v1, v2) ...] 
where parent(v1) == parent(v2) From 916f96415114f5213bd0ec19b8debe39942e87d4 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 15 Aug 2019 12:07:48 -0400 Subject: [PATCH 0128/1097] wip --- pychunkedgraph/backend/chunkedgraph.py | 29 +++++++++++++++++++-- pychunkedgraph/backend/utils/column_keys.py | 5 ++++ 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 3b6c0e6a8..c0515ab53 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -68,6 +68,7 @@ def __init__(self, chunk_size: Tuple[np.uint64, np.uint64, np.uint64] = None, fan_out: Optional[np.uint64] = None, use_skip_connections: Optional[bool] = True, + edge_dir: Optional[str] = None, s_bits_atomic_layer: Optional[np.uint64] = 8, n_bits_root_counter: Optional[np.uint64] = 0, n_layers: Optional[np.uint64] = None, @@ -128,6 +129,9 @@ def __init__(self, self._chunk_size = self.check_and_write_table_parameters( column_keys.GraphSettings.ChunkSize, chunk_size, required=True, is_new=is_new) + self._edge_dir = self.check_and_write_table_parameters( + column_keys.GraphSettings.EdgeDir, edge_dir, + required=False, is_new=is_new) self._dataset_info["graph"] = {"chunk_size": self.chunk_size} @@ -3028,6 +3032,27 @@ def get_subgraph_edges(self, agglomeration_id: np.uint64, :return: edge list """ + if self._edge_dir: + return get_subgraph_edges_cs + return get_subgraph_edges_bt + + + def get_subgraph_edges_bt(self, agglomeration_id: np.uint64, + bounding_box: Optional[Sequence[Sequence[int]]] = None, + bb_is_coordinate: bool = False, + connected_edges=True, + verbose: bool = True + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """ Return all atomic edges between supervoxels belonging to the + specified agglomeration ID within the defined bounding box + + :param agglomeration_id: int + :param bounding_box: [[x_l, y_l, z_l], [x_h, y_h, z_h]] + :param bb_is_coordinate: bool + 
:param verbose: bool + :return: edge list + """ + def _get_subgraph_layer2_edges(node_ids) -> \ Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: return self.get_subgraph_chunk(node_ids, @@ -3080,10 +3105,10 @@ def _get_subgraph_layer2_edges(node_ids) -> \ (2, (time.time() - time_start) * 1000, n_child_ids, this_n_threads)) - return edges, affinities, areas + return edges, affinities, areas - def get_subgraph_edges_v2( + def get_subgraph_edges_cs( self, agglomeration_id: np.uint64, edges_dir: str, diff --git a/pychunkedgraph/backend/utils/column_keys.py b/pychunkedgraph/backend/utils/column_keys.py index 2231e020d..a31e9e8a2 100644 --- a/pychunkedgraph/backend/utils/column_keys.py +++ b/pychunkedgraph/backend/utils/column_keys.py @@ -179,6 +179,11 @@ class GraphSettings: family_id='0', serializer=serializers.NumPyValue(dtype=basetypes.SKIPCONNECTIONS)) + EdgeDir = _Column( + key=b'edge_dir', + family_id='0', + serializer=serializers.String('utf-8')) + class OperationLogs: OperationID = _Column( key=b'operation_id', From 6272f4288e231ca545cd90be346d1a2c54520214 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 15 Aug 2019 13:33:20 -0400 Subject: [PATCH 0129/1097] property edge storage directory --- pychunkedgraph/backend/chunkedgraph.py | 39 ++++++++++---------------- 1 file changed, 15 insertions(+), 24 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index c0515ab53..7d1010872 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -233,6 +233,10 @@ def bitmasks(self) -> Dict[int, int]: def cv_mesh_path(self) -> str: return "%s/%s" % (self._cv_path, self._mesh_dir) + @property + def cv_edges_path(self) -> str: + return self._edge_dir + @property def dataset_info(self) -> object: return self._dataset_info @@ -3033,25 +3037,13 @@ def get_subgraph_edges(self, agglomeration_id: np.uint64, """ if self._edge_dir: - return get_subgraph_edges_cs - 
return get_subgraph_edges_bt - - - def get_subgraph_edges_bt(self, agglomeration_id: np.uint64, - bounding_box: Optional[Sequence[Sequence[int]]] = None, - bb_is_coordinate: bool = False, - connected_edges=True, - verbose: bool = True - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - """ Return all atomic edges between supervoxels belonging to the - specified agglomeration ID within the defined bounding box - - :param agglomeration_id: int - :param bounding_box: [[x_l, y_l, z_l], [x_h, y_h, z_h]] - :param bb_is_coordinate: bool - :param verbose: bool - :return: edge list - """ + return self.get_subgraph_edges_v2( + agglomeration_id, + bounding_box=bounding_box, + bb_is_coordinate=bb_is_coordinate, + connected_edges=connected_edges, + verbose=verbose + ) def _get_subgraph_layer2_edges(node_ids) -> \ Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: @@ -3105,13 +3097,12 @@ def _get_subgraph_layer2_edges(node_ids) -> \ (2, (time.time() - time_start) * 1000, n_child_ids, this_n_threads)) - return edges, affinities, areas + return edges, affinities, areas - def get_subgraph_edges_cs( + def get_subgraph_edges_v2( self, agglomeration_id: np.uint64, - edges_dir: str, bounding_box: Optional[Sequence[Sequence[int]]] = None, bb_is_coordinate: bool = False, connected_edges=True, @@ -3132,7 +3123,7 @@ def _read_edges( chunk_ids ) -> Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: return get_chunk_edges( - edges_dir, + self._edge_dir, [self.get_chunk_coordinates(chunk_id) for chunk_id in chunk_ids], cv_threads, ) @@ -3171,7 +3162,7 @@ def _read_edges( timings["filtering_edges"] = time.time() - timings["filtering_edges"] timings["total"] = time.time() - timings["total"] - return timings, edges.get_pairs(), edges.affinities, edges.areas + return edges.get_pairs(), edges.affinities, edges.areas def get_subgraph_nodes(self, agglomeration_id: np.uint64, From 3adaa7963de1132b44dc9c2d30659190aa726c94 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 15 
Aug 2019 14:56:47 -0400 Subject: [PATCH 0130/1097] remove timing --- pychunkedgraph/backend/chunkedgraph.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 7d1010872..e4662f1ac 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3128,10 +3128,6 @@ def _read_edges( cv_threads, ) - timings = {} - timings["total"] = time.time() - - timings["determine_chunks_ids"] = time.time() bounding_box = self.normalize_bounding_box(bounding_box, bb_is_coordinate) layer_nodes_d = self._get_subgraph_higher_layer_nodes( node_id=agglomeration_id, @@ -3140,9 +3136,6 @@ def _read_edges( verbose=verbose, ) chunk_ids = np.unique(self.get_chunk_ids_from_node_ids(layer_nodes_d[2])) - timings["determine_chunks_ids"] = time.time() - timings["determine_chunks_ids"] - - timings["reading_edges"] = time.time() cg_threads = 1 chunk_edge_dicts = mu.multithread_func( _read_edges, @@ -3150,18 +3143,13 @@ def _read_edges( n_threads=cg_threads, debug=False, ) - timings["reading_edges"] = time.time() - timings["reading_edges"] edges_dict = concatenate_chunk_edges(chunk_edge_dicts) children_d = self.get_children(layer_nodes_d[2]) sv_ids = np.concatenate(list(children_d.values())) - timings["filtering_edges"] = time.time() edges = filter_edges(sv_ids, edges_dict) edges = get_active_edges(edges, children_d) - timings["filtering_edges"] = time.time() - timings["filtering_edges"] - - timings["total"] = time.time() - timings["total"] return edges.get_pairs(), edges.affinities, edges.areas From d085e81f54e490411ffbf1b269421da7f7838c58 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 15 Aug 2019 15:10:15 -0400 Subject: [PATCH 0131/1097] remove unused import --- pychunkedgraph/backend/utils/edge_utils.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pychunkedgraph/backend/utils/edge_utils.py 
b/pychunkedgraph/backend/utils/edge_utils.py index 1fd8ee509..5783a3ebb 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -2,8 +2,6 @@ helper functions for edge stuff """ -from typing import Tuple - import numpy as np from pychunkedgraph.utils.general import reverse_dictionary @@ -59,7 +57,7 @@ def filter_edges(node_ids: np.ndarray, edges_dict: dict) -> Edges: def get_active_edges(edges: Edges, parent_children_d: dict) -> Edges: """ get edges [(v1, v2) ...] where parent(v1) == parent(v2) - assume connected if v1 and v2 belong to same connected component + -> assume active if v1 and v2 belong to same connected component """ child_parent_d = reverse_dictionary(parent_children_d) From aa994ccbf329ffc4dc36ba4d5a427eaf0c9c3d68 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 15 Aug 2019 15:17:41 -0400 Subject: [PATCH 0132/1097] add edge_dir argument for ingestion --- pychunkedgraph/backend/chunkedgraph.py | 4 +--- pychunkedgraph/ingest/ingestion_utils.py | 5 +++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index e4662f1ac..e162fb672 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3098,8 +3098,7 @@ def _get_subgraph_layer2_edges(node_ids) -> \ n_child_ids, this_n_threads)) return edges, affinities, areas - - + def get_subgraph_edges_v2( self, agglomeration_id: np.uint64, @@ -3152,7 +3151,6 @@ def _read_edges( edges = get_active_edges(edges, children_d) return edges.get_pairs(), edges.affinities, edges.areas - def get_subgraph_nodes(self, agglomeration_id: np.uint64, bounding_box: Optional[Sequence[Sequence[int]]] = None, bb_is_coordinate: bool = False, diff --git a/pychunkedgraph/ingest/ingestion_utils.py b/pychunkedgraph/ingest/ingestion_utils.py index fdf3c3f4d..bf635ee7e 100644 --- a/pychunkedgraph/ingest/ingestion_utils.py +++ 
b/pychunkedgraph/ingest/ingestion_utils.py @@ -16,7 +16,7 @@ def initialize_chunkedgraph(cg_table_id, ws_cv_path, chunk_size, size, cg_mesh_dir, use_skip_connections=True, s_bits_atomic_layer=None, n_bits_root_counter=8, fan_out=2, instance_id=None, - project_id=None): + project_id=None, edge_dir = None): """ Initalizes a chunkedgraph on BigTable :param cg_table_id: str @@ -69,7 +69,8 @@ def initialize_chunkedgraph(cg_table_id, ws_cv_path, chunk_size, size, "use_skip_connections": use_skip_connections, "s_bits_atomic_layer": s_bits_atomic_layer, "n_bits_root_counter": n_bits_root_counter, - "is_new": True} + "is_new": True, + "edge_dir": edge_dir} if instance_id is not None: kwargs["instance_id"] = instance_id From 049be43f75674f420627f875a8eed298ca097e5f Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 15 Aug 2019 15:33:22 -0400 Subject: [PATCH 0133/1097] undo comment --- pychunkedgraph/backend/initialization/create.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pychunkedgraph/backend/initialization/create.py b/pychunkedgraph/backend/initialization/create.py index 8e8d95826..76f626585 100644 --- a/pychunkedgraph/backend/initialization/create.py +++ b/pychunkedgraph/backend/initialization/create.py @@ -146,11 +146,11 @@ def _process_component( u_cce_layers = np.unique(cce_layers) val_dict = {column_keys.Hierarchy.Child: node_ids} - # for cc_layer in u_cce_layers: - # layer_out_edges = chunk_out_edges[cce_layers == cc_layer] - # if layer_out_edges.size: - # col = column_keys.Connectivity.CrossChunkEdge[cc_layer] - # val_dict[col] = layer_out_edges + for cc_layer in u_cce_layers: + layer_out_edges = chunk_out_edges[cce_layers == cc_layer] + if layer_out_edges.size: + col = column_keys.Connectivity.CrossChunkEdge[cc_layer] + val_dict[col] = layer_out_edges r_key = serializers.serialize_uint64(parent_id) rows.append(cg_instance.mutate_row(r_key, val_dict, time_stamp=time_stamp)) From 71e63e7d2abdeaec98791f52a129a3cec7a7b5e3 
Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 21 Aug 2019 15:57:02 -0400 Subject: [PATCH 0134/1097] wip --- pychunkedgraph/backend/utils/edge_utils.py | 2 +- pychunkedgraph/ingest/ran_ingestion_v2.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index 5783a3ebb..16fa376fb 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -4,7 +4,7 @@ import numpy as np -from pychunkedgraph.utils.general import reverse_dictionary +from ...utils.general import reverse_dictionary from ..definitions.edges import Edges, IN_CHUNK, BT_CHUNK, CX_CHUNK diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index a91dafcab..70f7cfbd0 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -217,7 +217,7 @@ def _create_atomic_chunk(im_info, chunk_coord, edge_dir): return create_atomic_chunk(imanager, chunk_coord, edge_dir) -def create_atomic_chunk(imanager, chunk_coord, edge_dir): +def create_atomic_chunk(imanager, chunk_coord): """ Creates single atomic chunk""" chunk_coord = np.array(list(chunk_coord), dtype=np.int) From 8a7e5dbcf18e6a77e44ba51ddfe8856af9c455dd Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 22 Aug 2019 09:56:29 -0400 Subject: [PATCH 0135/1097] optional flag to get active edges --- pychunkedgraph/backend/chunkedgraph.py | 34 ++++++++++++++++---------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index e162fb672..2bb892e8e 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3038,7 +3038,7 @@ def get_subgraph_edges(self, agglomeration_id: np.uint64, if self._edge_dir: return self.get_subgraph_edges_v2( - agglomeration_id, + 
np.array(agglomeration_id), bounding_box=bounding_box, bb_is_coordinate=bb_is_coordinate, connected_edges=connected_edges, @@ -3101,21 +3101,23 @@ def _get_subgraph_layer2_edges(node_ids) -> \ def get_subgraph_edges_v2( self, - agglomeration_id: np.uint64, + agglomeration_ids: np.ndarray, bounding_box: Optional[Sequence[Sequence[int]]] = None, bb_is_coordinate: bool = False, connected_edges=True, cv_threads=1, + active_edges=True, verbose: bool = True, ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ - 1. get level 2 children ids belonging to the agglomeration + 1. get level 2 children ids belonging to the agglomerations 2. get relevant chunk ids from level 2 ids 3. read edges from cloud storage 4. get supervoxel ids from level 2 ids 5. filter the edges with supervoxel ids - 6. for each edge (v1,v2) - active if parent(v1) == parent(v2), inactive otherwise - 7. return the active edges + 6. optioanlly for each edge (v1,v2) active + if parent(v1) == parent(v2) inactive otherwise + 7. return the edges """ def _read_edges( @@ -3128,13 +3130,18 @@ def _read_edges( ) bounding_box = self.normalize_bounding_box(bounding_box, bb_is_coordinate) - layer_nodes_d = self._get_subgraph_higher_layer_nodes( - node_id=agglomeration_id, - bounding_box=bounding_box, - return_layers=[2], - verbose=verbose, - ) - chunk_ids = np.unique(self.get_chunk_ids_from_node_ids(layer_nodes_d[2])) + + chunk_ids = [] + for agglomeration_id in agglomeration_ids: + layer_nodes_d = self._get_subgraph_higher_layer_nodes( + node_id=agglomeration_id, + bounding_box=bounding_box, + return_layers=[2], + verbose=verbose, + ) + chunk_ids.append(self.get_chunk_ids_from_node_ids(layer_nodes_d[2])) + + chunk_ids = np.unique(np.concatenate(chunk_ids)) cg_threads = 1 chunk_edge_dicts = mu.multithread_func( _read_edges, @@ -3148,7 +3155,8 @@ def _read_edges( sv_ids = np.concatenate(list(children_d.values())) edges = filter_edges(sv_ids, edges_dict) - edges = get_active_edges(edges, children_d) + if active_edges: 
+ edges = get_active_edges(edges, children_d) return edges.get_pairs(), edges.affinities, edges.areas def get_subgraph_nodes(self, agglomeration_id: np.uint64, From 311c44e6b0557038b3f5d8bfcc42f803f4bdd8c4 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 22 Aug 2019 10:58:35 -0400 Subject: [PATCH 0136/1097] wip --- pychunkedgraph/backend/chunkedgraph.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 2bb892e8e..0ea6b5401 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3038,7 +3038,7 @@ def get_subgraph_edges(self, agglomeration_id: np.uint64, if self._edge_dir: return self.get_subgraph_edges_v2( - np.array(agglomeration_id), + np.array([agglomeration_id]), bounding_box=bounding_box, bb_is_coordinate=bb_is_coordinate, connected_edges=connected_edges, @@ -3130,8 +3130,8 @@ def _read_edges( ) bounding_box = self.normalize_bounding_box(bounding_box, bb_is_coordinate) - - chunk_ids = [] + + level2_ids = [] for agglomeration_id in agglomeration_ids: layer_nodes_d = self._get_subgraph_higher_layer_nodes( node_id=agglomeration_id, @@ -3139,19 +3139,20 @@ def _read_edges( return_layers=[2], verbose=verbose, ) - chunk_ids.append(self.get_chunk_ids_from_node_ids(layer_nodes_d[2])) - - chunk_ids = np.unique(np.concatenate(chunk_ids)) + level2_ids.append(layer_nodes_d[2]) + + level2_ids = np.concatenate(level2_ids) + chunk_ids = self.get_chunk_ids_from_node_ids(level2_ids) cg_threads = 1 chunk_edge_dicts = mu.multithread_func( _read_edges, - np.array_split(chunk_ids, cg_threads), + np.array_split(np.unique(chunk_ids), cg_threads), n_threads=cg_threads, debug=False, ) edges_dict = concatenate_chunk_edges(chunk_edge_dicts) - children_d = self.get_children(layer_nodes_d[2]) + children_d = self.get_children(level2_ids) sv_ids = np.concatenate(list(children_d.values())) edges = filter_edges(sv_ids, 
edges_dict) From d45b5b9d5131b591bc0efcc14c7c55150b2d5d1a Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 22 Aug 2019 15:31:21 -0400 Subject: [PATCH 0137/1097] wip --- pychunkedgraph/backend/chunkedgraph.py | 4 + pychunkedgraph/backend/utils/serializers.py | 8 +- pychunkedgraph/ingest/cli.py | 39 +++++- pychunkedgraph/ingest/ingestion_utils.py | 4 +- pychunkedgraph/ingest/ran_ingestion_v2.py | 129 ++++---------------- 5 files changed, 74 insertions(+), 110 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 0ea6b5401..467e6b215 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -932,6 +932,7 @@ def read_byte_rows( # Deserialize cells for row_key, column_dict in rows.items(): for column, cell_entries in column_dict.items(): + # print(column.key) for cell_entry in cell_entries: cell_entry.value = column.deserialize(cell_entry.value) # If no column array was requested, reattach single column's values directly to the row @@ -1775,6 +1776,7 @@ def add_atomic_edges_in_chunks(self, edge_id_dict: dict, def add_layer(self, layer_id: int, child_chunk_coords: Sequence[Sequence[int]], + parent_chunk_coord: Sequence[int] = None, time_stamp: Optional[datetime.datetime] = None, verbose: bool = True, n_threads: int = 20) -> None: """ Creates the abstract nodes for a given chunk in a given layer @@ -2072,6 +2074,8 @@ def _write_out_connected_components(args) -> None: if verbose: self.logger.debug("Time writing %d connected components in layer %d: %.3fs" % (len(ccs), layer_id, time.time() - time_start)) + print(str(parent_chunk_coord)) + return str(parent_chunk_coord) def get_atomic_cross_edge_dict(self, node_id: np.uint64, layer_ids: Sequence[int] = None): diff --git a/pychunkedgraph/backend/utils/serializers.py b/pychunkedgraph/backend/utils/serializers.py index 0ac2f13f0..2334fc904 100644 --- a/pychunkedgraph/backend/utils/serializers.py +++ 
b/pychunkedgraph/backend/utils/serializers.py @@ -12,18 +12,18 @@ def __init__(self, serializer, deserializer, basetype=Any, compression_level=Non self._compressor = None self._decompressor = None if compression_level: - self._compressor = zstd.ZstdCompressor(level=compression_level) - self._decompressor = zstd.ZstdDecompressor() + self._compressor = compression_level + self._decompressor = True def serialize(self, obj): content = self._serializer(obj) if self._compressor: - return self._compressor.compress(content) + return zstd.ZstdCompressor(level=self._compressor).compress(content) return content def deserialize(self, obj): if self._decompressor: - obj = self._decompressor.decompressobj().decompress(obj) + obj = zstd.ZstdDecompressor().decompressobj().decompress(obj) return self._deserializer(obj) @property diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index a2ed00ad9..b1dca28db 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -1,6 +1,7 @@ """ cli for running ingest """ +from collections import defaultdict import click from flask import current_app @@ -10,11 +11,17 @@ ingest_cli = AppGroup("ingest") +tasks = defaultdict(int) + def handle_job_result(*args, **kwargs): """handle worker return""" - with open("results.txt", "a") as results_f: - results_f.write(f"{str(args[0]['data'])}\n") + p_chunk_coord = str(args[0]['data']) + tasks[p_chunk_coord] += 1 + if tasks[p_chunk_coord] == 8: + print(f"{p_chunk_coord} done") + with open("results.txt", "a") as results_f: + results_f.write(f"{p_chunk_coord}\n") @ingest_cli.command("atomic") @@ -46,5 +53,33 @@ def run_ingest(storage_path, ws_cv_path, cg_table_id, edge_dir=None, n_chunks=-1 ) +@ingest_cli.command("layer") +@click.argument("storage_path", type=str) +@click.argument("ws_cv_path", type=str) +@click.argument("cg_table_id", type=str) +@click.argument("layer_id", type=int) +def create_abstract(storage_path, ws_cv_path, cg_table_id, layer_id=3): + """ + run 
ingestion job + eg: flask ingest layer \ + gs://ranl/scratch/pinky100_ca_com/agg \ + gs://neuroglancer/pinky100_v0/ws/pinky100_ca_com \ + akhilesh-pinky100-compressed \ + 3 + """ + assert layer_id > 2 + chunk_pubsub = current_app.redis.pubsub() + chunk_pubsub.subscribe(**{"ingest_channel": handle_job_result}) + chunk_pubsub.run_in_thread(sleep_time=0.1) + + ingest_into_chunkedgraph( + storage_path=storage_path, + ws_cv_path=ws_cv_path, + cg_table_id=cg_table_id, + start_layer=layer_id, + is_new=False + ) + + def init_ingest_cmds(app): app.cli.add_command(ingest_cli) diff --git a/pychunkedgraph/ingest/ingestion_utils.py b/pychunkedgraph/ingest/ingestion_utils.py index bf635ee7e..5553615f2 100644 --- a/pychunkedgraph/ingest/ingestion_utils.py +++ b/pychunkedgraph/ingest/ingestion_utils.py @@ -16,7 +16,7 @@ def initialize_chunkedgraph(cg_table_id, ws_cv_path, chunk_size, size, cg_mesh_dir, use_skip_connections=True, s_bits_atomic_layer=None, n_bits_root_counter=8, fan_out=2, instance_id=None, - project_id=None, edge_dir = None): + project_id=None, edge_dir=None, is_new=True): """ Initalizes a chunkedgraph on BigTable :param cg_table_id: str @@ -69,7 +69,7 @@ def initialize_chunkedgraph(cg_table_id, ws_cv_path, chunk_size, size, "use_skip_connections": use_skip_connections, "s_bits_atomic_layer": s_bits_atomic_layer, "n_bits_root_counter": n_bits_root_counter, - "is_new": True, + "is_new": is_new, "edge_dir": edge_dir} if instance_id is not None: diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 70f7cfbd0..4dcda09ea 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -37,37 +37,14 @@ def ingest_into_chunkedgraph( chunk_size=[512, 512, 128], use_skip_connections=True, fan_out=2, - aff_dtype=np.float32, size=None, instance_id=None, project_id=None, start_layer=1, edge_dir=None, n_chunks=None, + is_new=True ): - """ Creates a chunkedgraph from a Ran Agglomerattion 
- - :param storage_path: str - Google cloud bucket path (agglomeration) - example: gs://ranl-scratch/minnie_test_2 - :param ws_cv_path: str - Google cloud bucket path (watershed segmentation) - example: gs://microns-seunglab/minnie_v0/minnie10/ws_minnie_test_2/agg - :param cg_table_id: str - chunkedgraph table name - :param fan_out: int - fan out of chunked graph (2 == Octree) - :param aff_dtype: np.dtype - affinity datatype (np.float32 or np.float64) - :param instance_id: str - Google instance id - :param project_id: str - Google project id - :param start_layer: int - :param n_threads: list of ints - number of threads to use - :return: - """ storage_path = storage_path.strip("/") ws_cv_path = ws_cv_path.strip("/") @@ -85,9 +62,11 @@ def ingest_into_chunkedgraph( fan_out=fan_out, instance_id=instance_id, project_id=project_id, + edge_dir=edge_dir, + is_new=is_new ) - im = ingestionmanager.IngestionManager( + imanager = ingestionmanager.IngestionManager( storage_path=storage_path, cg_table_id=cg_table_id, n_layers=n_layers_agg, @@ -96,46 +75,12 @@ def ingest_into_chunkedgraph( data_version=4, ) - # if start_layer < 3: - create_atomic_chunks(im, edge_dir, n_chunks) - # create_abstract_layers(im, n_threads=n_threads[1], start_layer=start_layer) - - return im - - -def create_abstract_layers(im, start_layer=3, n_threads=1): - """ Creates abstract of chunkedgraph (> 2) - - :param im: IngestionManager - :param n_threads: int - number of threads to use - :return: - """ if start_layer < 3: - start_layer = 3 - - assert start_layer < int(im.cg.n_layers + 1) - - for layer_id in range(start_layer, int(im.cg.n_layers + 1)): - create_layer(im, layer_id, n_threads=n_threads) - + create_atomic_chunks(imanager, n_chunks) + create_layer(imanager, 3) -def create_layer(im, layer_id, block_size=100, n_threads=1): - """ Creates abstract layer of chunkedgraph - - Abstract layers have to be build in sequence. Abstract layers are all layers - above the first layer (1). 
`create_atomic_chunks` creates layer 2 as well. - Hence, this function is responsible for every creating layers > 2. - - :param im: IngestionManager - :param layer_id: int - > 2 - :param n_threads: int - number of threads to use - :return: - """ - assert layer_id > 2 +def create_layer(im, layer_id, block_size=100): child_chunk_coords = im.chunk_coords // im.cg.fan_out ** (layer_id - 3) child_chunk_coords = child_chunk_coords.astype(np.int) child_chunk_coords = np.unique(child_chunk_coords, axis=0) @@ -147,55 +92,36 @@ def create_layer(im, layer_id, block_size=100, n_threads=1): ) im_info = im.get_serialized_info() - multi_args = [] # Randomize chunks order = np.arange(len(parent_chunk_coords), dtype=np.int) np.random.shuffle(order) # Block chunks - block_size = min(block_size, int(np.ceil(len(order) / n_threads / 3))) n_blocks = int(len(order) / block_size) blocks = np.array_split(order, n_blocks) - for i_block, block in enumerate(blocks): - chunks = [] + jobs = 0 + for block in blocks: for idx in block: - chunks.append(child_chunk_coords[inds == idx]) - - multi_args.append([im_info, layer_id, len(order), n_blocks, i_block, chunks]) - - if n_threads == 1: - mu.multiprocess_func( - _create_layers, - multi_args, - n_threads=n_threads, - verbose=True, - debug=n_threads == 1, - ) - else: - mu.multisubprocess_func( - _create_layers, multi_args, n_threads=n_threads, suffix=f"{layer_id}" - ) - + jobs += 1 + parent_chunk_coord = parent_chunk_coords[idx] + current_app.test_q.enqueue( + _create_layer, + job_timeout="59m", + args=(im_info, layer_id, child_chunk_coords[inds == idx], parent_chunk_coord)) + print(f"{jobs} jobs queued") -def _create_layers(args): - """ Multiprocessing helper for create_layer """ - im_info, layer_id, n_chunks, n_blocks, i_block, chunks = args - im = ingestionmanager.IngestionManager(**im_info) - for i_chunk, child_chunk_coords in enumerate(chunks): - time_start = time.time() - - im.cg.add_layer(layer_id, child_chunk_coords, n_threads=8, 
verbose=True) - - print( - f"Layer {layer_id} - Job {i_block + 1} / {n_blocks} - " - f"{i_chunk + 1} / {len(chunks)} -- %.3fs" % (time.time() - time_start) - ) +@redis_job(REDIS_URL, "ingest_channel") +def _create_layer(im_info, layer_id, child_chunk_coords, parent_chunk_coord): + imanager = ingestionmanager.IngestionManager(**im_info) + return imanager.cg.add_layer( + layer_id, child_chunk_coords, + parent_chunk_coord=parent_chunk_coord) -def create_atomic_chunks(im, edge_dir, n_chunks): +def create_atomic_chunks(im, n_chunks): """ Creates all atomic chunks""" chunk_coords = list(im.chunk_coord_gen) np.random.shuffle(chunk_coords) @@ -206,21 +132,20 @@ def create_atomic_chunks(im, edge_dir, n_chunks): current_app.test_q.enqueue( _create_atomic_chunk, job_timeout="59m", - args=(im.get_serialized_info(), chunk_coord, edge_dir), + args=(im.get_serialized_info(), chunk_coord), ) @redis_job(REDIS_URL, "ingest_channel") -def _create_atomic_chunk(im_info, chunk_coord, edge_dir): +def _create_atomic_chunk(im_info, chunk_coord): """ Multiprocessing helper for create_atomic_chunks """ imanager = ingestionmanager.IngestionManager(**im_info) - return create_atomic_chunk(imanager, chunk_coord, edge_dir) + return create_atomic_chunk(imanager, chunk_coord) def create_atomic_chunk(imanager, chunk_coord): """ Creates single atomic chunk""" chunk_coord = np.array(list(chunk_coord), dtype=np.int) - edge_dict = collect_edge_data(imanager, chunk_coord) edge_dict = iu.postprocess_edge_data(imanager, edge_dict) mapping = collect_agglomeration_data(imanager, chunk_coord) @@ -242,7 +167,7 @@ def create_atomic_chunk(imanager, chunk_coord): no_edges = no_edges and not sv_ids1.size # if not no_edges: - # put_chunk_edges(edge_dir, chunk_coord, chunk_edges, ZSTD_COMPRESSION_LEVEL) + # put_chunk_edges(cg.edge_dir, chunk_coord, chunk_edges, ZSTD_COMPRESSION_LEVEL) add_atomic_edges(imanager.cg, chunk_coord, chunk_edges, isolated=isolated_ids) # to track workers completion From 
8b726a422a45d5f0db4bfb3df4b66614c4843e76 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 22 Aug 2019 21:55:19 -0400 Subject: [PATCH 0138/1097] redis status cli --- pychunkedgraph/app/__init__.py | 4 ++- pychunkedgraph/app/redis_cli.py | 43 +++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 1 deletion(-) create mode 100644 pychunkedgraph/app/redis_cli.py diff --git a/pychunkedgraph/app/__init__.py b/pychunkedgraph/app/__init__.py index 88c769f5c..a90281987 100644 --- a/pychunkedgraph/app/__init__.py +++ b/pychunkedgraph/app/__init__.py @@ -17,6 +17,7 @@ from pychunkedgraph.logging import jsonformatter # from pychunkedgraph.app import manifest_app_blueprint +from .redis_cli import init_redis_cmds from ..ingest.cli import init_ingest_cmds os.environ['TRAVIS_BRANCH'] = "IDONTKNOWWHYINEEDTHIS" @@ -78,4 +79,5 @@ def configure_app(app): app.redis = redis.Redis.from_url(app.config['REDIS_URL']) app.test_q = Queue('test', connection=app.redis) with app.app_context(): - init_ingest_cmds(app) \ No newline at end of file + init_ingest_cmds(app) + init_redis_cmds(app) \ No newline at end of file diff --git a/pychunkedgraph/app/redis_cli.py b/pychunkedgraph/app/redis_cli.py new file mode 100644 index 000000000..75d3b305a --- /dev/null +++ b/pychunkedgraph/app/redis_cli.py @@ -0,0 +1,43 @@ +""" +cli for redis jobs +""" +import os + +import click +from redis import Redis +from rq import Queue, Worker +from flask import current_app +from flask.cli import AppGroup + + +REDIS_HOST = os.environ.get("REDIS_SERVICE_HOST", "localhost") +REDIS_PORT = os.environ.get("REDIS_SERVICE_PORT", "6379") +REDIS_PASSWORD = os.environ.get("REDIS_PASSWORD", "dev") +REDIS_URL = f"redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0" + +redis_cli = AppGroup("redis") +connection=Redis( + host=os.environ['REDIS_SERVICE_HOST'],port=6379,db=0, password='dev') + +@redis_cli.command("status") +@click.argument("queue", type=str, default="test") +def get_status(queue="test"): + 
q=Queue(queue, connection=connection) + workers = Worker.all(queue=q) + print(f"Queue name \t: {queue}") + print(f"Jobs queued \t: {len(q)}") + print(f"Workers count \t: {len(workers)}") + print(f"Jobs failed \t: {q.failed_job_registry.count}") + + +@redis_cli.command("empty") +@click.argument("queue", type=str) +def empty_queue(queue): + q=Queue(queue, connection=connection) + job_count = len(q) + q.empty() + print(f"{job_count} jobs removed from {queue}.") + + +def init_redis_cmds(app): + app.cli.add_command(redis_cli) From 18d5c3cf5514f5c5fc7d6dba43b80f7c3c5bb39c Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 23 Aug 2019 11:40:02 -0400 Subject: [PATCH 0139/1097] job dependency --- pychunkedgraph/backend/chunkedgraph.py | 5 +-- pychunkedgraph/ingest/cli.py | 34 +++++++++++------- pychunkedgraph/ingest/ran_ingestion_v2.py | 43 +++++------------------ 3 files changed, 33 insertions(+), 49 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 467e6b215..8ff96dac2 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -2074,8 +2074,9 @@ def _write_out_connected_components(args) -> None: if verbose: self.logger.debug("Time writing %d connected components in layer %d: %.3fs" % (len(ccs), layer_id, time.time() - time_start)) - print(str(parent_chunk_coord)) - return str(parent_chunk_coord) + + result = np.concatenate([[layer_id], parent_chunk_coord]) + return result.tobytes() def get_atomic_cross_edge_dict(self, node_id: np.uint64, layer_ids: Sequence[int] = None): diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index b1dca28db..ba4f70a2e 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -3,25 +3,33 @@ """ from collections import defaultdict +import numpy as np import click from flask import current_app from flask.cli import AppGroup from .ran_ingestion_v2 import ingest_into_chunkedgraph +from 
.ran_ingestion_v2 import queue_parent ingest_cli = AppGroup("ingest") - -tasks = defaultdict(int) +imanager = None +tasks = defaultdict(list) def handle_job_result(*args, **kwargs): """handle worker return""" - p_chunk_coord = str(args[0]['data']) - tasks[p_chunk_coord] += 1 - if tasks[p_chunk_coord] == 8: - print(f"{p_chunk_coord} done") - with open("results.txt", "a") as results_f: - results_f.write(f"{p_chunk_coord}\n") + result = np.frombuffer(args[0]['data'], dtype=int) + layer_id = result[0] + chunk_coord = result[1:] + p_chunk_coord = chunk_coord // 2 + + tasks[str(p_chunk_coord)].append(chunk_coord) + if len(tasks[str(p_chunk_coord)]) == 8: + print(f"{p_chunk_coord} children done") + queue_parent(imanager, layer_id+1, p_chunk_coord, tasks.pop(str(p_chunk_coord))) + + with open("results.txt", "a") as results_f: + results_f.write(f"{chunk_coord}:{p_chunk_coord}:{len(tasks[str(p_chunk_coord)])}") @ingest_cli.command("atomic") @@ -29,16 +37,15 @@ def handle_job_result(*args, **kwargs): @click.argument("ws_cv_path", type=str) @click.argument("edge_dir", type=str) @click.argument("cg_table_id", type=str) -@click.argument("n_chunks", type=int) -def run_ingest(storage_path, ws_cv_path, cg_table_id, edge_dir=None, n_chunks=-1): +@click.argument("n_chunks", type=int, default=None) +def run_ingest(storage_path, ws_cv_path, cg_table_id, edge_dir=None, n_chunks=None): """ run ingestion job eg: flask ingest atomic \ gs://ranl/scratch/pinky100_ca_com/agg \ gs://neuroglancer/pinky100_v0/ws/pinky100_ca_com \ gs://akhilesh-test/edges/pinky-ingest-test \ - akhilesh-ingest-test \ - 1 + pinky100-compressed-all-layers """ chunk_pubsub = current_app.redis.pubsub() chunk_pubsub.subscribe(**{"ingest_channel": handle_job_result}) @@ -72,7 +79,8 @@ def create_abstract(storage_path, ws_cv_path, cg_table_id, layer_id=3): chunk_pubsub.subscribe(**{"ingest_channel": handle_job_result}) chunk_pubsub.run_in_thread(sleep_time=0.1) - ingest_into_chunkedgraph( + global imanager + imanager = 
ingest_into_chunkedgraph( storage_path=storage_path, ws_cv_path=ws_cv_path, cg_table_id=cg_table_id, diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 4dcda09ea..27662f057 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -75,42 +75,16 @@ def ingest_into_chunkedgraph( data_version=4, ) - if start_layer < 3: - create_atomic_chunks(imanager, n_chunks) - create_layer(imanager, 3) + create_atomic_chunks(imanager, n_chunks) + return imanager -def create_layer(im, layer_id, block_size=100): - child_chunk_coords = im.chunk_coords // im.cg.fan_out ** (layer_id - 3) - child_chunk_coords = child_chunk_coords.astype(np.int) - child_chunk_coords = np.unique(child_chunk_coords, axis=0) - - parent_chunk_coords = child_chunk_coords // im.cg.fan_out - parent_chunk_coords = parent_chunk_coords.astype(np.int) - parent_chunk_coords, inds = np.unique( - parent_chunk_coords, axis=0, return_inverse=True - ) - +def queue_parent(im, layer_id, parent_chunk_coord, child_chunk_coords): im_info = im.get_serialized_info() - - # Randomize chunks - order = np.arange(len(parent_chunk_coords), dtype=np.int) - np.random.shuffle(order) - - # Block chunks - n_blocks = int(len(order) / block_size) - blocks = np.array_split(order, n_blocks) - - jobs = 0 - for block in blocks: - for idx in block: - jobs += 1 - parent_chunk_coord = parent_chunk_coords[idx] - current_app.test_q.enqueue( - _create_layer, - job_timeout="59m", - args=(im_info, layer_id, child_chunk_coords[inds == idx], parent_chunk_coord)) - print(f"{jobs} jobs queued") + current_app.test_q.enqueue( + _create_layer, + job_timeout="59m", + args=(im_info, layer_id, child_chunk_coords, parent_chunk_coord)) @redis_job(REDIS_URL, "ingest_channel") @@ -171,7 +145,8 @@ def create_atomic_chunk(imanager, chunk_coord): add_atomic_edges(imanager.cg, chunk_coord, chunk_edges, isolated=isolated_ids) # to track workers completion - return 
str(chunk_coord) + result = np.concatenate([[2], chunk_coord]) + return result.tobytes() def _get_cont_chunk_coords(im, chunk_coord_a, chunk_coord_b): From 5874946edfb0c056648463cb144db9f15504ff8f Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 23 Aug 2019 14:58:13 -0400 Subject: [PATCH 0140/1097] more helper functions --- pychunkedgraph/app/redis_cli.py | 22 ++++++++++++++ pychunkedgraph/backend/chunkedgraph.py | 2 +- pychunkedgraph/ingest/cli.py | 37 +++-------------------- pychunkedgraph/ingest/ran_ingestion_v2.py | 6 +++- 4 files changed, 33 insertions(+), 34 deletions(-) diff --git a/pychunkedgraph/app/redis_cli.py b/pychunkedgraph/app/redis_cli.py index 75d3b305a..d012f5979 100644 --- a/pychunkedgraph/app/redis_cli.py +++ b/pychunkedgraph/app/redis_cli.py @@ -6,6 +6,7 @@ import click from redis import Redis from rq import Queue, Worker +from rq.job import Job from flask import current_app from flask.cli import AppGroup @@ -30,6 +31,27 @@ def get_status(queue="test"): print(f"Jobs failed \t: {q.failed_job_registry.count}") +@redis_cli.command("failed_ids") +@click.argument("queue", type=str) +def failed_jobs(queue): + q=Queue(queue, connection=connection) + ids = q.failed_job_registry.get_job_ids() + print("\n".join(ids)) + + +@redis_cli.command("failed_info") +@click.argument("queue", type=str) +@click.argument("id", type=str) +def failed_job_info(queue, id): + j=Job.fetch(id,connection=connection) + print("kwargs") + print(j.kwargs) + print("args") + print(j.args) + print("exception") + print(j.exc_info) + + @redis_cli.command("empty") @click.argument("queue", type=str) def empty_queue(queue): diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 8ff96dac2..b0a3df6fd 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -2076,7 +2076,7 @@ def _write_out_connected_components(args) -> None: (len(ccs), layer_id, time.time() - time_start)) result = 
np.concatenate([[layer_id], parent_chunk_coord]) - return result.tobytes() + return result.tobytes() def get_atomic_cross_edge_dict(self, node_id: np.uint64, layer_ids: Sequence[int] = None): diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index ba4f70a2e..337a10955 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -24,12 +24,13 @@ def handle_job_result(*args, **kwargs): p_chunk_coord = chunk_coord // 2 tasks[str(p_chunk_coord)].append(chunk_coord) - if len(tasks[str(p_chunk_coord)]) == 8: + n_children = len(tasks[str(p_chunk_coord)]) + if n_children == 8: print(f"{p_chunk_coord} children done") queue_parent(imanager, layer_id+1, p_chunk_coord, tasks.pop(str(p_chunk_coord))) with open("results.txt", "a") as results_f: - results_f.write(f"{chunk_coord}:{p_chunk_coord}:{len(tasks[str(p_chunk_coord)])}") + results_f.write(f"{chunk_coord}:{p_chunk_coord}:{n_children}\n") @ingest_cli.command("atomic") @@ -51,41 +52,13 @@ def run_ingest(storage_path, ws_cv_path, cg_table_id, edge_dir=None, n_chunks=No chunk_pubsub.subscribe(**{"ingest_channel": handle_job_result}) chunk_pubsub.run_in_thread(sleep_time=0.1) - ingest_into_chunkedgraph( - storage_path=storage_path, - ws_cv_path=ws_cv_path, - cg_table_id=cg_table_id, - edge_dir=edge_dir, - n_chunks=n_chunks, - ) - - -@ingest_cli.command("layer") -@click.argument("storage_path", type=str) -@click.argument("ws_cv_path", type=str) -@click.argument("cg_table_id", type=str) -@click.argument("layer_id", type=int) -def create_abstract(storage_path, ws_cv_path, cg_table_id, layer_id=3): - """ - run ingestion job - eg: flask ingest layer \ - gs://ranl/scratch/pinky100_ca_com/agg \ - gs://neuroglancer/pinky100_v0/ws/pinky100_ca_com \ - akhilesh-pinky100-compressed \ - 3 - """ - assert layer_id > 2 - chunk_pubsub = current_app.redis.pubsub() - chunk_pubsub.subscribe(**{"ingest_channel": handle_job_result}) - chunk_pubsub.run_in_thread(sleep_time=0.1) - global imanager imanager = 
ingest_into_chunkedgraph( storage_path=storage_path, ws_cv_path=ws_cv_path, cg_table_id=cg_table_id, - start_layer=layer_id, - is_new=False + edge_dir=edge_dir, + n_chunks=n_chunks, ) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 27662f057..fd25a7adc 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -15,6 +15,8 @@ import zstandard as zstd from flask import current_app from multiwrapper import multiprocessing_utils as mu +from rq import Queue +from redis import Redis from ..utils.general import redis_job from . import ingestionmanager, ingestion_utils as iu @@ -81,7 +83,9 @@ def ingest_into_chunkedgraph( def queue_parent(im, layer_id, parent_chunk_coord, child_chunk_coords): im_info = im.get_serialized_info() - current_app.test_q.enqueue( + connection = Redis(host=os.environ['REDIS_SERVICE_HOST'],port=6379,db=0, password='dev') + test_q = Queue('test', connection=connection) + test_q.enqueue( _create_layer, job_timeout="59m", args=(im_info, layer_id, child_chunk_coords, parent_chunk_coord)) From 7c1e62b32f7889ade915ea05f21e11ba2fb1dcac Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 23 Aug 2019 22:22:57 -0400 Subject: [PATCH 0141/1097] remove old code --- pychunkedgraph/backend/chunkedgraph.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index b0a3df6fd..893cde9c5 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -27,7 +27,6 @@ MulticutOperation, SplitOperation, ) -from pychunkedgraph.io.storage import get_chunk_edges from pychunkedgraph.io.edge_storage import get_chunk_edges # from pychunkedgraph.meshing import meshgen From 411e1dda509505727efe4653c2c0506365b464b0 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Sat, 24 Aug 2019 00:15:01 -0400 Subject: [PATCH 0142/1097] fix example command --- 
pychunkedgraph/ingest/cli.py | 39 +++++++++++++++++------ pychunkedgraph/ingest/ran_ingestion_v2.py | 8 +++++ 2 files changed, 38 insertions(+), 9 deletions(-) diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index 337a10955..fb6f66112 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -14,23 +14,43 @@ ingest_cli = AppGroup("ingest") imanager = None tasks = defaultdict(list) +layer_parent_children_counts = {} + + +def _get_children_count(chunk_coord, layer_id): + global imanager + child_chunk_coords = imanager.chunk_coords // imanager.cg.fan_out ** (layer_id - 3) + child_chunk_coords = child_chunk_coords.astype(np.int) + child_chunk_coords = np.unique(child_chunk_coords, axis=0) + + p_chunk_coords = child_chunk_coords // imanager.cg.fan_out + p_chunk_coords = p_chunk_coords.astype(np.int) + p_chunk_coords, counts = np.unique(p_chunk_coords, axis=0, return_counts=True) + + return dict(zip([str(coord) for coord in p_chunk_coords], counts)) def handle_job_result(*args, **kwargs): """handle worker return""" + global imanager result = np.frombuffer(args[0]['data'], dtype=int) - layer_id = result[0] + layer_id = result[0] + 1 chunk_coord = result[1:] p_chunk_coord = chunk_coord // 2 - tasks[str(p_chunk_coord)].append(chunk_coord) - n_children = len(tasks[str(p_chunk_coord)]) - if n_children == 8: - print(f"{p_chunk_coord} children done") - queue_parent(imanager, layer_id+1, p_chunk_coord, tasks.pop(str(p_chunk_coord))) + children_count = len(tasks[str(p_chunk_coord)]) + + if not layer_id in layer_parent_children_counts: + layer_parent_children_counts[layer_id] = _get_children_count(p_chunk_coord, layer_id) + n_children = layer_parent_children_counts[layer_id][str(p_chunk_coord)] + + if children_count == n_children: + queue_parent(imanager, layer_id, p_chunk_coord, tasks.pop(str(p_chunk_coord))) + with open("completed.txt", "a") as completed_f: + completed_f.write(f"{p_chunk_coord}:{children_count}\n") with 
open("results.txt", "a") as results_f: - results_f.write(f"{chunk_coord}:{p_chunk_coord}:{n_children}\n") + results_f.write(f"{chunk_coord}:{p_chunk_coord}:{children_count}\n") @ingest_cli.command("atomic") @@ -45,8 +65,9 @@ def run_ingest(storage_path, ws_cv_path, cg_table_id, edge_dir=None, n_chunks=No eg: flask ingest atomic \ gs://ranl/scratch/pinky100_ca_com/agg \ gs://neuroglancer/pinky100_v0/ws/pinky100_ca_com \ - gs://akhilesh-test/edges/pinky-ingest-test \ - pinky100-compressed-all-layers + gs://akhilesh-test/edges/pinky100-ingest \ + akhilesh-pinky100 \ + 71400 """ chunk_pubsub = current_app.redis.pubsub() chunk_pubsub.subscribe(**{"ingest_channel": handle_job_result}) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index fd25a7adc..26457e957 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -77,6 +77,14 @@ def ingest_into_chunkedgraph( data_version=4, ) + # 85, 51, 17 - 3 71400 + # 42, 25, 8 - 4 8400 + # 21, 12, 4 - 5 1008 + # 10, 6, 2 - 6 120 + # 5, 3, 1 - 7 15 + # 2, 1, 0 - 8 2 + # 1, 0, 0 - 9 1 + create_atomic_chunks(imanager, n_chunks) return imanager From 792a0d6cc1c0b61aaebc26bb8c3c1905842bd4f0 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Sat, 24 Aug 2019 11:56:19 -0400 Subject: [PATCH 0143/1097] wip --- .../backend/initialization/create.py | 1 - pychunkedgraph/ingest/cli.py | 22 ++++++++++--------- pychunkedgraph/ingest/ran_ingestion_v2.py | 2 -- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/pychunkedgraph/backend/initialization/create.py b/pychunkedgraph/backend/initialization/create.py index 76f626585..c433c9bc0 100644 --- a/pychunkedgraph/backend/initialization/create.py +++ b/pychunkedgraph/backend/initialization/create.py @@ -48,7 +48,6 @@ def add_atomic_edges( graph, _, _, unique_ids = build_gt_graph(chunk_edge_ids, make_directed=True) ccs = connected_components(graph) - print(chunk_coord) parent_chunk_id = 
cg_instance.get_chunk_id( layer=2, x=chunk_coord[0], y=chunk_coord[1], z=chunk_coord[2] ) diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index fb6f66112..68938aa5b 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -9,6 +9,7 @@ from flask.cli import AppGroup from .ran_ingestion_v2 import ingest_into_chunkedgraph +from .ran_ingestion_v2 import create_atomic_chunks from .ran_ingestion_v2 import queue_parent ingest_cli = AppGroup("ingest") @@ -17,8 +18,7 @@ layer_parent_children_counts = {} -def _get_children_count(chunk_coord, layer_id): - global imanager +def _get_children_count(imanager, chunk_coord, layer_id): child_chunk_coords = imanager.chunk_coords // imanager.cg.fan_out ** (layer_id - 3) child_chunk_coords = child_chunk_coords.astype(np.int) child_chunk_coords = np.unique(child_chunk_coords, axis=0) @@ -32,7 +32,7 @@ def _get_children_count(chunk_coord, layer_id): def handle_job_result(*args, **kwargs): """handle worker return""" - global imanager + global layer_parent_children_counts result = np.frombuffer(args[0]['data'], dtype=int) layer_id = result[0] + 1 chunk_coord = result[1:] @@ -41,16 +41,16 @@ def handle_job_result(*args, **kwargs): children_count = len(tasks[str(p_chunk_coord)]) if not layer_id in layer_parent_children_counts: - layer_parent_children_counts[layer_id] = _get_children_count(p_chunk_coord, layer_id) + layer_parent_children_counts[layer_id] = _get_children_count(imanager, p_chunk_coord, layer_id) n_children = layer_parent_children_counts[layer_id][str(p_chunk_coord)] if children_count == n_children: - queue_parent(imanager, layer_id, p_chunk_coord, tasks.pop(str(p_chunk_coord))) + children = tasks.pop(str(p_chunk_coord)) + queue_parent(imanager, layer_id, p_chunk_coord, children) with open("completed.txt", "a") as completed_f: - completed_f.write(f"{p_chunk_coord}:{children_count}\n") - - with open("results.txt", "a") as results_f: - 
results_f.write(f"{chunk_coord}:{p_chunk_coord}:{children_count}\n") + completed_f.write(f"{p_chunk_coord}:{children_count}\n") + with open("children.txt", "a") as completed_f: + completed_f.write("\n".join(f"{str(child)}:{layer_id}" for child in children)) @ingest_cli.command("atomic") @@ -71,7 +71,6 @@ def run_ingest(storage_path, ws_cv_path, cg_table_id, edge_dir=None, n_chunks=No """ chunk_pubsub = current_app.redis.pubsub() chunk_pubsub.subscribe(**{"ingest_channel": handle_job_result}) - chunk_pubsub.run_in_thread(sleep_time=0.1) global imanager imanager = ingest_into_chunkedgraph( @@ -81,6 +80,9 @@ def run_ingest(storage_path, ws_cv_path, cg_table_id, edge_dir=None, n_chunks=No edge_dir=edge_dir, n_chunks=n_chunks, ) + print(type(imanager)) + create_atomic_chunks(imanager, n_chunks) + chunk_pubsub.run_in_thread(sleep_time=0.1) def init_ingest_cmds(app): diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 26457e957..c02b77018 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -84,8 +84,6 @@ def ingest_into_chunkedgraph( # 5, 3, 1 - 7 15 # 2, 1, 0 - 8 2 # 1, 0, 0 - 9 1 - - create_atomic_chunks(imanager, n_chunks) return imanager From 10e547499c32d9a9c34d451f473d9700d7ff4d1d Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Sat, 24 Aug 2019 12:07:47 -0400 Subject: [PATCH 0144/1097] wip --- pychunkedgraph/ingest/cli.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index 68938aa5b..82f48ab69 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -18,7 +18,7 @@ layer_parent_children_counts = {} -def _get_children_count(imanager, chunk_coord, layer_id): +def _get_children_count(chunk_coord, layer_id): child_chunk_coords = imanager.chunk_coords // imanager.cg.fan_out ** (layer_id - 3) child_chunk_coords = child_chunk_coords.astype(np.int) 
child_chunk_coords = np.unique(child_chunk_coords, axis=0) @@ -36,12 +36,13 @@ def handle_job_result(*args, **kwargs): result = np.frombuffer(args[0]['data'], dtype=int) layer_id = result[0] + 1 chunk_coord = result[1:] - p_chunk_coord = chunk_coord // 2 + p_chunk_coord = chunk_coord // imanager.cg.fan_out tasks[str(p_chunk_coord)].append(chunk_coord) children_count = len(tasks[str(p_chunk_coord)]) if not layer_id in layer_parent_children_counts: - layer_parent_children_counts[layer_id] = _get_children_count(imanager, p_chunk_coord, layer_id) + layer_parent_children_counts[layer_id] = _get_children_count(p_chunk_coord, layer_id) + print(layer_parent_children_counts[layer_id]) n_children = layer_parent_children_counts[layer_id][str(p_chunk_coord)] if children_count == n_children: @@ -80,9 +81,8 @@ def run_ingest(storage_path, ws_cv_path, cg_table_id, edge_dir=None, n_chunks=No edge_dir=edge_dir, n_chunks=n_chunks, ) - print(type(imanager)) - create_atomic_chunks(imanager, n_chunks) chunk_pubsub.run_in_thread(sleep_time=0.1) + create_atomic_chunks(imanager, n_chunks) def init_ingest_cmds(app): From 79756a902a00eee1a41c6853ce4c3c6754d93b36 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Sat, 24 Aug 2019 16:49:21 -0400 Subject: [PATCH 0145/1097] wip temp helpers --- pychunkedgraph/ingest/cli.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index 82f48ab69..6ce5ff36f 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -16,6 +16,7 @@ imanager = None tasks = defaultdict(list) layer_parent_children_counts = {} +layer_counts = defaultdict(int) def _get_children_count(chunk_coord, layer_id): @@ -42,16 +43,22 @@ def handle_job_result(*args, **kwargs): if not layer_id in layer_parent_children_counts: layer_parent_children_counts[layer_id] = _get_children_count(p_chunk_coord, layer_id) - print(layer_parent_children_counts[layer_id]) + p_keys = 
layer_parent_children_counts[layer_id].keys() + n_parents = len(p_keys) + total_children = sum([layer_parent_children_counts[layer_id][k] for k in p_keys]) + print(layer_id, n_parents, total_children) n_children = layer_parent_children_counts[layer_id][str(p_chunk_coord)] if children_count == n_children: children = tasks.pop(str(p_chunk_coord)) queue_parent(imanager, layer_id, p_chunk_coord, children) - with open("completed.txt", "a") as completed_f: - completed_f.write(f"{p_chunk_coord}:{children_count}\n") - with open("children.txt", "a") as completed_f: - completed_f.write("\n".join(f"{str(child)}:{layer_id}" for child in children)) + layer_counts[layer_id] += len(children) + with open("layers.txt", "w") as layers_f: + layers_f.write("\n".join([f"{str(layer)}: {layer_counts[layer]}" for layer in layer_counts])) + # with open("completed.txt", "a") as completed_f: + # completed_f.write(f"{p_chunk_coord}:{children_count}:{layer_id}\n") + # with open("children.txt", "a") as completed_f: + # completed_f.write("\n".join(f"{str(child)}:{layer_id}" for child in children)) @ingest_cli.command("atomic") From 8d40341f3f77b5221a55945a423250e87d0733e1 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Sat, 24 Aug 2019 20:42:11 -0400 Subject: [PATCH 0146/1097] debugging --- pychunkedgraph/ingest/cli.py | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index 6ce5ff36f..f10eb5828 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -16,7 +16,10 @@ imanager = None tasks = defaultdict(list) layer_parent_children_counts = {} -layer_counts = defaultdict(int) +layer_counts_parents = defaultdict(int) +layer_counts_children = defaultdict(int) + +listened_to = defaultdict(int) def _get_children_count(chunk_coord, layer_id): @@ -33,7 +36,7 @@ def _get_children_count(chunk_coord, layer_id): def handle_job_result(*args, **kwargs): """handle worker return""" 
- global layer_parent_children_counts + global layer_parent_children_counts, layer_counts_children, layer_counts_parents, listened_to result = np.frombuffer(args[0]['data'], dtype=int) layer_id = result[0] + 1 chunk_coord = result[1:] @@ -41,6 +44,11 @@ def handle_job_result(*args, **kwargs): tasks[str(p_chunk_coord)].append(chunk_coord) children_count = len(tasks[str(p_chunk_coord)]) + with open("layers_listened.txt", "w") as layers_f: + listened_to[layer_id-1] += 1 + layers_f.write( + "\n".join([f"{str(layer)}:{listened_to[layer]}" for layer in listened_to])) + if not layer_id in layer_parent_children_counts: layer_parent_children_counts[layer_id] = _get_children_count(p_chunk_coord, layer_id) p_keys = layer_parent_children_counts[layer_id].keys() @@ -52,13 +60,11 @@ def handle_job_result(*args, **kwargs): if children_count == n_children: children = tasks.pop(str(p_chunk_coord)) queue_parent(imanager, layer_id, p_chunk_coord, children) - layer_counts[layer_id] += len(children) - with open("layers.txt", "w") as layers_f: - layers_f.write("\n".join([f"{str(layer)}: {layer_counts[layer]}" for layer in layer_counts])) - # with open("completed.txt", "a") as completed_f: - # completed_f.write(f"{p_chunk_coord}:{children_count}:{layer_id}\n") - # with open("children.txt", "a") as completed_f: - # completed_f.write("\n".join(f"{str(child)}:{layer_id}" for child in children)) + layer_counts_children[layer_id] += len(children) + layer_counts_parents[layer_id] += 1 + with open("layers_completed.txt", "w") as layers_f: + layers_f.write( + "\n".join([f"{str(layer)}:{layer_counts_parents[layer]}:{layer_counts_children[layer]}" for layer in layer_counts_children])) @ingest_cli.command("atomic") From cc9d510a25bfbd9e683d7ed57cd5f3ca12e2826f Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Sat, 24 Aug 2019 21:37:46 -0400 Subject: [PATCH 0147/1097] rafactor to layer by layer --- pychunkedgraph/ingest/cli.py | 66 +++----------- pychunkedgraph/ingest/ran_ingestion_v2.py | 100 
++++++++++++---------- 2 files changed, 65 insertions(+), 101 deletions(-) diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index f10eb5828..1b5cea110 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -13,58 +13,18 @@ from .ran_ingestion_v2 import queue_parent ingest_cli = AppGroup("ingest") -imanager = None -tasks = defaultdict(list) -layer_parent_children_counts = {} -layer_counts_parents = defaultdict(int) -layer_counts_children = defaultdict(int) - -listened_to = defaultdict(int) - - -def _get_children_count(chunk_coord, layer_id): - child_chunk_coords = imanager.chunk_coords // imanager.cg.fan_out ** (layer_id - 3) - child_chunk_coords = child_chunk_coords.astype(np.int) - child_chunk_coords = np.unique(child_chunk_coords, axis=0) - - p_chunk_coords = child_chunk_coords // imanager.cg.fan_out - p_chunk_coords = p_chunk_coords.astype(np.int) - p_chunk_coords, counts = np.unique(p_chunk_coords, axis=0, return_counts=True) - - return dict(zip([str(coord) for coord in p_chunk_coords], counts)) +task_count = 0 def handle_job_result(*args, **kwargs): """handle worker return""" - global layer_parent_children_counts, layer_counts_children, layer_counts_parents, listened_to + global task_count result = np.frombuffer(args[0]['data'], dtype=int) - layer_id = result[0] + 1 - chunk_coord = result[1:] - p_chunk_coord = chunk_coord // imanager.cg.fan_out - tasks[str(p_chunk_coord)].append(chunk_coord) - children_count = len(tasks[str(p_chunk_coord)]) - - with open("layers_listened.txt", "w") as layers_f: - listened_to[layer_id-1] += 1 - layers_f.write( - "\n".join([f"{str(layer)}:{listened_to[layer]}" for layer in listened_to])) + layer = result[0] + task_count += 1 - if not layer_id in layer_parent_children_counts: - layer_parent_children_counts[layer_id] = _get_children_count(p_chunk_coord, layer_id) - p_keys = layer_parent_children_counts[layer_id].keys() - n_parents = len(p_keys) - total_children = 
sum([layer_parent_children_counts[layer_id][k] for k in p_keys]) - print(layer_id, n_parents, total_children) - n_children = layer_parent_children_counts[layer_id][str(p_chunk_coord)] - - if children_count == n_children: - children = tasks.pop(str(p_chunk_coord)) - queue_parent(imanager, layer_id, p_chunk_coord, children) - layer_counts_children[layer_id] += len(children) - layer_counts_parents[layer_id] += 1 - with open("layers_completed.txt", "w") as layers_f: - layers_f.write( - "\n".join([f"{str(layer)}:{layer_counts_parents[layer]}:{layer_counts_children[layer]}" for layer in layer_counts_children])) + with open(f"completed_{layer}.txt", "w") as completed_f: + completed_f.write(task_count) @ingest_cli.command("atomic") @@ -72,8 +32,8 @@ def handle_job_result(*args, **kwargs): @click.argument("ws_cv_path", type=str) @click.argument("edge_dir", type=str) @click.argument("cg_table_id", type=str) -@click.argument("n_chunks", type=int, default=None) -def run_ingest(storage_path, ws_cv_path, cg_table_id, edge_dir=None, n_chunks=None): +@click.argument("layer", type=int, default=None) +def run_ingest(storage_path, ws_cv_path, cg_table_id, edge_dir, layer): """ run ingestion job eg: flask ingest atomic \ @@ -81,21 +41,19 @@ def run_ingest(storage_path, ws_cv_path, cg_table_id, edge_dir=None, n_chunks=No gs://neuroglancer/pinky100_v0/ws/pinky100_ca_com \ gs://akhilesh-test/edges/pinky100-ingest \ akhilesh-pinky100 \ - 71400 + 2 """ chunk_pubsub = current_app.redis.pubsub() chunk_pubsub.subscribe(**{"ingest_channel": handle_job_result}) + chunk_pubsub.run_in_thread(sleep_time=0.1) - global imanager - imanager = ingest_into_chunkedgraph( + ingest_into_chunkedgraph( storage_path=storage_path, ws_cv_path=ws_cv_path, cg_table_id=cg_table_id, edge_dir=edge_dir, - n_chunks=n_chunks, + layer=layer ) - chunk_pubsub.run_in_thread(sleep_time=0.1) - create_atomic_chunks(imanager, n_chunks) def init_ingest_cmds(app): diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py 
b/pychunkedgraph/ingest/ran_ingestion_v2.py index c02b77018..6008ae3f0 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -42,7 +42,7 @@ def ingest_into_chunkedgraph( size=None, instance_id=None, project_id=None, - start_layer=1, + layer=1, edge_dir=None, n_chunks=None, is_new=True @@ -77,47 +77,53 @@ def ingest_into_chunkedgraph( data_version=4, ) - # 85, 51, 17 - 3 71400 - # 42, 25, 8 - 4 8400 - # 21, 12, 4 - 5 1008 - # 10, 6, 2 - 6 120 - # 5, 3, 1 - 7 15 - # 2, 1, 0 - 8 2 - # 1, 0, 0 - 9 1 - return imanager + if layer < 3: + create_atomic_chunks(imanager) + else: + create_layer(imanager, layer) + + +def create_layer(imanager, layer_id): + child_chunk_coords = imanager.chunk_coords // imanager.cg.fan_out ** (layer_id - 3) + child_chunk_coords = child_chunk_coords.astype(np.int) + child_chunk_coords = np.unique(child_chunk_coords, axis=0) + + parent_chunk_coords = child_chunk_coords // imanager.cg.fan_out + parent_chunk_coords = parent_chunk_coords.astype(np.int) + parent_chunk_coords, indices = np.unique(parent_chunk_coords, axis=0, + return_inverse=True) + order = np.arange(len(parent_chunk_coords), dtype=np.int) + np.random.shuffle(order) -def queue_parent(im, layer_id, parent_chunk_coord, child_chunk_coords): - im_info = im.get_serialized_info() - connection = Redis(host=os.environ['REDIS_SERVICE_HOST'],port=6379,db=0, password='dev') - test_q = Queue('test', connection=connection) - test_q.enqueue( - _create_layer, - job_timeout="59m", - args=(im_info, layer_id, child_chunk_coords, parent_chunk_coord)) + for parent_idx in order: + children = child_chunk_coords[indices == parent_idx] + current_app.test_q.enqueue( + _create_layer, + job_timeout="59m", + args=(imanager.get_serialized_info(), layer_id, children), + ) + print(f"Queued jobs: {len(current_app.test_q)}") @redis_job(REDIS_URL, "ingest_channel") -def _create_layer(im_info, layer_id, child_chunk_coords, parent_chunk_coord): +def 
_create_layer(im_info, layer_id, child_chunk_coords): imanager = ingestionmanager.IngestionManager(**im_info) - return imanager.cg.add_layer( - layer_id, child_chunk_coords, - parent_chunk_coord=parent_chunk_coord) + imanager.cg.add_layer(layer_id, child_chunk_coords, n_threads=2) -def create_atomic_chunks(im, n_chunks): +def create_atomic_chunks(imanager): """ Creates all atomic chunks""" - chunk_coords = list(im.chunk_coord_gen) + chunk_coords = list(imanager.chunk_coord_gen) np.random.shuffle(chunk_coords) - print(len(chunk_coords)) - - for chunk_coord in chunk_coords[:n_chunks]: + for chunk_coord in chunk_coords: current_app.test_q.enqueue( _create_atomic_chunk, job_timeout="59m", - args=(im.get_serialized_info(), chunk_coord), + args=(imanager.get_serialized_info(), chunk_coord), ) + print(f"Queued jobs: {len(current_app.test_q)}") @redis_job(REDIS_URL, "ingest_channel") @@ -159,10 +165,10 @@ def create_atomic_chunk(imanager, chunk_coord): return result.tobytes() -def _get_cont_chunk_coords(im, chunk_coord_a, chunk_coord_b): +def _get_cont_chunk_coords(imanager, chunk_coord_a, chunk_coord_b): """ Computes chunk coordinates that compute data between the named chunks - :param im: IngestionManagaer + :param imanager: IngestionManagaer :param chunk_coord_a: np.ndarray array of three ints :param chunk_coord_b: np.ndarray @@ -193,7 +199,7 @@ def _get_cont_chunk_coords(im, chunk_coord_a, chunk_coord_b): if [dx, dy, dz][dir_dim] == 0: continue - if im.is_out_of_bounce(c_chunk_coord): + if imanager.is_out_of_bounce(c_chunk_coord): continue c_chunk_coords.append(c_chunk_coord) @@ -201,10 +207,10 @@ def _get_cont_chunk_coords(im, chunk_coord_a, chunk_coord_b): return c_chunk_coords -def collect_edge_data(im, chunk_coord): +def collect_edge_data(imanager, chunk_coord): """ Loads edge for single chunk - :param im: IngestionManager + :param imanager: IngestionManager :param chunk_coord: np.ndarray array of three ints :param aff_dtype: np.dtype @@ -213,11 +219,11 @@ def 
collect_edge_data(im, chunk_coord): """ subfolder = "chunked_rg" - base_path = f"{im.storage_path}/{subfolder}/" + base_path = f"{imanager.storage_path}/{subfolder}/" chunk_coord = np.array(chunk_coord) - chunk_id = im.cg.get_chunk_id( + chunk_id = imanager.cg.get_chunk_id( layer=1, x=chunk_coord[0], y=chunk_coord[1], z=chunk_coord[2] ) @@ -227,7 +233,7 @@ def collect_edge_data(im, chunk_coord): for y in [chunk_coord[1] - 1, chunk_coord[1]]: for z in [chunk_coord[2] - 1, chunk_coord[2]]: - if im.is_out_of_bounce(np.array([x, y, z])): + if imanager.is_out_of_bounce(np.array([x, y, z])): continue # EDGES WITHIN CHUNKS @@ -240,18 +246,18 @@ def collect_edge_data(im, chunk_coord): diff[dim] = d adjacent_chunk_coord = chunk_coord + diff - adjacent_chunk_id = im.cg.get_chunk_id( + adjacent_chunk_id = imanager.cg.get_chunk_id( layer=1, x=adjacent_chunk_coord[0], y=adjacent_chunk_coord[1], z=adjacent_chunk_coord[2], ) - if im.is_out_of_bounce(adjacent_chunk_coord): + if imanager.is_out_of_bounce(adjacent_chunk_coord): continue c_chunk_coords = _get_cont_chunk_coords( - im, chunk_coord, adjacent_chunk_coord + imanager, chunk_coord, adjacent_chunk_coord ) larger_id = np.max([chunk_id, adjacent_chunk_id]) @@ -293,10 +299,10 @@ def collect_edge_data(im, chunk_coord): continue if swap[file["filename"]]: - this_dtype = [im.edge_dtype[1], im.edge_dtype[0]] + im.edge_dtype[2:] + this_dtype = [imanager.edge_dtype[1], imanager.edge_dtype[0]] + imanager.edge_dtype[2:] content = np.frombuffer(file["content"], dtype=this_dtype) else: - content = np.frombuffer(file["content"], dtype=im.edge_dtype) + content = np.frombuffer(file["content"], dtype=imanager.edge_dtype) data.append(content) @@ -334,25 +340,25 @@ def _read_agg_files(filenames, base_path): return edge_list -def collect_agglomeration_data(im, chunk_coord): +def collect_agglomeration_data(imanager, chunk_coord): """ Collects agglomeration information & builds connected component mapping - :param im: IngestionManager + :param 
imanager: IngestionManager :param chunk_coord: np.ndarray array of three ints :return: dictionary """ subfolder = "remap" - base_path = f"{im.storage_path}/{subfolder}/" + base_path = f"{imanager.storage_path}/{subfolder}/" chunk_coord = np.array(chunk_coord) - chunk_id = im.cg.get_chunk_id( + chunk_id = imanager.cg.get_chunk_id( layer=1, x=chunk_coord[0], y=chunk_coord[1], z=chunk_coord[2] ) filenames = [] - for mip_level in range(0, int(im.n_layers - 1)): + for mip_level in range(0, int(imanager.n_layers - 1)): x, y, z = np.array(chunk_coord / 2 ** mip_level, dtype=np.int) filenames.append(f"done_{mip_level}_{x}_{y}_{z}_{chunk_id}.data.zst") @@ -363,14 +369,14 @@ def collect_agglomeration_data(im, chunk_coord): adjacent_chunk_coord = chunk_coord + diff - adjacent_chunk_id = im.cg.get_chunk_id( + adjacent_chunk_id = imanager.cg.get_chunk_id( layer=1, x=adjacent_chunk_coord[0], y=adjacent_chunk_coord[1], z=adjacent_chunk_coord[2], ) - for mip_level in range(0, int(im.n_layers - 1)): + for mip_level in range(0, int(imanager.n_layers - 1)): x, y, z = np.array(adjacent_chunk_coord / 2 ** mip_level, dtype=np.int) filenames.append( f"done_{mip_level}_{x}_{y}_{z}_{adjacent_chunk_id}.data.zst" From b376dd85d9e5e9c410c8fa473b71fdeac3bfae3b Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Sat, 24 Aug 2019 21:52:12 -0400 Subject: [PATCH 0148/1097] updates --- pychunkedgraph/ingest/cli.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index 1b5cea110..bffc0f58e 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -9,8 +9,6 @@ from flask.cli import AppGroup from .ran_ingestion_v2 import ingest_into_chunkedgraph -from .ran_ingestion_v2 import create_atomic_chunks -from .ran_ingestion_v2 import queue_parent ingest_cli = AppGroup("ingest") task_count = 0 @@ -24,10 +22,10 @@ def handle_job_result(*args, **kwargs): task_count += 1 with 
open(f"completed_{layer}.txt", "w") as completed_f: - completed_f.write(task_count) + completed_f.write(str(task_count)) -@ingest_cli.command("atomic") +@ingest_cli.command("table") @click.argument("storage_path", type=str) @click.argument("ws_cv_path", type=str) @click.argument("edge_dir", type=str) @@ -36,7 +34,7 @@ def handle_job_result(*args, **kwargs): def run_ingest(storage_path, ws_cv_path, cg_table_id, edge_dir, layer): """ run ingestion job - eg: flask ingest atomic \ + eg: flask ingest table \ gs://ranl/scratch/pinky100_ca_com/agg \ gs://neuroglancer/pinky100_v0/ws/pinky100_ca_com \ gs://akhilesh-test/edges/pinky100-ingest \ From 6c632ae8b290f466a43b0e98d5af1f2c8e766193 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Sun, 25 Aug 2019 10:56:27 -0400 Subject: [PATCH 0149/1097] wip --- pychunkedgraph/backend/chunkedgraph.py | 2 +- pychunkedgraph/ingest/cli.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 893cde9c5..66fc6604e 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -2074,7 +2074,7 @@ def _write_out_connected_components(args) -> None: self.logger.debug("Time writing %d connected components in layer %d: %.3fs" % (len(ccs), layer_id, time.time() - time_start)) - result = np.concatenate([[layer_id], parent_chunk_coord]) + result = np.concatenate([[layer_id], [0]]) return result.tobytes() def get_atomic_cross_edge_dict(self, node_id: np.uint64, diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index bffc0f58e..df471e5df 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -17,7 +17,7 @@ def handle_job_result(*args, **kwargs): """handle worker return""" global task_count - result = np.frombuffer(args[0]['data'], dtype=int) + result = np.frombuffer(args[0]['data'], dtype=np.int32) layer = result[0] task_count += 1 From 
a5b433d2df84707a83c3cab2fc3aceb79ee59b56 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 26 Aug 2019 11:20:31 -0400 Subject: [PATCH 0150/1097] remove assert --- pychunkedgraph/app/app_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pychunkedgraph/app/app_utils.py b/pychunkedgraph/app/app_utils.py index 84b73e23e..7cd29d67c 100644 --- a/pychunkedgraph/app/app_utils.py +++ b/pychunkedgraph/app/app_utils.py @@ -47,8 +47,8 @@ def get_datastore_client(config): def get_cg(table_id): - assert table_id.startswith("fly") or table_id.startswith("golden") or \ - table_id.startswith("pinky100_rv") + # assert table_id.startswith("fly") or table_id.startswith("golden") or \ + # table_id.startswith("pinky100_rv") if table_id not in cache: instance_id = current_app.config['CHUNKGRAPH_INSTANCE_ID'] From ce9679c80cbf2676d0d9c43e34d24b51431f00f8 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 26 Aug 2019 13:56:20 -0400 Subject: [PATCH 0151/1097] remove redundant flag for column compression --- pychunkedgraph/backend/utils/serializers.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/pychunkedgraph/backend/utils/serializers.py b/pychunkedgraph/backend/utils/serializers.py index 2334fc904..4d0ca2153 100644 --- a/pychunkedgraph/backend/utils/serializers.py +++ b/pychunkedgraph/backend/utils/serializers.py @@ -9,20 +9,16 @@ def __init__(self, serializer, deserializer, basetype=Any, compression_level=Non self._serializer = serializer self._deserializer = deserializer self._basetype = basetype - self._compressor = None - self._decompressor = None - if compression_level: - self._compressor = compression_level - self._decompressor = True + self._compression_level = compression_level def serialize(self, obj): content = self._serializer(obj) - if self._compressor: - return zstd.ZstdCompressor(level=self._compressor).compress(content) + if self._compression_level: + return 
zstd.ZstdCompressor(level=self._compression_level).compress(content) return content def deserialize(self, obj): - if self._decompressor: + if self._compression_level: obj = zstd.ZstdDecompressor().decompressobj().decompress(obj) return self._deserializer(obj) From 5e3d32947aa0abeb55afbb6b4a70c56ef5b0c5d3 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 26 Aug 2019 16:02:17 -0400 Subject: [PATCH 0152/1097] fix: use only active edges to get connected components --- pychunkedgraph/ingest/ran_ingestion_v2.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 6008ae3f0..7883b3d87 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -21,7 +21,7 @@ from ..utils.general import redis_job from . import ingestionmanager, ingestion_utils as iu from ..backend.initialization.create import add_atomic_edges -from ..backend.definitions.edges import Edges, TYPES as EDGE_TYPES +from ..backend.definitions.edges import Edges, CX_CHUNK, TYPES as EDGE_TYPES from ..backend.utils import basetypes from ..io.edge_storage import put_chunk_edges @@ -139,7 +139,7 @@ def create_atomic_chunk(imanager, chunk_coord): edge_dict = collect_edge_data(imanager, chunk_coord) edge_dict = iu.postprocess_edge_data(imanager, edge_dict) mapping = collect_agglomeration_data(imanager, chunk_coord) - _, isolated_ids = define_active_edges(edge_dict, mapping) + active_edge_d, isolated_ids = define_active_edges(edge_dict, mapping) # flag to check if chunk has edges # avoid writing to cloud storage if there are no edges @@ -147,12 +147,15 @@ def create_atomic_chunk(imanager, chunk_coord): no_edges = True chunk_edges = {} for edge_type in EDGE_TYPES: - sv_ids1 = edge_dict[edge_type]["sv1"] - sv_ids2 = edge_dict[edge_type]["sv2"] - - ones = np.ones(len(sv_ids1)) - affinities = edge_dict[edge_type].get("aff", float("inf") * ones) 
- areas = edge_dict[edge_type].get("area", ones) + active = active_edge_d[edge_type] + sv_ids1 = edge_dict[edge_type]["sv1"][active] + sv_ids2 = edge_dict[edge_type]["sv2"][active] + + areas = np.ones(len(sv_ids1)) + affinities = float("inf") * areas + if not edge_type == CX_CHUNK: + affinities = edge_dict[edge_type]["aff"] + areas = edge_dict[edge_type]["area"] chunk_edges[edge_type] = Edges(sv_ids1, sv_ids2, affinities, areas) no_edges = no_edges and not sv_ids1.size From e5aa9aa3640921ff547609a7a1ba1d1c1e8c9311 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 27 Aug 2019 10:18:42 -0400 Subject: [PATCH 0153/1097] fix: isolate active edges, formatting, unused variables --- pychunkedgraph/backend/chunkedgraph.py | 5 +-- pychunkedgraph/backend/definitions/edges.py | 1 + pychunkedgraph/ingest/cli.py | 2 +- pychunkedgraph/ingest/ran_ingestion_v2.py | 49 +++++++++++++-------- 4 files changed, 34 insertions(+), 23 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 66fc6604e..baaa7a211 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -1775,7 +1775,6 @@ def add_atomic_edges_in_chunks(self, edge_id_dict: dict, def add_layer(self, layer_id: int, child_chunk_coords: Sequence[Sequence[int]], - parent_chunk_coord: Sequence[int] = None, time_stamp: Optional[datetime.datetime] = None, verbose: bool = True, n_threads: int = 20) -> None: """ Creates the abstract nodes for a given chunk in a given layer @@ -2074,8 +2073,8 @@ def _write_out_connected_components(args) -> None: self.logger.debug("Time writing %d connected components in layer %d: %.3fs" % (len(ccs), layer_id, time.time() - time_start)) - result = np.concatenate([[layer_id], [0]]) - return result.tobytes() + # to track worker completion + return str(layer_id) def get_atomic_cross_edge_dict(self, node_id: np.uint64, layer_ids: Sequence[int] = None): diff --git 
a/pychunkedgraph/backend/definitions/edges.py b/pychunkedgraph/backend/definitions/edges.py index f6481a15e..acab4663e 100644 --- a/pychunkedgraph/backend/definitions/edges.py +++ b/pychunkedgraph/backend/definitions/edges.py @@ -19,6 +19,7 @@ def __init__( affinities: np.ndarray, areas: np.ndarray, ): + assert node_ids1.size == node_ids2.size == affinities.size == areas.size self.node_ids1 = node_ids1 self.node_ids2 = node_ids2 self.affinities = affinities diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index df471e5df..a4036e9a4 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -38,7 +38,7 @@ def run_ingest(storage_path, ws_cv_path, cg_table_id, edge_dir, layer): gs://ranl/scratch/pinky100_ca_com/agg \ gs://neuroglancer/pinky100_v0/ws/pinky100_ca_com \ gs://akhilesh-test/edges/pinky100-ingest \ - akhilesh-pinky100 \ + akhilesh-pinky100-1 \ 2 """ chunk_pubsub = current_app.redis.pubsub() diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 7883b3d87..6a7708a7f 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -45,7 +45,7 @@ def ingest_into_chunkedgraph( layer=1, edge_dir=None, n_chunks=None, - is_new=True + is_new=True, ): storage_path = storage_path.strip("/") ws_cv_path = ws_cv_path.strip("/") @@ -65,7 +65,7 @@ def ingest_into_chunkedgraph( instance_id=instance_id, project_id=project_id, edge_dir=edge_dir, - is_new=is_new + is_new=is_new, ) imanager = ingestionmanager.IngestionManager( @@ -90,12 +90,14 @@ def create_layer(imanager, layer_id): parent_chunk_coords = child_chunk_coords // imanager.cg.fan_out parent_chunk_coords = parent_chunk_coords.astype(np.int) - parent_chunk_coords, indices = np.unique(parent_chunk_coords, axis=0, - return_inverse=True) + parent_chunk_coords, indices = np.unique( + parent_chunk_coords, axis=0, return_inverse=True + ) order = np.arange(len(parent_chunk_coords), 
dtype=np.int) np.random.shuffle(order) + print(f"Chunk count: {len(order)}") for parent_idx in order: children = child_chunk_coords[indices == parent_idx] current_app.test_q.enqueue( @@ -117,6 +119,7 @@ def create_atomic_chunks(imanager): chunk_coords = list(imanager.chunk_coord_gen) np.random.shuffle(chunk_coords) + print(f"Chunk count: {len(chunk_coords)}") for chunk_coord in chunk_coords: current_app.test_q.enqueue( _create_atomic_chunk, @@ -128,44 +131,49 @@ def create_atomic_chunks(imanager): @redis_job(REDIS_URL, "ingest_channel") def _create_atomic_chunk(im_info, chunk_coord): - """ Multiprocessing helper for create_atomic_chunks """ + """ helper for create_atomic_chunks """ imanager = ingestionmanager.IngestionManager(**im_info) return create_atomic_chunk(imanager, chunk_coord) -def create_atomic_chunk(imanager, chunk_coord): +def create_atomic_chunk(imanager, coord): """ Creates single atomic chunk""" - chunk_coord = np.array(list(chunk_coord), dtype=np.int) - edge_dict = collect_edge_data(imanager, chunk_coord) + coord = np.array(list(coord), dtype=np.int) + edge_dict = collect_edge_data(imanager, coord) edge_dict = iu.postprocess_edge_data(imanager, edge_dict) - mapping = collect_agglomeration_data(imanager, chunk_coord) + mapping = collect_agglomeration_data(imanager, coord) active_edge_d, isolated_ids = define_active_edges(edge_dict, mapping) # flag to check if chunk has edges # avoid writing to cloud storage if there are no edges # unnecessary write operation no_edges = True - chunk_edges = {} + chunk_edges_all = {} + chunk_edges_active = {} for edge_type in EDGE_TYPES: active = active_edge_d[edge_type] - sv_ids1 = edge_dict[edge_type]["sv1"][active] - sv_ids2 = edge_dict[edge_type]["sv2"][active] - + sv_ids1 = edge_dict[edge_type]["sv1"] + sv_ids2 = edge_dict[edge_type]["sv2"] areas = np.ones(len(sv_ids1)) affinities = float("inf") * areas if not edge_type == CX_CHUNK: affinities = edge_dict[edge_type]["aff"] areas = edge_dict[edge_type]["area"] + + 
chunk_edges_all[edge_type] = Edges(sv_ids1, sv_ids2, affinities, areas) + sv_ids1 = sv_ids1[active] + sv_ids2 = sv_ids2[active] + affinities = affinities[active] + areas = areas[active] chunk_edges[edge_type] = Edges(sv_ids1, sv_ids2, affinities, areas) no_edges = no_edges and not sv_ids1.size # if not no_edges: - # put_chunk_edges(cg.edge_dir, chunk_coord, chunk_edges, ZSTD_COMPRESSION_LEVEL) - add_atomic_edges(imanager.cg, chunk_coord, chunk_edges, isolated=isolated_ids) + # put_chunk_edges(cg.edge_dir, coord, chunk_edges_all, ZSTD_COMPRESSION_LEVEL) + add_atomic_edges(imanager.cg, coord, chunk_edges_active, isolated=isolated_ids) - # to track workers completion - result = np.concatenate([[2], chunk_coord]) - return result.tobytes() + # to track workers completion, layer = 2 + return str(2) def _get_cont_chunk_coords(imanager, chunk_coord_a, chunk_coord_b): @@ -302,7 +310,10 @@ def collect_edge_data(imanager, chunk_coord): continue if swap[file["filename"]]: - this_dtype = [imanager.edge_dtype[1], imanager.edge_dtype[0]] + imanager.edge_dtype[2:] + this_dtype = [ + imanager.edge_dtype[1], + imanager.edge_dtype[0], + ] + imanager.edge_dtype[2:] content = np.frombuffer(file["content"], dtype=this_dtype) else: content = np.frombuffer(file["content"], dtype=imanager.edge_dtype) From d055c21f1b3ab96548cd8098a133521fd343448e Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 27 Aug 2019 10:27:54 -0400 Subject: [PATCH 0154/1097] clean ugly code --- pychunkedgraph/app/redis_cli.py | 25 +++++++++++------------ pychunkedgraph/ingest/cli.py | 3 ++- pychunkedgraph/ingest/ran_ingestion_v2.py | 12 +++++------ pychunkedgraph/utils/general.py | 6 ++++++ 4 files changed, 25 insertions(+), 21 deletions(-) diff --git a/pychunkedgraph/app/redis_cli.py b/pychunkedgraph/app/redis_cli.py index d012f5979..d628a15cf 100644 --- a/pychunkedgraph/app/redis_cli.py +++ b/pychunkedgraph/app/redis_cli.py @@ -10,20 +10,19 @@ from flask import current_app from flask.cli import AppGroup 
+from ..utils.general import REDIS_HOST +from ..utils.general import REDIS_PORT +from ..utils.general import REDIS_PASSWORD -REDIS_HOST = os.environ.get("REDIS_SERVICE_HOST", "localhost") -REDIS_PORT = os.environ.get("REDIS_SERVICE_PORT", "6379") -REDIS_PASSWORD = os.environ.get("REDIS_PASSWORD", "dev") -REDIS_URL = f"redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0" redis_cli = AppGroup("redis") -connection=Redis( - host=os.environ['REDIS_SERVICE_HOST'],port=6379,db=0, password='dev') +connection = Redis(host=REDIS_HOST, port=REDIS_PORT, db=0, password=REDIS_PASSWORD) + @redis_cli.command("status") @click.argument("queue", type=str, default="test") def get_status(queue="test"): - q=Queue(queue, connection=connection) + q = Queue(queue, connection=connection) workers = Worker.all(queue=q) print(f"Queue name \t: {queue}") print(f"Jobs queued \t: {len(q)}") @@ -34,7 +33,7 @@ def get_status(queue="test"): @redis_cli.command("failed_ids") @click.argument("queue", type=str) def failed_jobs(queue): - q=Queue(queue, connection=connection) + q = Queue(queue, connection=connection) ids = q.failed_job_registry.get_job_ids() print("\n".join(ids)) @@ -43,19 +42,19 @@ def failed_jobs(queue): @click.argument("queue", type=str) @click.argument("id", type=str) def failed_job_info(queue, id): - j=Job.fetch(id,connection=connection) - print("kwargs") + j = Job.fetch(id, connection=connection) + print("KWARGS") print(j.kwargs) - print("args") + print("\nARGS") print(j.args) - print("exception") + print("\nEXCEPTION") print(j.exc_info) @redis_cli.command("empty") @click.argument("queue", type=str) def empty_queue(queue): - q=Queue(queue, connection=connection) + q = Queue(queue, connection=connection) job_count = len(q) q.empty() print(f"{job_count} jobs removed from {queue}.") diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index a4036e9a4..17cd35ac7 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -9,6 +9,7 @@ from 
flask.cli import AppGroup from .ran_ingestion_v2 import ingest_into_chunkedgraph +from .ran_ingestion_v2 import INGEST_CHANNEL ingest_cli = AppGroup("ingest") task_count = 0 @@ -42,7 +43,7 @@ def run_ingest(storage_path, ws_cv_path, cg_table_id, edge_dir, layer): 2 """ chunk_pubsub = current_app.redis.pubsub() - chunk_pubsub.subscribe(**{"ingest_channel": handle_job_result}) + chunk_pubsub.subscribe(**{INGEST_CHANNEL: handle_job_result}) chunk_pubsub.run_in_thread(sleep_time=0.1) ingest_into_chunkedgraph( diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 6a7708a7f..dfa143526 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -18,18 +18,16 @@ from rq import Queue from redis import Redis -from ..utils.general import redis_job +from ..utils.general import redis_job, REDIS_URL from . import ingestionmanager, ingestion_utils as iu from ..backend.initialization.create import add_atomic_edges from ..backend.definitions.edges import Edges, CX_CHUNK, TYPES as EDGE_TYPES from ..backend.utils import basetypes from ..io.edge_storage import put_chunk_edges -REDIS_HOST = os.environ.get("REDIS_SERVICE_HOST", "localhost") -REDIS_PORT = os.environ.get("REDIS_SERVICE_PORT", "6379") -REDIS_PASSWORD = os.environ.get("REDIS_PASSWORD", "dev") -REDIS_URL = f"redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0" + ZSTD_COMPRESSION_LEVEL = 17 +INGEST_CHANNEL = "ingest" def ingest_into_chunkedgraph( @@ -108,7 +106,7 @@ def create_layer(imanager, layer_id): print(f"Queued jobs: {len(current_app.test_q)}") -@redis_job(REDIS_URL, "ingest_channel") +@redis_job(REDIS_URL, INGEST_CHANNEL) def _create_layer(im_info, layer_id, child_chunk_coords): imanager = ingestionmanager.IngestionManager(**im_info) imanager.cg.add_layer(layer_id, child_chunk_coords, n_threads=2) @@ -129,7 +127,7 @@ def create_atomic_chunks(imanager): print(f"Queued jobs: {len(current_app.test_q)}") 
-@redis_job(REDIS_URL, "ingest_channel") +@redis_job(REDIS_URL, INGEST_CHANNEL) def _create_atomic_chunk(im_info, chunk_coord): """ helper for create_atomic_chunks """ imanager = ingestionmanager.IngestionManager(**im_info) diff --git a/pychunkedgraph/utils/general.py b/pychunkedgraph/utils/general.py index ca8da04b2..14143f501 100644 --- a/pychunkedgraph/utils/general.py +++ b/pychunkedgraph/utils/general.py @@ -7,6 +7,12 @@ import functools +REDIS_HOST = os.environ.get("REDIS_SERVICE_HOST", "localhost") +REDIS_PORT = os.environ.get("REDIS_SERVICE_PORT", "6379") +REDIS_PASSWORD = os.environ.get("REDIS_PASSWORD", "dev") +REDIS_URL = f"redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0" + + def redis_job(redis_url, redis_channel): """ Decorator factory From b1970e5de9bace67d4eb2cf70f7f7422ac010acd Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 27 Aug 2019 11:15:10 -0400 Subject: [PATCH 0155/1097] more refactor --- pychunkedgraph/backend/chunkedgraph.py | 5 +- .../backend/connectivity/__init__.py | 0 pychunkedgraph/backend/connectivity/search.py | 0 pychunkedgraph/backend/utils/column_keys.py | 5 ++ pychunkedgraph/ingest/ran_ingestion_v2.py | 2 +- .../io/{edge_storage.py => edges.py} | 59 ++++++++----------- 6 files changed, 34 insertions(+), 37 deletions(-) create mode 100644 pychunkedgraph/backend/connectivity/__init__.py create mode 100644 pychunkedgraph/backend/connectivity/search.py rename pychunkedgraph/io/{edge_storage.py => edges.py} (67%) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index baaa7a211..b15407a61 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -27,7 +27,7 @@ MulticutOperation, SplitOperation, ) -from pychunkedgraph.io.edge_storage import get_chunk_edges +from pychunkedgraph.io.edges import get_chunk_edges # from pychunkedgraph.meshing import meshgen from google.api_core.retry import Retry, if_exception_type @@ -46,7 +46,6 @@ from 
.utils.edge_utils import ( concatenate_chunk_edges, filter_edges, get_active_edges) -from pychunkedgraph.io.edge_storage import get_chunk_edges HOME = os.path.expanduser("~") N_DIGITS_UINT64 = len(str(np.iinfo(np.uint64).max)) @@ -3118,7 +3117,7 @@ def get_subgraph_edges_v2( 3. read edges from cloud storage 4. get supervoxel ids from level 2 ids 5. filter the edges with supervoxel ids - 6. optioanlly for each edge (v1,v2) active + 6. optionally for each edge (v1,v2) active if parent(v1) == parent(v2) inactive otherwise 7. return the edges """ diff --git a/pychunkedgraph/backend/connectivity/__init__.py b/pychunkedgraph/backend/connectivity/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pychunkedgraph/backend/connectivity/search.py b/pychunkedgraph/backend/connectivity/search.py new file mode 100644 index 000000000..e69de29bb diff --git a/pychunkedgraph/backend/utils/column_keys.py b/pychunkedgraph/backend/utils/column_keys.py index a31e9e8a2..6466c67da 100644 --- a/pychunkedgraph/backend/utils/column_keys.py +++ b/pychunkedgraph/backend/utils/column_keys.py @@ -110,6 +110,11 @@ class Connectivity: family_id='3', serializer=serializers.NumPyArray(dtype=basetypes.NODE_ID, shape=(-1, 2), compression_level=22)) + FakeEdges = _Column( + key=b'fake_edges', + family_id='0', + serializer=serializers.NumPyArray(dtype=basetypes.NODE_ID, shape=(-1, 2))) + class Hierarchy: Child = _Column( diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index dfa143526..2abf275db 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -23,7 +23,7 @@ from ..backend.initialization.create import add_atomic_edges from ..backend.definitions.edges import Edges, CX_CHUNK, TYPES as EDGE_TYPES from ..backend.utils import basetypes -from ..io.edge_storage import put_chunk_edges +from ..io.edges import put_chunk_edges ZSTD_COMPRESSION_LEVEL = 17 diff --git 
a/pychunkedgraph/io/edge_storage.py b/pychunkedgraph/io/edges.py similarity index 67% rename from pychunkedgraph/io/edge_storage.py rename to pychunkedgraph/io/edges.py index f49acd14f..6a519cffb 100644 --- a/pychunkedgraph/io/edge_storage.py +++ b/pychunkedgraph/io/edges.py @@ -17,6 +17,24 @@ from .protobuf.chunkEdges_pb2 import EdgesMsg, ChunkEdgesMsg +def serialize(edges: Edges) -> EdgesMsg: + edges_proto = EdgesMsg() + edges_proto.node_ids1 = edges.node_ids1.astype(basetypes.NODE_ID).tobytes() + edges_proto.node_ids2 = edges.node_ids2.astype(basetypes.NODE_ID).tobytes() + edges_proto.affinities = edges.affinities.astype(basetypes.EDGE_AFFINITY).tobytes() + edges_proto.areas = edges.areas.astype(basetypes.EDGE_AREA).tobytes() + + return edges_proto + + +def deserialize(edges_message: EdgesMsg) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + sv_ids1 = np.frombuffer(edges_message.node_ids1, basetypes.NODE_ID) + sv_ids2 = np.frombuffer(edges_message.node_ids2, basetypes.NODE_ID) + affinities = np.frombuffer(edges_message.affinities, basetypes.EDGE_AFFINITY) + areas = np.frombuffer(edges_message.areas, basetypes.EDGE_AREA) + return Edges(sv_ids1, sv_ids2, affinities, areas) + + def _decompress_edges(content: bytes) -> dict: """ :param content: zstd compressed bytes @@ -25,15 +43,6 @@ def _decompress_edges(content: bytes) -> dict: :rtype: dict """ - def _get_edges( - edges_message: EdgesMsg - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - sv_ids1 = np.frombuffer(edges_message.node_ids1, basetypes.NODE_ID) - sv_ids2 = np.frombuffer(edges_message.node_ids2, basetypes.NODE_ID) - affinities = np.frombuffer(edges_message.affinities, basetypes.EDGE_AFFINITY) - areas = np.frombuffer(edges_message.areas, basetypes.EDGE_AREA) - return Edges(sv_ids1, sv_ids2, affinities, areas) - chunk_edges = ChunkEdgesMsg() zstd_decompressor_obj = zstd.ZstdDecompressor().decompressobj() file_content = zstd_decompressor_obj.decompress(content) @@ -41,10 +50,9 @@ def _get_edges( # in, 
between and cross edges_dict = {} - edges_dict[IN_CHUNK] = _get_edges(chunk_edges.in_chunk) - edges_dict[BT_CHUNK] = _get_edges(chunk_edges.between_chunk) - edges_dict[CX_CHUNK] = _get_edges(chunk_edges.cross_chunk) - + edges_dict[IN_CHUNK] = deserialize(chunk_edges.in_chunk) + edges_dict[BT_CHUNK] = deserialize(chunk_edges.between_chunk) + edges_dict[CX_CHUNK] = deserialize(chunk_edges.cross_chunk) return edges_dict @@ -90,39 +98,24 @@ def get_chunk_edges( def put_chunk_edges( - edges_dir: str, - chunk_coordinates: np.ndarray, - chunk_edges_raw, - compression_level: int, + edges_dir: str, chunk_coordinates: np.ndarray, edges_d, compression_level: int ) -> None: """ :param edges_dir: cloudvolume storage path :type str: :param chunk_coordinates: chunk coords x,y,z :type np.ndarray: - :param chunk_edges_raw: chunk_edges_raw with keys "in", "cross", "between" + :param edges_d: edges_d with keys "in", "cross", "between" :type dict: :param compression_level: zstandard compression level (1-22, higher - better ratio) :type int: :return None: """ - def _get_edges(edge_type: str) -> EdgesMsg: - edges = chunk_edges_raw[edge_type] - edges_proto = EdgesMsg() - edges_proto.node_ids1 = edges.node_ids1.astype(basetypes.NODE_ID).tobytes() - edges_proto.node_ids2 = edges.node_ids2.astype(basetypes.NODE_ID).tobytes() - edges_proto.affinities = edges.affinities.astype( - basetypes.EDGE_AFFINITY - ).tobytes() - edges_proto.areas = edges.areas.astype(basetypes.EDGE_AREA).tobytes() - - return edges_proto - chunk_edges = ChunkEdgesMsg() - chunk_edges.in_chunk.CopyFrom(_get_edges(IN_CHUNK)) - chunk_edges.between_chunk.CopyFrom(_get_edges(BT_CHUNK)) - chunk_edges.cross_chunk.CopyFrom(_get_edges(CX_CHUNK)) + chunk_edges.in_chunk.CopyFrom(serialize(edges_d[IN_CHUNK])) + chunk_edges.between_chunk.CopyFrom(serialize(edges_d[BT_CHUNK])) + chunk_edges.cross_chunk.CopyFrom(serialize(edges_d[CX_CHUNK])) cctx = zstd.ZstdCompressor(level=compression_level) chunk_str = "_".join(str(coord) for coord 
in chunk_coordinates) From 39d36cf32fb72d746bdac4f68e97baa409cfd72e Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 27 Aug 2019 14:23:50 -0400 Subject: [PATCH 0156/1097] wip: fake edges --- pychunkedgraph/backend/chunkedgraph.py | 6 +--- pychunkedgraph/backend/connectivity/search.py | 30 +++++++++++++++++++ pychunkedgraph/backend/graphoperation.py | 26 ++++++++++++++++ pychunkedgraph/backend/utils/edge_utils.py | 4 +++ pychunkedgraph/backend/utils/helpers.py | 14 +++++++++ pychunkedgraph/io/edges.py | 3 +- 6 files changed, 76 insertions(+), 7 deletions(-) create mode 100644 pychunkedgraph/backend/utils/helpers.py diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index b15407a61..f5167d3d7 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3042,9 +3042,7 @@ def get_subgraph_edges(self, agglomeration_id: np.uint64, return self.get_subgraph_edges_v2( np.array([agglomeration_id]), bounding_box=bounding_box, - bb_is_coordinate=bb_is_coordinate, - connected_edges=connected_edges, - verbose=verbose + bb_is_coordinate=bb_is_coordinate ) def _get_subgraph_layer2_edges(node_ids) -> \ @@ -3106,10 +3104,8 @@ def get_subgraph_edges_v2( agglomeration_ids: np.ndarray, bounding_box: Optional[Sequence[Sequence[int]]] = None, bb_is_coordinate: bool = False, - connected_edges=True, cv_threads=1, active_edges=True, - verbose: bool = True, ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ 1. 
get level 2 children ids belonging to the agglomerations diff --git a/pychunkedgraph/backend/connectivity/search.py b/pychunkedgraph/backend/connectivity/search.py index e69de29bb..f0bb7265c 100644 --- a/pychunkedgraph/backend/connectivity/search.py +++ b/pychunkedgraph/backend/connectivity/search.py @@ -0,0 +1,30 @@ +from graph_tool.search import bfs_search +from graph_tool.search import BFSVisitor +from graph_tool.search import StopSearch + +from ..utils.basetypes import NODE_ID + + +class TargetVisitor(BFSVisitor): + def __init__(self, target): + self.target = target + + def discover_vertex(self, u: NODE_ID): + if u == self.target: + raise StopSearch + + +def check_reachability(g, sv1s, sv2s) -> List[bool]: + """ + for each pair (sv1, sv2) check if a path exists (BFS) + """ + + def _check_reachability(source, target): + try: + bfs_search(g, source, VisitorExample(target)) + except StopSearch: + return True + return False + + return [_check_reachability(source, target) for source, target in zip(sv1s, sv2s)] + diff --git a/pychunkedgraph/backend/graphoperation.py b/pychunkedgraph/backend/graphoperation.py index 6537598cf..fd0026d2c 100644 --- a/pychunkedgraph/backend/graphoperation.py +++ b/pychunkedgraph/backend/graphoperation.py @@ -10,6 +10,9 @@ from pychunkedgraph.backend import chunkedgraph_exceptions as cg_exceptions from pychunkedgraph.backend.root_lock import RootLock from pychunkedgraph.backend.utils import basetypes, column_keys, serializers +from .utils.helpers import get_bounding_box +from .flatgraph_utils import build_gt_graph +from .utils.edge_utils import add_fake_edges if TYPE_CHECKING: from pychunkedgraph.backend.chunkedgraph import ChunkedGraph @@ -421,6 +424,29 @@ def _update_root_ids(self) -> np.ndarray: def _apply( self, *, operation_id, timestamp ) -> Tuple[np.ndarray, np.ndarray, List["bigtable.row.Row"]]: + # if there is no path between sv1 and sv2 in the given subgraph + # add "fake" edges, these are stored in a row per chunk + # if there 
is path do nothing, conitnue building the new hierarchy + if self.cg._edge_dir: + assert self.source_coords != None + assert self.sink_coords != None + + root_ids = np.unique(self.cg.get_roots(self.added_edges.ravel())) + bbox = get_bounding_box(self.source_coords, self.sink_coords) + edges = self.cg.get_subgraph_edges_v2( + agglomeration_ids = root_ids, + bounding_box = bbox, + bb_is_coordinate = True, + cv_threads = 4, + active_edges = False + ) + + g, _, _, _ = build_gt_graph(edges, is_directed=False) + sv1s = self.added_edges[:, 0] + sv2s = self.added_edges[:, 1] + reachable = check_reachability(g, sv1s, sv2s) + add_fake_edges(self.added_edges[~reachable]) + new_root_ids, new_lvl2_ids, rows = cg_edits.add_edges( self.cg, operation_id, diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index 16fa376fb..4d82aa440 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -74,3 +74,7 @@ def get_active_edges(edges: Edges, parent_children_d: dict) -> Edges: areas = areas[parent_ids1 == parent_ids2] return Edges(sv_ids1, sv_ids2, affinities, areas) + + +def add_fake_edges(edges: np.ndarray): + pass \ No newline at end of file diff --git a/pychunkedgraph/backend/utils/helpers.py b/pychunkedgraph/backend/utils/helpers.py new file mode 100644 index 000000000..1b7b56e09 --- /dev/null +++ b/pychunkedgraph/backend/utils/helpers.py @@ -0,0 +1,14 @@ +def get_bounding_box( + source_coords: Sequence[Sequence[int]], + sink_coords: Sequence[Sequence[int]], + bb_offset: Tuple[int, int, int] = (120, 120, 12), +): + bb_offset = np.array(list(bb_offset)) + source_coords = np.array(source_coords) + sink_coords = np.array(sink_coords) + + coords = np.concatenate([source_coords, sink_coords]) + bounding_box = [np.min(coords, axis=0), np.max(coords, axis=0)] + bounding_box[0] -= bb_offset + bounding_box[1] += bb_offset + return bounding_box diff --git a/pychunkedgraph/io/edges.py 
b/pychunkedgraph/io/edges.py index 6a519cffb..02abaab81 100644 --- a/pychunkedgraph/io/edges.py +++ b/pychunkedgraph/io/edges.py @@ -23,7 +23,6 @@ def serialize(edges: Edges) -> EdgesMsg: edges_proto.node_ids2 = edges.node_ids2.astype(basetypes.NODE_ID).tobytes() edges_proto.affinities = edges.affinities.astype(basetypes.EDGE_AFFINITY).tobytes() edges_proto.areas = edges.areas.astype(basetypes.EDGE_AREA).tobytes() - return edges_proto @@ -57,7 +56,7 @@ def _decompress_edges(content: bytes) -> dict: def get_chunk_edges( - edges_dir: str, chunks_coordinates: List[np.ndarray], cv_threads: int + edges_dir: str, chunks_coordinates: List[np.ndarray], cv_threads: int = 1 ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ :param edges_dir: cloudvolume storage path From 555a5546a2760b895de25a992dd7cdd0b8a58fae Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 27 Aug 2019 15:23:47 -0400 Subject: [PATCH 0157/1097] wip --- pychunkedgraph/backend/chunkedgraph.py | 9 +++++---- pychunkedgraph/backend/chunkedgraph_edits.py | 7 ++++--- pychunkedgraph/backend/graphoperation.py | 20 ++++++++++---------- pychunkedgraph/backend/utils/edge_utils.py | 2 +- 4 files changed, 20 insertions(+), 18 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index f5167d3d7..c0ba99a01 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3041,8 +3041,8 @@ def get_subgraph_edges(self, agglomeration_id: np.uint64, if self._edge_dir: return self.get_subgraph_edges_v2( np.array([agglomeration_id]), - bounding_box=bounding_box, - bb_is_coordinate=bb_is_coordinate + bbox=bounding_box, + bbox_is_coordinate=bb_is_coordinate ) def _get_subgraph_layer2_edges(node_ids) -> \ @@ -3102,8 +3102,8 @@ def _get_subgraph_layer2_edges(node_ids) -> \ def get_subgraph_edges_v2( self, agglomeration_ids: np.ndarray, - bounding_box: Optional[Sequence[Sequence[int]]] = None, - bb_is_coordinate: bool = False, + bbox: 
Optional[Sequence[Sequence[int]]] = None, + bbox_is_coordinate: bool = False, cv_threads=1, active_edges=True, ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: @@ -3116,6 +3116,7 @@ def get_subgraph_edges_v2( 6. optionally for each edge (v1,v2) active if parent(v1) == parent(v2) inactive otherwise 7. return the edges + TODO read fake edges """ def _read_edges( diff --git a/pychunkedgraph/backend/chunkedgraph_edits.py b/pychunkedgraph/backend/chunkedgraph_edits.py index 5f42d9b99..7b2b048d4 100644 --- a/pychunkedgraph/backend/chunkedgraph_edits.py +++ b/pychunkedgraph/backend/chunkedgraph_edits.py @@ -227,9 +227,10 @@ def _read_cc_edges_thread(node_ids): cross_chunk_edge_dict, time_stamp)) - # Write atomic nodes - rows.extend(_write_atomic_merge_edges(cg, atomic_edges, affinities, areas, - time_stamp=time_stamp)) + if not self.cg._edge_dir: + # Write atomic nodes + rows.extend(_write_atomic_merge_edges(cg, atomic_edges, affinities, areas, + time_stamp=time_stamp)) # Propagate changes up the tree if cg.n_layers > 2: diff --git a/pychunkedgraph/backend/graphoperation.py b/pychunkedgraph/backend/graphoperation.py index fd0026d2c..95a090915 100644 --- a/pychunkedgraph/backend/graphoperation.py +++ b/pychunkedgraph/backend/graphoperation.py @@ -10,6 +10,7 @@ from pychunkedgraph.backend import chunkedgraph_exceptions as cg_exceptions from pychunkedgraph.backend.root_lock import RootLock from pychunkedgraph.backend.utils import basetypes, column_keys, serializers +from .connectivity.search import check_reachability from .utils.helpers import get_bounding_box from .flatgraph_utils import build_gt_graph from .utils.edge_utils import add_fake_edges @@ -329,6 +330,9 @@ def execute(self) -> "GraphEditOperation.Result": :return: Result of successful graph operation :rtype: GraphEditOperation.Result """ + return self._apply( + operation_id=root_lock.operation_id, timestamp=timestamp + ) root_ids = self._update_root_ids() with RootLock(self.cg, root_ids) as root_lock: @@ -426,26 
+430,22 @@ def _apply( ) -> Tuple[np.ndarray, np.ndarray, List["bigtable.row.Row"]]: # if there is no path between sv1 and sv2 in the given subgraph # add "fake" edges, these are stored in a row per chunk - # if there is path do nothing, conitnue building the new hierarchy + # if there is a path do nothing, continue building the new hierarchy if self.cg._edge_dir: assert self.source_coords != None assert self.sink_coords != None - root_ids = np.unique(self.cg.get_roots(self.added_edges.ravel())) - bbox = get_bounding_box(self.source_coords, self.sink_coords) edges = self.cg.get_subgraph_edges_v2( agglomeration_ids = root_ids, - bounding_box = bbox, - bb_is_coordinate = True, + bbox = get_bounding_box(self.source_coords, self.sink_coords), + bbox_is_coordinate = True, cv_threads = 4, active_edges = False ) - + return edges g, _, _, _ = build_gt_graph(edges, is_directed=False) - sv1s = self.added_edges[:, 0] - sv2s = self.added_edges[:, 1] - reachable = check_reachability(g, sv1s, sv2s) - add_fake_edges(self.added_edges[~reachable]) + reachable = check_reachability(g, self.added_edges[:,0], self.added_edges[:,1]) + add_fake_edges(self.added_edges[~reachable], timestamp=timestamp) new_root_ids, new_lvl2_ids, rows = cg_edits.add_edges( self.cg, diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index 4d82aa440..fbff84f4b 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -76,5 +76,5 @@ def get_active_edges(edges: Edges, parent_children_d: dict) -> Edges: return Edges(sv_ids1, sv_ids2, affinities, areas) -def add_fake_edges(edges: np.ndarray): +def add_fake_edges(edges: np.ndarray, timestamp): pass \ No newline at end of file From 4c1b8736aabac01c53ea36b7bcc5ff744dc974ca Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 27 Aug 2019 18:06:43 -0400 Subject: [PATCH 0158/1097] wip: import fixes --- pychunkedgraph/backend/chunkedgraph.py | 4 ++-- 
pychunkedgraph/backend/connectivity/search.py | 2 ++ pychunkedgraph/backend/utils/helpers.py | 4 ++++ pychunkedgraph/utils/general.py | 4 +++- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index c0ba99a01..49f15defe 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3128,7 +3128,7 @@ def _read_edges( cv_threads, ) - bounding_box = self.normalize_bounding_box(bounding_box, bb_is_coordinate) + bounding_box = self.normalize_bounding_box(bbox, bbox_is_coordinate) level2_ids = [] for agglomeration_id in agglomeration_ids: @@ -3136,7 +3136,7 @@ def _read_edges( node_id=agglomeration_id, bounding_box=bounding_box, return_layers=[2], - verbose=verbose, + verbose=False ) level2_ids.append(layer_nodes_d[2]) diff --git a/pychunkedgraph/backend/connectivity/search.py b/pychunkedgraph/backend/connectivity/search.py index f0bb7265c..c0e246565 100644 --- a/pychunkedgraph/backend/connectivity/search.py +++ b/pychunkedgraph/backend/connectivity/search.py @@ -1,3 +1,5 @@ +from typing import List + from graph_tool.search import bfs_search from graph_tool.search import BFSVisitor from graph_tool.search import StopSearch diff --git a/pychunkedgraph/backend/utils/helpers.py b/pychunkedgraph/backend/utils/helpers.py index 1b7b56e09..e7e201012 100644 --- a/pychunkedgraph/backend/utils/helpers.py +++ b/pychunkedgraph/backend/utils/helpers.py @@ -1,3 +1,7 @@ +from typing import Sequence, Tuple + +import numpy as np + def get_bounding_box( source_coords: Sequence[Sequence[int]], sink_coords: Sequence[Sequence[int]], diff --git a/pychunkedgraph/utils/general.py b/pychunkedgraph/utils/general.py index 14143f501..65643db7a 100644 --- a/pychunkedgraph/utils/general.py +++ b/pychunkedgraph/utils/general.py @@ -2,9 +2,11 @@ generic helper funtions """ +import os +import functools + import numpy as np import redis -import functools REDIS_HOST = 
os.environ.get("REDIS_SERVICE_HOST", "localhost") From 0739a297019bbacd1d1ab3d8206333c286857775 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 27 Aug 2019 19:03:22 -0400 Subject: [PATCH 0159/1097] wip --- pychunkedgraph/backend/chunkedgraph.py | 1 - pychunkedgraph/backend/graphoperation.py | 13 ++++++------- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 49f15defe..2a70238e5 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -930,7 +930,6 @@ def read_byte_rows( # Deserialize cells for row_key, column_dict in rows.items(): for column, cell_entries in column_dict.items(): - # print(column.key) for cell_entry in cell_entries: cell_entry.value = column.deserialize(cell_entry.value) # If no column array was requested, reattach single column's values directly to the row diff --git a/pychunkedgraph/backend/graphoperation.py b/pychunkedgraph/backend/graphoperation.py index 95a090915..04f7ef60f 100644 --- a/pychunkedgraph/backend/graphoperation.py +++ b/pychunkedgraph/backend/graphoperation.py @@ -330,9 +330,6 @@ def execute(self) -> "GraphEditOperation.Result": :return: Result of successful graph operation :rtype: GraphEditOperation.Result """ - return self._apply( - operation_id=root_lock.operation_id, timestamp=timestamp - ) root_ids = self._update_root_ids() with RootLock(self.cg, root_ids) as root_lock: @@ -341,6 +338,10 @@ def execute(self) -> "GraphEditOperation.Result": root_lock.locked_root_ids, lock_operation_ids ) + return self._apply( + operation_id=root_lock.operation_id, timestamp=timestamp + ) + new_root_ids, new_lvl2_ids, rows = self._apply( operation_id=root_lock.operation_id, timestamp=timestamp ) @@ -432,13 +433,11 @@ def _apply( # add "fake" edges, these are stored in a row per chunk # if there is a path do nothing, continue building the new hierarchy if self.cg._edge_dir: - assert 
self.source_coords != None - assert self.sink_coords != None + # assert self.source_coords != None + # assert self.sink_coords != None root_ids = np.unique(self.cg.get_roots(self.added_edges.ravel())) edges = self.cg.get_subgraph_edges_v2( agglomeration_ids = root_ids, - bbox = get_bounding_box(self.source_coords, self.sink_coords), - bbox_is_coordinate = True, cv_threads = 4, active_edges = False ) From 269a2a7bd225f2fb36c2984be7b7e2f15d10ce7b Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 27 Aug 2019 20:55:20 -0400 Subject: [PATCH 0160/1097] wip: fake edges --- pychunkedgraph/backend/definitions/edges.py | 9 ++++++--- pychunkedgraph/backend/graphoperation.py | 22 ++++++++------------- pychunkedgraph/backend/utils/edge_utils.py | 9 +++++++-- pychunkedgraph/io/edges.py | 4 +++- 4 files changed, 24 insertions(+), 20 deletions(-) diff --git a/pychunkedgraph/backend/definitions/edges.py b/pychunkedgraph/backend/definitions/edges.py index acab4663e..f007bda4b 100644 --- a/pychunkedgraph/backend/definitions/edges.py +++ b/pychunkedgraph/backend/definitions/edges.py @@ -2,6 +2,7 @@ Classes and types for edges """ +from typing import Optional import numpy as np @@ -16,10 +17,12 @@ def __init__( self, node_ids1: np.ndarray, node_ids2: np.ndarray, - affinities: np.ndarray, - areas: np.ndarray, + affinities: Optional[np.ndarray] = None, + areas: Optional[np.ndarray] = None, ): - assert node_ids1.size == node_ids2.size == affinities.size == areas.size + assert node_ids1.size == node_ids2.size + if affinities is not None: + assert node_ids1.size == affinities.size == areas.size self.node_ids1 = node_ids1 self.node_ids2 = node_ids2 self.affinities = affinities diff --git a/pychunkedgraph/backend/graphoperation.py b/pychunkedgraph/backend/graphoperation.py index 04f7ef60f..a3136b632 100644 --- a/pychunkedgraph/backend/graphoperation.py +++ b/pychunkedgraph/backend/graphoperation.py @@ -10,10 +10,9 @@ from pychunkedgraph.backend import chunkedgraph_exceptions as 
cg_exceptions from pychunkedgraph.backend.root_lock import RootLock from pychunkedgraph.backend.utils import basetypes, column_keys, serializers -from .connectivity.search import check_reachability +from .connectivity.synthetic import add_fake_edges from .utils.helpers import get_bounding_box -from .flatgraph_utils import build_gt_graph -from .utils.edge_utils import add_fake_edges +from .utils.edge_utils import get_fake_edges if TYPE_CHECKING: from pychunkedgraph.backend.chunkedgraph import ChunkedGraph @@ -338,10 +337,6 @@ def execute(self) -> "GraphEditOperation.Result": root_lock.locked_root_ids, lock_operation_ids ) - return self._apply( - operation_id=root_lock.operation_id, timestamp=timestamp - ) - new_root_ids, new_lvl2_ids, rows = self._apply( operation_id=root_lock.operation_id, timestamp=timestamp ) @@ -433,18 +428,17 @@ def _apply( # add "fake" edges, these are stored in a row per chunk # if there is a path do nothing, continue building the new hierarchy if self.cg._edge_dir: - # assert self.source_coords != None - # assert self.sink_coords != None + assert self.source_coords != None + assert self.sink_coords != None root_ids = np.unique(self.cg.get_roots(self.added_edges.ravel())) - edges = self.cg.get_subgraph_edges_v2( + subgraph_edges = self.cg.get_subgraph_edges_v2( agglomeration_ids = root_ids, + bbox = get_bounding_box(self.source_coords, self.sink_coords), + bbox_is_coordinate = True, cv_threads = 4, active_edges = False ) - return edges - g, _, _, _ = build_gt_graph(edges, is_directed=False) - reachable = check_reachability(g, self.added_edges[:,0], self.added_edges[:,1]) - add_fake_edges(self.added_edges[~reachable], timestamp=timestamp) + fake_edges = get_fake_edges(self.added_edges, subgraph_edges) new_root_ids, new_lvl2_ids, rows = cg_edits.add_edges( self.cg, diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index fbff84f4b..0eb960c0e 100644 --- 
a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -6,6 +6,9 @@ from ...utils.general import reverse_dictionary from ..definitions.edges import Edges, IN_CHUNK, BT_CHUNK, CX_CHUNK +from ...io.edges import serialize as serialize_edges +from ..connectivity.search import check_reachability +from ..flatgraph_utils import build_gt_graph def concatenate_chunk_edges(chunk_edge_dicts: list) -> dict: @@ -76,5 +79,7 @@ def get_active_edges(edges: Edges, parent_children_d: dict) -> Edges: return Edges(sv_ids1, sv_ids2, affinities, areas) -def add_fake_edges(edges: np.ndarray, timestamp): - pass \ No newline at end of file +def get_fake_edges(added_edges, subgraph_edges): + graph, _, _, _ = build_gt_graph(subgraph_edges, is_directed=False) + reachable = check_reachability(graph, added_edges[:,0], added_edges[:,1]) + return added_edges[~reachable] \ No newline at end of file diff --git a/pychunkedgraph/io/edges.py b/pychunkedgraph/io/edges.py index 02abaab81..9def16ea5 100644 --- a/pychunkedgraph/io/edges.py +++ b/pychunkedgraph/io/edges.py @@ -17,10 +17,12 @@ from .protobuf.chunkEdges_pb2 import EdgesMsg, ChunkEdgesMsg -def serialize(edges: Edges) -> EdgesMsg: +def serialize(edges: Edges, only_ids: bool = False) -> EdgesMsg: edges_proto = EdgesMsg() edges_proto.node_ids1 = edges.node_ids1.astype(basetypes.NODE_ID).tobytes() edges_proto.node_ids2 = edges.node_ids2.astype(basetypes.NODE_ID).tobytes() + if only_ids: + return edges_proto edges_proto.affinities = edges.affinities.astype(basetypes.EDGE_AFFINITY).tobytes() edges_proto.areas = edges.areas.astype(basetypes.EDGE_AREA).tobytes() return edges_proto From 17fae2492526066153300a4216265008254572d7 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 28 Aug 2019 10:50:24 -0400 Subject: [PATCH 0161/1097] wip --- pychunkedgraph/backend/connectivity/search.py | 2 +- pychunkedgraph/backend/graphoperation.py | 31 +++++++++++++------ pychunkedgraph/backend/utils/edge_utils.py | 5 
+-- 3 files changed, 26 insertions(+), 12 deletions(-) diff --git a/pychunkedgraph/backend/connectivity/search.py b/pychunkedgraph/backend/connectivity/search.py index c0e246565..2850335b5 100644 --- a/pychunkedgraph/backend/connectivity/search.py +++ b/pychunkedgraph/backend/connectivity/search.py @@ -23,7 +23,7 @@ def check_reachability(g, sv1s, sv2s) -> List[bool]: def _check_reachability(source, target): try: - bfs_search(g, source, VisitorExample(target)) + bfs_search(g, source, TargetVisitor(target)) except StopSearch: return True return False diff --git a/pychunkedgraph/backend/graphoperation.py b/pychunkedgraph/backend/graphoperation.py index a3136b632..994b1b339 100644 --- a/pychunkedgraph/backend/graphoperation.py +++ b/pychunkedgraph/backend/graphoperation.py @@ -1,6 +1,6 @@ import itertools from abc import ABC, abstractmethod -from collections import namedtuple +from collections import namedtuple, defaultdict from datetime import datetime from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Tuple, Type, Union @@ -10,9 +10,8 @@ from pychunkedgraph.backend import chunkedgraph_exceptions as cg_exceptions from pychunkedgraph.backend.root_lock import RootLock from pychunkedgraph.backend.utils import basetypes, column_keys, serializers -from .connectivity.synthetic import add_fake_edges from .utils.helpers import get_bounding_box -from .utils.edge_utils import get_fake_edges +from .utils.edge_utils import flag_fake_edges if TYPE_CHECKING: from pychunkedgraph.backend.chunkedgraph import ChunkedGraph @@ -337,6 +336,10 @@ def execute(self) -> "GraphEditOperation.Result": root_lock.locked_root_ids, lock_operation_ids ) + return self._apply( + operation_id=root_lock.operation_id, timestamp=timestamp + ) + new_root_ids, new_lvl2_ids, rows = self._apply( operation_id=root_lock.operation_id, timestamp=timestamp ) @@ -428,17 +431,27 @@ def _apply( # add "fake" edges, these are stored in a row per chunk # if there is a path do nothing, continue building 
the new hierarchy if self.cg._edge_dir: - assert self.source_coords != None - assert self.sink_coords != None + # assert self.source_coords != None + # assert self.sink_coords != None root_ids = np.unique(self.cg.get_roots(self.added_edges.ravel())) - subgraph_edges = self.cg.get_subgraph_edges_v2( + subgraph_edges, _, _ = self.cg.get_subgraph_edges_v2( agglomeration_ids = root_ids, - bbox = get_bounding_box(self.source_coords, self.sink_coords), - bbox_is_coordinate = True, cv_threads = 4, active_edges = False ) - fake_edges = get_fake_edges(self.added_edges, subgraph_edges) + fake_edges = flag_fake_edges(self.added_edges, subgraph_edges) + node_ids, r_indices = np.unique(fake_edges, return_inverse=True) + chunk_ids = self.cg.get_chunk_ids_from_node_ids(node_ids) + r_indices = r_indices.reshape(-1, 2) + chunk_ids_d = defaultdict(list) + for i, r_index in enumerate(r_indices): + sv1_index, sv2_index = r_index + chunk_ids_d[chunk_ids[sv1_index]].append(fake_edges[i]) + if chunk_ids[sv1_index] == chunk_ids[sv2_index]: + continue + chunk_ids_d[chunk_ids[sv2_index]].append(fake_edges[i][::-1]) + print("hihihihi") + return chunk_ids_d new_root_ids, new_lvl2_ids, rows = cg_edits.add_edges( self.cg, diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index 0eb960c0e..df37ebebc 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -2,11 +2,12 @@ helper functions for edge stuff """ +from typing import Dict, List + import numpy as np from ...utils.general import reverse_dictionary from ..definitions.edges import Edges, IN_CHUNK, BT_CHUNK, CX_CHUNK -from ...io.edges import serialize as serialize_edges from ..connectivity.search import check_reachability from ..flatgraph_utils import build_gt_graph @@ -79,7 +80,7 @@ def get_active_edges(edges: Edges, parent_children_d: dict) -> Edges: return Edges(sv_ids1, sv_ids2, affinities, areas) -def get_fake_edges(added_edges, 
subgraph_edges): +def flag_fake_edges(added_edges, subgraph_edges) -> List: graph, _, _, _ = build_gt_graph(subgraph_edges, is_directed=False) reachable = check_reachability(graph, added_edges[:,0], added_edges[:,1]) return added_edges[~reachable] \ No newline at end of file From 752a7c9e744437b622ecb37fc431f8f82b024c86 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 28 Aug 2019 14:17:16 -0400 Subject: [PATCH 0162/1097] wip --- pychunkedgraph/backend/connectivity/search.py | 33 +++++++++++++------ pychunkedgraph/backend/graphoperation.py | 1 - pychunkedgraph/backend/utils/edge_utils.py | 12 +++++-- 3 files changed, 32 insertions(+), 14 deletions(-) diff --git a/pychunkedgraph/backend/connectivity/search.py b/pychunkedgraph/backend/connectivity/search.py index 2850335b5..c7ee59ca1 100644 --- a/pychunkedgraph/backend/connectivity/search.py +++ b/pychunkedgraph/backend/connectivity/search.py @@ -1,5 +1,7 @@ +import random from typing import List +import numpy as np from graph_tool.search import bfs_search from graph_tool.search import BFSVisitor from graph_tool.search import StopSearch @@ -8,25 +10,36 @@ class TargetVisitor(BFSVisitor): - def __init__(self, target): + def __init__(self, target, reachable): self.target = target + self.reachable = reachable - def discover_vertex(self, u: NODE_ID): + def discover_vertex(self, u): if u == self.target: + self.reachable[u] = True raise StopSearch -def check_reachability(g, sv1s, sv2s) -> List[bool]: +def check_reachability(g, sv1s, sv2s, original_ids) -> np.ndarray: """ for each pair (sv1, sv2) check if a path exists (BFS) """ + # mapping from original ids to graph tool ids + original_ids_d = { + sv_id: index for sv_id, index in zip(original_ids, range(len(original_ids))) + } + reachable = g.new_vertex_property("bool", val=False) + print(g.vertex_properties) def _check_reachability(source, target): - try: - bfs_search(g, source, TargetVisitor(target)) - except StopSearch: - return True - return False - - return 
[_check_reachability(source, target) for source, target in zip(sv1s, sv2s)] + bfs_search(g, source, TargetVisitor(target, reachable)) + print(reachable[target]) + return reachable[target] + + return np.array( + [ + _check_reachability(original_ids_d[source], original_ids_d[target]) + for source, target in zip(sv1s, sv2s) + ] + ) diff --git a/pychunkedgraph/backend/graphoperation.py b/pychunkedgraph/backend/graphoperation.py index 994b1b339..50e42ec1f 100644 --- a/pychunkedgraph/backend/graphoperation.py +++ b/pychunkedgraph/backend/graphoperation.py @@ -450,7 +450,6 @@ def _apply( if chunk_ids[sv1_index] == chunk_ids[sv2_index]: continue chunk_ids_d[chunk_ids[sv2_index]].append(fake_edges[i][::-1]) - print("hihihihi") return chunk_ids_d new_root_ids, new_lvl2_ids, rows = cg_edits.add_edges( diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index df37ebebc..034625c79 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -81,6 +81,12 @@ def get_active_edges(edges: Edges, parent_children_d: dict) -> Edges: def flag_fake_edges(added_edges, subgraph_edges) -> List: - graph, _, _, _ = build_gt_graph(subgraph_edges, is_directed=False) - reachable = check_reachability(graph, added_edges[:,0], added_edges[:,1]) - return added_edges[~reachable] \ No newline at end of file + """run bfs to check if a path exists""" + self_edges = np.array([[node_id, node_id] for node_id in np.unique(added_edges)]) + subgraph_edges = np.concatenate([subgraph_edges, self_edges]) + + graph, _, _, original_ids = build_gt_graph(subgraph_edges, is_directed=False) + reachable = check_reachability( + graph, added_edges[:, 0], added_edges[:, 1], original_ids + ) + return added_edges[~reachable] From c3ea8b729aa5f0945b527cd39c392c84eda571a9 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 28 Aug 2019 14:51:01 -0400 Subject: [PATCH 0163/1097] wip: fake edges cg rows --- 
pychunkedgraph/backend/connectivity/search.py | 9 +++---- pychunkedgraph/backend/graphoperation.py | 25 +++++++++++-------- pychunkedgraph/backend/utils/edge_utils.py | 13 +++++++++- 3 files changed, 30 insertions(+), 17 deletions(-) diff --git a/pychunkedgraph/backend/connectivity/search.py b/pychunkedgraph/backend/connectivity/search.py index c7ee59ca1..cff4b1251 100644 --- a/pychunkedgraph/backend/connectivity/search.py +++ b/pychunkedgraph/backend/connectivity/search.py @@ -16,7 +16,7 @@ def __init__(self, target, reachable): def discover_vertex(self, u): if u == self.target: - self.reachable[u] = True + self.reachable[u] = 1 raise StopSearch @@ -28,18 +28,17 @@ def check_reachability(g, sv1s, sv2s, original_ids) -> np.ndarray: original_ids_d = { sv_id: index for sv_id, index in zip(original_ids, range(len(original_ids))) } - reachable = g.new_vertex_property("bool", val=False) - print(g.vertex_properties) + reachable = g.new_vertex_property("int", val=0) def _check_reachability(source, target): bfs_search(g, source, TargetVisitor(target, reachable)) - print(reachable[target]) return reachable[target] return np.array( [ _check_reachability(original_ids_d[source], original_ids_d[target]) for source, target in zip(sv1s, sv2s) - ] + ], + dtype=bool, ) diff --git a/pychunkedgraph/backend/graphoperation.py b/pychunkedgraph/backend/graphoperation.py index 50e42ec1f..c76565461 100644 --- a/pychunkedgraph/backend/graphoperation.py +++ b/pychunkedgraph/backend/graphoperation.py @@ -11,7 +11,8 @@ from pychunkedgraph.backend.root_lock import RootLock from pychunkedgraph.backend.utils import basetypes, column_keys, serializers from .utils.helpers import get_bounding_box -from .utils.edge_utils import flag_fake_edges +from .utils.edge_utils import filter_fake_edges +from .utils.edge_utils import map_edges_to_chunks if TYPE_CHECKING: from pychunkedgraph.backend.chunkedgraph import ChunkedGraph @@ -436,21 +437,23 @@ def _apply( root_ids = 
np.unique(self.cg.get_roots(self.added_edges.ravel())) subgraph_edges, _, _ = self.cg.get_subgraph_edges_v2( agglomeration_ids = root_ids, + # bbox = get_bounding_box(self.source_coords, self.sink_coords), + # bbox_is_coordinate = True, cv_threads = 4, active_edges = False ) - fake_edges = flag_fake_edges(self.added_edges, subgraph_edges) + fake_edges = filter_fake_edges(self.added_edges, subgraph_edges) node_ids, r_indices = np.unique(fake_edges, return_inverse=True) - chunk_ids = self.cg.get_chunk_ids_from_node_ids(node_ids) r_indices = r_indices.reshape(-1, 2) - chunk_ids_d = defaultdict(list) - for i, r_index in enumerate(r_indices): - sv1_index, sv2_index = r_index - chunk_ids_d[chunk_ids[sv1_index]].append(fake_edges[i]) - if chunk_ids[sv1_index] == chunk_ids[sv2_index]: - continue - chunk_ids_d[chunk_ids[sv2_index]].append(fake_edges[i][::-1]) - return chunk_ids_d + chunk_ids = self.cg.get_chunk_ids_from_node_ids(node_ids) + chunk_edges_d = map_edges_to_chunks(fake_edges, chunk_ids, r_indices) + rows = [] + for chunk_id, fake_edges in chunk_edges_d: + val_d = { + column_keys.Connectivity.FakeEdges: fake_edges + } + rows.append(cg_instance.mutate_row(r_key, val_dict, time_stamp=time_stamp)) + new_root_ids, new_lvl2_ids, rows = cg_edits.add_edges( self.cg, diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index 034625c79..2a3c08313 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -80,7 +80,7 @@ def get_active_edges(edges: Edges, parent_children_d: dict) -> Edges: return Edges(sv_ids1, sv_ids2, affinities, areas) -def flag_fake_edges(added_edges, subgraph_edges) -> List: +def filter_fake_edges(added_edges, subgraph_edges) -> List: """run bfs to check if a path exists""" self_edges = np.array([[node_id, node_id] for node_id in np.unique(added_edges)]) subgraph_edges = np.concatenate([subgraph_edges, self_edges]) @@ -90,3 +90,14 @@ def 
flag_fake_edges(added_edges, subgraph_edges) -> List: graph, added_edges[:, 0], added_edges[:, 1], original_ids ) return added_edges[~reachable] + + +def map_edges_to_chunks(edges, chunk_ids, r_indices): + chunk_ids_d = defaultdict(list) + for i, r_index in enumerate(r_indices): + sv1_index, sv2_index = r_index + chunk_ids_d[chunk_ids[sv1_index]].append(edges[i]) + if chunk_ids[sv1_index] == chunk_ids[sv2_index]: + continue + chunk_ids_d[chunk_ids[sv2_index]].append(edges[i][::-1]) + return chunk_ids_d From 1000200964953617ed49204521daab198bbeb072 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 28 Aug 2019 15:37:39 -0400 Subject: [PATCH 0164/1097] wip: write fake edges to cg --- pychunkedgraph/backend/chunkedgraph_edits.py | 9 ++++--- pychunkedgraph/backend/graphoperation.py | 26 +++++++++++--------- pychunkedgraph/backend/utils/edge_utils.py | 7 +++--- 3 files changed, 24 insertions(+), 18 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_edits.py b/pychunkedgraph/backend/chunkedgraph_edits.py index 7b2b048d4..91c016626 100644 --- a/pychunkedgraph/backend/chunkedgraph_edits.py +++ b/pychunkedgraph/backend/chunkedgraph_edits.py @@ -227,7 +227,7 @@ def _read_cc_edges_thread(node_ids): cross_chunk_edge_dict, time_stamp)) - if not self.cg._edge_dir: + if not cg._edge_dir: # Write atomic nodes rows.extend(_write_atomic_merge_edges(cg, atomic_edges, affinities, areas, time_stamp=time_stamp)) @@ -360,9 +360,10 @@ def remove_edges(cg, operation_id: np.uint64, operation_id=operation_id, time_stamp=time_stamp)) - # Write atomic nodes - rows.extend(_write_atomic_split_edges(cg, atomic_edges, - time_stamp=time_stamp)) + if not cg._edge_dir: + # Write atomic nodes + rows.extend(_write_atomic_split_edges(cg, atomic_edges, + time_stamp=time_stamp)) # Propagate changes up the tree if cg.n_layers > 2: diff --git a/pychunkedgraph/backend/graphoperation.py b/pychunkedgraph/backend/graphoperation.py index c76565461..6a4d25af6 100644 --- 
a/pychunkedgraph/backend/graphoperation.py +++ b/pychunkedgraph/backend/graphoperation.py @@ -1,6 +1,6 @@ import itertools from abc import ABC, abstractmethod -from collections import namedtuple, defaultdict +from collections import namedtuple from datetime import datetime from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Tuple, Type, Union @@ -428,17 +428,18 @@ def _update_root_ids(self) -> np.ndarray: def _apply( self, *, operation_id, timestamp ) -> Tuple[np.ndarray, np.ndarray, List["bigtable.row.Row"]]: - # if there is no path between sv1 and sv2 in the given subgraph - # add "fake" edges, these are stored in a row per chunk - # if there is a path do nothing, continue building the new hierarchy if self.cg._edge_dir: + # if there is no path between sv1 and sv2 in the given subgraph + # add "fake" edges, these are stored in a row per chunk + # if there is a path do nothing, continue building the new hierarchy + # TODO uncomment the following # assert self.source_coords != None # assert self.sink_coords != None root_ids = np.unique(self.cg.get_roots(self.added_edges.ravel())) subgraph_edges, _, _ = self.cg.get_subgraph_edges_v2( agglomeration_ids = root_ids, # bbox = get_bounding_box(self.source_coords, self.sink_coords), - # bbox_is_coordinate = True, + # bbox_is_coordinate = True, cv_threads = 4, active_edges = False ) @@ -448,12 +449,15 @@ def _apply( chunk_ids = self.cg.get_chunk_ids_from_node_ids(node_ids) chunk_edges_d = map_edges_to_chunks(fake_edges, chunk_ids, r_indices) rows = [] - for chunk_id, fake_edges in chunk_edges_d: - val_d = { - column_keys.Connectivity.FakeEdges: fake_edges - } - rows.append(cg_instance.mutate_row(r_key, val_dict, time_stamp=time_stamp)) - + for chunk_id in chunk_edges_d: + print(chunk_id) + row_key = serializers.serialize_uint64(chunk_id) + fake_edges = chunk_edges_d[chunk_id] + val_d = {column_keys.Connectivity.FakeEdges: fake_edges} + rows.append(self.cg.mutate_row( + row_key, val_d, time_stamp=None)) + 
self.cg.bulk_write(rows) + return chunk_edges_d new_root_ids, new_lvl2_ids, rows = cg_edits.add_edges( self.cg, diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index 2a3c08313..12d07588c 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -2,6 +2,7 @@ helper functions for edge stuff """ +from collections import defaultdict from typing import Dict, List import numpy as np @@ -89,10 +90,10 @@ def filter_fake_edges(added_edges, subgraph_edges) -> List: reachable = check_reachability( graph, added_edges[:, 0], added_edges[:, 1], original_ids ) - return added_edges[~reachable] + return added_edges[reachable] -def map_edges_to_chunks(edges, chunk_ids, r_indices): +def map_edges_to_chunks(edges, chunk_ids, r_indices) -> Dict: chunk_ids_d = defaultdict(list) for i, r_index in enumerate(r_indices): sv1_index, sv2_index = r_index @@ -100,4 +101,4 @@ def map_edges_to_chunks(edges, chunk_ids, r_indices): if chunk_ids[sv1_index] == chunk_ids[sv2_index]: continue chunk_ids_d[chunk_ids[sv2_index]].append(edges[i][::-1]) - return chunk_ids_d + return {chunk_id: np.array(chunk_ids_d[chunk_id]) for chunk_id in chunk_ids_d} From 955f98721e49571dd15997f8914405c6a1f42986 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 28 Aug 2019 15:51:43 -0400 Subject: [PATCH 0165/1097] wip: write fake edges to cg (almost done) --- pychunkedgraph/backend/graphoperation.py | 2 -- pychunkedgraph/backend/utils/edge_utils.py | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/pychunkedgraph/backend/graphoperation.py b/pychunkedgraph/backend/graphoperation.py index 6a4d25af6..542f47d9d 100644 --- a/pychunkedgraph/backend/graphoperation.py +++ b/pychunkedgraph/backend/graphoperation.py @@ -450,14 +450,12 @@ def _apply( chunk_edges_d = map_edges_to_chunks(fake_edges, chunk_ids, r_indices) rows = [] for chunk_id in chunk_edges_d: - print(chunk_id) row_key = 
serializers.serialize_uint64(chunk_id) fake_edges = chunk_edges_d[chunk_id] val_d = {column_keys.Connectivity.FakeEdges: fake_edges} rows.append(self.cg.mutate_row( row_key, val_d, time_stamp=None)) self.cg.bulk_write(rows) - return chunk_edges_d new_root_ids, new_lvl2_ids, rows = cg_edits.add_edges( self.cg, diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index 12d07588c..2281c867b 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -90,7 +90,7 @@ def filter_fake_edges(added_edges, subgraph_edges) -> List: reachable = check_reachability( graph, added_edges[:, 0], added_edges[:, 1], original_ids ) - return added_edges[reachable] + return added_edges[~reachable] def map_edges_to_chunks(edges, chunk_ids, r_indices) -> Dict: From b6554dd5111e0f2d002f24281923bc314c1d009a Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 29 Aug 2019 09:29:09 -0400 Subject: [PATCH 0166/1097] wip: read fake edges --- pychunkedgraph/backend/chunkedgraph.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 2a70238e5..7bf9bb08e 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3156,7 +3156,15 @@ def _read_edges( edges = filter_edges(sv_ids, edges_dict) if active_edges: edges = get_active_edges(edges, children_d) - return edges.get_pairs(), edges.affinities, edges.areas + fake_edges = self.read_node_id_rows( + node_ids=chunk_ids, + columns=column_keys.Connectivity.FakeEdges) + fake_affinities = np.ones(len(fake_edges)) + fake_areas = np.ones(len(fake_edges)) + all_edges = np.concatenate([edges.get_pairs(), fake_edges]) + all_affinities = np.concatenate([edges.affinities, fake_affinities]) + all_areas = np.concatenate([edges.areas, fake_areas]) + return all_edges, all_affinities, all_areas def 
get_subgraph_nodes(self, agglomeration_id: np.uint64, bounding_box: Optional[Sequence[Sequence[int]]] = None, From b60331c883a89612b1a78850a247f9ea6181ff80 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 29 Aug 2019 10:47:08 -0400 Subject: [PATCH 0167/1097] mode add_fake_edges to chunkedgraph_edits.py --- pychunkedgraph/backend/chunkedgraph.py | 10 +++-- pychunkedgraph/backend/chunkedgraph_edits.py | 42 ++++++++++++++++++ pychunkedgraph/backend/graphoperation.py | 45 ++++---------------- pychunkedgraph/backend/utils/helpers.py | 2 + 4 files changed, 60 insertions(+), 39 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 7bf9bb08e..5c617b19d 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3103,8 +3103,9 @@ def get_subgraph_edges_v2( agglomeration_ids: np.ndarray, bbox: Optional[Sequence[Sequence[int]]] = None, bbox_is_coordinate: bool = False, - cv_threads=1, - active_edges=True, + cv_threads = 1, + active_edges = True, + timestamp = None ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ 1. 
get level 2 children ids belonging to the agglomerations @@ -3156,9 +3157,12 @@ def _read_edges( edges = filter_edges(sv_ids, edges_dict) if active_edges: edges = get_active_edges(edges, children_d) - fake_edges = self.read_node_id_rows( + + # include fake edges + chunk_fake_edges_d = self.read_node_id_rows( node_ids=chunk_ids, columns=column_keys.Connectivity.FakeEdges) + fake_edges = np.concatenate(list(chunk_fake_edges_d.values())) fake_affinities = np.ones(len(fake_edges)) fake_areas = np.ones(len(fake_edges)) all_edges = np.concatenate([edges.get_pairs(), fake_edges]) diff --git a/pychunkedgraph/backend/chunkedgraph_edits.py b/pychunkedgraph/backend/chunkedgraph_edits.py index 91c016626..b19b5c583 100644 --- a/pychunkedgraph/backend/chunkedgraph_edits.py +++ b/pychunkedgraph/backend/chunkedgraph_edits.py @@ -11,6 +11,9 @@ import get_google_compatible_time_stamp, combine_cross_chunk_edge_dicts from pychunkedgraph.backend.utils import column_keys, serializers from pychunkedgraph.backend import flatgraph_utils +from .utils.helpers import get_bounding_box +from .utils.edge_utils import filter_fake_edges +from .utils.edge_utils import map_edges_to_chunks def _write_atomic_merge_edges(cg, atomic_edges, affinities, areas, time_stamp): rows = [] @@ -245,6 +248,45 @@ def _read_cc_edges_thread(node_ids): return new_root_ids, list(lvl2_dict.keys()), rows +def add_fake_edges( + cg_instance, + operation_id: np.uint64, + added_edges: Sequence[Sequence[np.uint64]], + source_coords: Sequence[np.uint64], + sink_coords: Sequence[np.uint64], + timestamp: datetime.datetime) -> List["bigtable.row.Row"]: + """ + if there is no path between sv1 and sv2 in the given subgraph + add "fake" edges, these are stored in a row per chunk + """ + if cg_instance._edge_dir: + return [] + root_ids = np.unique(cg_instance.get_roots(added_edges.ravel())) + subgraph_edges, _, _ = cg_instance.get_subgraph_edges_v2( + agglomeration_ids = root_ids, + bbox = get_bounding_box(source_coords, 
sink_coords), + bbox_is_coordinate = True, + cv_threads = 4, + active_edges = False, + timestamp=timestamp + ) + fake_edges = filter_fake_edges(added_edges, subgraph_edges) + node_ids, r_indices = np.unique(fake_edges, return_inverse=True) + r_indices = r_indices.reshape(-1, 2) + chunk_ids = cg_instance.get_chunk_ids_from_node_ids(node_ids) + chunk_edges_d = map_edges_to_chunks(fake_edges, chunk_ids, r_indices) + rows = [] + for chunk_id in chunk_edges_d: + row_key = serializers.serialize_uint64(chunk_id) + fake_edges = chunk_edges_d[chunk_id] + val_d = { + column_keys.Connectivity.FakeEdges: fake_edges, + column_keys.OperationLogs.OperationID: operation_id} + rows.append(cg_instance.mutate_row( + row_key, val_d, time_stamp=timestamp)) + return rows + + def remove_edges(cg, operation_id: np.uint64, atomic_edges: Sequence[Sequence[np.uint64]], time_stamp: datetime.datetime): diff --git a/pychunkedgraph/backend/graphoperation.py b/pychunkedgraph/backend/graphoperation.py index 542f47d9d..6a8705248 100644 --- a/pychunkedgraph/backend/graphoperation.py +++ b/pychunkedgraph/backend/graphoperation.py @@ -10,9 +10,6 @@ from pychunkedgraph.backend import chunkedgraph_exceptions as cg_exceptions from pychunkedgraph.backend.root_lock import RootLock from pychunkedgraph.backend.utils import basetypes, column_keys, serializers -from .utils.helpers import get_bounding_box -from .utils.edge_utils import filter_fake_edges -from .utils.edge_utils import map_edges_to_chunks if TYPE_CHECKING: from pychunkedgraph.backend.chunkedgraph import ChunkedGraph @@ -337,10 +334,6 @@ def execute(self) -> "GraphEditOperation.Result": root_lock.locked_root_ids, lock_operation_ids ) - return self._apply( - operation_id=root_lock.operation_id, timestamp=timestamp - ) - new_root_ids, new_lvl2_ids, rows = self._apply( operation_id=root_lock.operation_id, timestamp=timestamp ) @@ -428,35 +421,14 @@ def _update_root_ids(self) -> np.ndarray: def _apply( self, *, operation_id, timestamp ) -> 
Tuple[np.ndarray, np.ndarray, List["bigtable.row.Row"]]: - if self.cg._edge_dir: - # if there is no path between sv1 and sv2 in the given subgraph - # add "fake" edges, these are stored in a row per chunk - # if there is a path do nothing, continue building the new hierarchy - # TODO uncomment the following - # assert self.source_coords != None - # assert self.sink_coords != None - root_ids = np.unique(self.cg.get_roots(self.added_edges.ravel())) - subgraph_edges, _, _ = self.cg.get_subgraph_edges_v2( - agglomeration_ids = root_ids, - # bbox = get_bounding_box(self.source_coords, self.sink_coords), - # bbox_is_coordinate = True, - cv_threads = 4, - active_edges = False - ) - fake_edges = filter_fake_edges(self.added_edges, subgraph_edges) - node_ids, r_indices = np.unique(fake_edges, return_inverse=True) - r_indices = r_indices.reshape(-1, 2) - chunk_ids = self.cg.get_chunk_ids_from_node_ids(node_ids) - chunk_edges_d = map_edges_to_chunks(fake_edges, chunk_ids, r_indices) - rows = [] - for chunk_id in chunk_edges_d: - row_key = serializers.serialize_uint64(chunk_id) - fake_edges = chunk_edges_d[chunk_id] - val_d = {column_keys.Connectivity.FakeEdges: fake_edges} - rows.append(self.cg.mutate_row( - row_key, val_d, time_stamp=None)) - self.cg.bulk_write(rows) - + fake_edge_rows = cg_edits.add_fake_edges( + self.cg, + operation_id, + self.added_edges, + self.source_coords, + self.sink_coords, + timestamp=timestamp + ) new_root_ids, new_lvl2_ids, rows = cg_edits.add_edges( self.cg, operation_id, @@ -464,6 +436,7 @@ def _apply( time_stamp=timestamp, affinities=self.affinities, ) + rows.extend(fake_edge_rows) return new_root_ids, new_lvl2_ids, rows def _create_log_record(self, *, operation_id, timestamp, new_root_ids) -> "bigtable.row.Row": diff --git a/pychunkedgraph/backend/utils/helpers.py b/pychunkedgraph/backend/utils/helpers.py index e7e201012..32bba7bb5 100644 --- a/pychunkedgraph/backend/utils/helpers.py +++ b/pychunkedgraph/backend/utils/helpers.py @@ -7,6 +7,8 
@@ def get_bounding_box( sink_coords: Sequence[Sequence[int]], bb_offset: Tuple[int, int, int] = (120, 120, 12), ): + if not source_coords: + return None bb_offset = np.array(list(bb_offset)) source_coords = np.array(source_coords) sink_coords = np.array(sink_coords) From 9cefe781fec54d06b77893934d187c349ce71b09 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 29 Aug 2019 10:58:07 -0400 Subject: [PATCH 0168/1097] type annotations --- pychunkedgraph/backend/chunkedgraph_edits.py | 1 + pychunkedgraph/backend/graphoperation.py | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_edits.py b/pychunkedgraph/backend/chunkedgraph_edits.py index b19b5c583..0ec860212 100644 --- a/pychunkedgraph/backend/chunkedgraph_edits.py +++ b/pychunkedgraph/backend/chunkedgraph_edits.py @@ -250,6 +250,7 @@ def _read_cc_edges_thread(node_ids): def add_fake_edges( cg_instance, + *, operation_id: np.uint64, added_edges: Sequence[Sequence[np.uint64]], source_coords: Sequence[np.uint64], diff --git a/pychunkedgraph/backend/graphoperation.py b/pychunkedgraph/backend/graphoperation.py index 6a8705248..f9fb00618 100644 --- a/pychunkedgraph/backend/graphoperation.py +++ b/pychunkedgraph/backend/graphoperation.py @@ -423,10 +423,10 @@ def _apply( ) -> Tuple[np.ndarray, np.ndarray, List["bigtable.row.Row"]]: fake_edge_rows = cg_edits.add_fake_edges( self.cg, - operation_id, - self.added_edges, - self.source_coords, - self.sink_coords, + operation_id=operation_id, + added_edges=self.added_edges, + source_coords=self.source_coords, + sink_coords=self.sink_coords, timestamp=timestamp ) new_root_ids, new_lvl2_ids, rows = cg_edits.add_edges( From b6f2b87e682a77316e94287ca7cd2559e7f5aa00 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 29 Aug 2019 11:40:48 -0400 Subject: [PATCH 0169/1097] change Edges defintion --- pychunkedgraph/backend/definitions/edges.py | 18 ++++++++++++++---- pychunkedgraph/backend/utils/edge_utils.py | 
10 +++++++--- pychunkedgraph/ingest/ran_ingestion_v2.py | 8 ++++++-- pychunkedgraph/io/edges.py | 2 +- 4 files changed, 28 insertions(+), 10 deletions(-) diff --git a/pychunkedgraph/backend/definitions/edges.py b/pychunkedgraph/backend/definitions/edges.py index f007bda4b..c55e33e3d 100644 --- a/pychunkedgraph/backend/definitions/edges.py +++ b/pychunkedgraph/backend/definitions/edges.py @@ -11,24 +11,34 @@ CX_CHUNK = "cross" TYPES = [IN_CHUNK, BT_CHUNK, CX_CHUNK] +DEFAULT_AFFINITY = np.finfo(np.float32).tiny +DEFAULT_AREA = np.finfo(np.float32).tiny + class Edges: def __init__( self, node_ids1: np.ndarray, node_ids2: np.ndarray, + *, affinities: Optional[np.ndarray] = None, areas: Optional[np.ndarray] = None, ): assert node_ids1.size == node_ids2.size - if affinities is not None: - assert node_ids1.size == affinities.size == areas.size self.node_ids1 = node_ids1 self.node_ids2 = node_ids2 - self.affinities = affinities - self.areas = areas self._as_pairs = None + self.affinities = np.ones(len(self.node_ids1)) * DEFAULT_AFFINITY + if affinities is not None: + assert node_ids1.size == affinities.size + self.affinities = affinities + + self.areas = np.ones(len(self.node_ids1)) * DEFAULT_AREA + if areas is not None: + assert node_ids1.size == areas.size + self.areas = affinities + def get_pairs(self): """ return numpy array of edge pairs [[sv1, sv2] ... 
] diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index 2281c867b..b14a3be77 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -32,7 +32,11 @@ def concatenate_chunk_edges(chunk_edge_dicts: list) -> dict: sv_ids2 = np.concatenate(sv_ids2) affinities = np.concatenate(affinities) areas = np.concatenate(areas) - edges_dict[edge_type] = Edges(sv_ids1, sv_ids2, affinities, areas) + edges_dict[edge_type] = Edges( + sv_ids1, + sv_ids2, + affinities=affinities, + areas=areas) return edges_dict @@ -56,7 +60,7 @@ def filter_edges(node_ids: np.ndarray, edges_dict: dict) -> Edges: ids2 = np.concatenate(ids2) affinities = np.concatenate(affinities) areas = np.concatenate(areas) - return Edges(ids1, ids2, affinities, areas) + return Edges(ids1, ids2, affinities=affinities, areas=areas) def get_active_edges(edges: Edges, parent_children_d: dict) -> Edges: @@ -78,7 +82,7 @@ def get_active_edges(edges: Edges, parent_children_d: dict) -> Edges: affinities = affinities[parent_ids1 == parent_ids2] areas = areas[parent_ids1 == parent_ids2] - return Edges(sv_ids1, sv_ids2, affinities, areas) + return Edges(sv_ids1, sv_ids2, affinities=affinities, areas=areas) def filter_fake_edges(added_edges, subgraph_edges) -> List: diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 2abf275db..7dbcf9a9a 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -158,12 +158,16 @@ def create_atomic_chunk(imanager, coord): affinities = edge_dict[edge_type]["aff"] areas = edge_dict[edge_type]["area"] - chunk_edges_all[edge_type] = Edges(sv_ids1, sv_ids2, affinities, areas) + chunk_edges_all[edge_type] = Edges( + sv_ids1, sv_ids2, affinities=affinities, areas=areas + ) sv_ids1 = sv_ids1[active] sv_ids2 = sv_ids2[active] affinities = affinities[active] areas = areas[active] - chunk_edges[edge_type] 
= Edges(sv_ids1, sv_ids2, affinities, areas) + chunk_edges[edge_type] = Edges( + sv_ids1, sv_ids2, affinities=affinities, areas=areas + ) no_edges = no_edges and not sv_ids1.size # if not no_edges: diff --git a/pychunkedgraph/io/edges.py b/pychunkedgraph/io/edges.py index 9def16ea5..e4e542e06 100644 --- a/pychunkedgraph/io/edges.py +++ b/pychunkedgraph/io/edges.py @@ -33,7 +33,7 @@ def deserialize(edges_message: EdgesMsg) -> Tuple[np.ndarray, np.ndarray, np.nda sv_ids2 = np.frombuffer(edges_message.node_ids2, basetypes.NODE_ID) affinities = np.frombuffer(edges_message.affinities, basetypes.EDGE_AFFINITY) areas = np.frombuffer(edges_message.areas, basetypes.EDGE_AREA) - return Edges(sv_ids1, sv_ids2, affinities, areas) + return Edges(sv_ids1, sv_ids2, affinities=affinities, areas=areas) def _decompress_edges(content: bytes) -> dict: From 0e83c6a3fcbde566f0f48db65a3f2540374efeb1 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 29 Aug 2019 12:07:30 -0400 Subject: [PATCH 0170/1097] __add__ operator for Edges --- pychunkedgraph/backend/chunkedgraph.py | 16 ++++++++-------- pychunkedgraph/backend/definitions/edges.py | 10 +++++++++- pychunkedgraph/backend/utils/edge_utils.py | 2 +- 3 files changed, 18 insertions(+), 10 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 5c617b19d..fd834d770 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3150,21 +3150,21 @@ def _read_edges( debug=False, ) + # include fake edges + chunk_fake_edges_d = self.read_node_id_rows( + node_ids=chunk_ids, + columns=column_keys.Connectivity.FakeEdges) + fake_edges = np.concatenate(list(chunk_fake_edges_d.values())) + edges_dict = concatenate_chunk_edges(chunk_edge_dicts) children_d = self.get_children(level2_ids) sv_ids = np.concatenate(list(children_d.values())) - edges = filter_edges(sv_ids, edges_dict) + edges = sum(edges_dict.values()) + edges = filter_edges(sv_ids, edges) 
if active_edges: edges = get_active_edges(edges, children_d) - # include fake edges - chunk_fake_edges_d = self.read_node_id_rows( - node_ids=chunk_ids, - columns=column_keys.Connectivity.FakeEdges) - fake_edges = np.concatenate(list(chunk_fake_edges_d.values())) - fake_affinities = np.ones(len(fake_edges)) - fake_areas = np.ones(len(fake_edges)) all_edges = np.concatenate([edges.get_pairs(), fake_edges]) all_affinities = np.concatenate([edges.affinities, fake_affinities]) all_areas = np.concatenate([edges.areas, fake_areas]) diff --git a/pychunkedgraph/backend/definitions/edges.py b/pychunkedgraph/backend/definitions/edges.py index c55e33e3d..4081a3d48 100644 --- a/pychunkedgraph/backend/definitions/edges.py +++ b/pychunkedgraph/backend/definitions/edges.py @@ -37,7 +37,15 @@ def __init__( self.areas = np.ones(len(self.node_ids1)) * DEFAULT_AREA if areas is not None: assert node_ids1.size == areas.size - self.areas = affinities + self.areas = affinities + + def __add__(self, other:Edges) -> Edges: + """add two Edges instances""" + node_ids1 = np.concatenate([self.node_ids1, other.node_ids1]) + node_ids2 = np.concatenate([self.node_ids1, other.node_ids1]) + affinities = np.concatenate([self.node_ids1, other.node_ids1]) + areas = np.concatenate([self.node_ids1, other.node_ids1]) + return Edges(node_ids1, node_ids2, affinities=affinities, areas=areas) def get_pairs(self): """ diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index b14a3be77..dbc4642bd 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -40,7 +40,7 @@ def concatenate_chunk_edges(chunk_edge_dicts: list) -> dict: return edges_dict -def filter_edges(node_ids: np.ndarray, edges_dict: dict) -> Edges: +def filter_edges(node_ids: np.ndarray, edges: Edges) -> Edges: """find edges for the given node_ids from the dict""" ids1 = [] ids2 = [] From b4dfada01d69fddbf0113eb97c8960c521ea31eb Mon Sep 17 00:00:00 
2001 From: Akhilesh Halageri Date: Thu, 29 Aug 2019 13:31:43 -0400 Subject: [PATCH 0171/1097] include fake edges when reading subgraph edges --- pychunkedgraph/backend/chunkedgraph.py | 9 ++++--- pychunkedgraph/backend/chunkedgraph_edits.py | 4 +-- pychunkedgraph/backend/utils/edge_utils.py | 28 +++++++------------- 3 files changed, 16 insertions(+), 25 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index fd834d770..7ecba864e 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3103,9 +3103,9 @@ def get_subgraph_edges_v2( agglomeration_ids: np.ndarray, bbox: Optional[Sequence[Sequence[int]]] = None, bbox_is_coordinate: bool = False, - cv_threads = 1, - active_edges = True, - timestamp = None + cv_threads: int = 1, + active_edges: bool = True, + timestamp: datetime.datetime = None ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ 1. get level 2 children ids belonging to the agglomerations @@ -3154,7 +3154,8 @@ def _read_edges( chunk_fake_edges_d = self.read_node_id_rows( node_ids=chunk_ids, columns=column_keys.Connectivity.FakeEdges) - fake_edges = np.concatenate(list(chunk_fake_edges_d.values())) + fake_edges = np.concatenate(list(chunk_fake_edges_d.values())) + fake_edges = Edges(fake_edges[:,0], fake_edges[:,1]) edges_dict = concatenate_chunk_edges(chunk_edge_dicts) children_d = self.get_children(level2_ids) diff --git a/pychunkedgraph/backend/chunkedgraph_edits.py b/pychunkedgraph/backend/chunkedgraph_edits.py index 0ec860212..d3738343e 100644 --- a/pychunkedgraph/backend/chunkedgraph_edits.py +++ b/pychunkedgraph/backend/chunkedgraph_edits.py @@ -257,8 +257,8 @@ def add_fake_edges( sink_coords: Sequence[np.uint64], timestamp: datetime.datetime) -> List["bigtable.row.Row"]: """ - if there is no path between sv1 and sv2 in the given subgraph - add "fake" edges, these are stored in a row per chunk + if there is no path between sv1 and sv2 (from added_edges) 
+ in the subgraph, add "fake" edges, these are stored in a row per chunk """ if cg_instance._edge_dir: return [] diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index dbc4642bd..db4677468 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -41,25 +41,15 @@ def concatenate_chunk_edges(chunk_edge_dicts: list) -> dict: def filter_edges(node_ids: np.ndarray, edges: Edges) -> Edges: - """find edges for the given node_ids from the dict""" - ids1 = [] - ids2 = [] - affinities = [] - areas = [] - for edge_type in [IN_CHUNK, BT_CHUNK, CX_CHUNK]: - edges = edges_dict[edge_type] - xsorted = np.argsort(edges.node_ids1) - indices = np.searchsorted(edges.node_ids1[xsorted], node_ids) - indices = indices[indices < xsorted.size] - - ids1.append(edges.node_ids1[indices]) - ids2.append(edges.node_ids2[indices]) - affinities.append(edges.affinities[indices]) - areas.append(edges.areas[indices]) - ids1 = np.concatenate(ids1) - ids2 = np.concatenate(ids2) - affinities = np.concatenate(affinities) - areas = np.concatenate(areas) + """find edges for the given node_ids""" + xsorted = np.argsort(edges.node_ids1) + indices = np.searchsorted(edges.node_ids1[xsorted], node_ids) + indices = indices[indices < xsorted.size] + + ids1 = edges.node_ids1[indices] + ids2 = edges.node_ids2[indices] + affinities = edges.affinities[indices] + areas = edges.areas[indices] return Edges(ids1, ids2, affinities=affinities, areas=areas) From 36e611eaf2275f8ec2343b55e4729febb8585463 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 29 Aug 2019 15:40:03 -0400 Subject: [PATCH 0172/1097] add fake edges mostly done --- pychunkedgraph/backend/chunkedgraph.py | 25 ++++++++++++--------- pychunkedgraph/backend/definitions/edges.py | 5 ++++- 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 
7ecba864e..017fd5300 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -12,6 +12,7 @@ import logging from itertools import chain +from functools import reduce from multiwrapper import multiprocessing_utils as mu from pychunkedgraph.backend import cutting, chunkedgraph_comp, flatgraph_utils from pychunkedgraph.backend.chunkedgraph_utils import compute_indices_pandas, \ @@ -43,7 +44,7 @@ from google.cloud.bigtable.column_family import MaxVersionsGCRule from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, NamedTuple - +from .definitions.edges import Edges from .utils.edge_utils import ( concatenate_chunk_edges, filter_edges, get_active_edges) @@ -3129,7 +3130,6 @@ def _read_edges( ) bounding_box = self.normalize_bounding_box(bbox, bbox_is_coordinate) - level2_ids = [] for agglomeration_id in agglomeration_ids: layer_nodes_d = self._get_subgraph_higher_layer_nodes( @@ -3149,27 +3149,30 @@ def _read_edges( n_threads=cg_threads, debug=False, ) - # include fake edges chunk_fake_edges_d = self.read_node_id_rows( node_ids=chunk_ids, columns=column_keys.Connectivity.FakeEdges) - fake_edges = np.concatenate(list(chunk_fake_edges_d.values())) - fake_edges = Edges(fake_edges[:,0], fake_edges[:,1]) - + fake_edges = np.concatenate([list(chunk_fake_edges_d.values())]) + if fake_edges: + fake_edges = Edges(fake_edges[:,0], fake_edges[:,1]) + edges_dict = concatenate_chunk_edges(chunk_edge_dicts) children_d = self.get_children(level2_ids) sv_ids = np.concatenate(list(children_d.values())) - edges = sum(edges_dict.values()) + edges = reduce(lambda x, y: x+y, edges_dict.values()) edges = filter_edges(sv_ids, edges) + if active_edges: edges = get_active_edges(edges, children_d) - all_edges = np.concatenate([edges.get_pairs(), fake_edges]) - all_affinities = np.concatenate([edges.affinities, fake_affinities]) - all_areas = np.concatenate([edges.areas, fake_areas]) - return all_edges, all_affinities, all_areas + 
if fake_edges: + all_edges = np.concatenate([edges.get_pairs(), fake_edges.get_pairs()]) + all_affinities = np.concatenate([edges.affinities, fake_edges.affinities]) + all_areas = np.concatenate([edges.areas, fake_edges.areas]) + return all_edges, all_affinities, all_areas + return edges.get_pairs(), edges.affinities, edges.areas def get_subgraph_nodes(self, agglomeration_id: np.uint64, bounding_box: Optional[Sequence[Sequence[int]]] = None, diff --git a/pychunkedgraph/backend/definitions/edges.py b/pychunkedgraph/backend/definitions/edges.py index 4081a3d48..b0410b33c 100644 --- a/pychunkedgraph/backend/definitions/edges.py +++ b/pychunkedgraph/backend/definitions/edges.py @@ -39,7 +39,7 @@ def __init__( assert node_ids1.size == areas.size self.areas = affinities - def __add__(self, other:Edges) -> Edges: + def __add__(self, other): """add two Edges instances""" node_ids1 = np.concatenate([self.node_ids1, other.node_ids1]) node_ids2 = np.concatenate([self.node_ids1, other.node_ids1]) @@ -47,6 +47,9 @@ def __add__(self, other:Edges) -> Edges: areas = np.concatenate([self.node_ids1, other.node_ids1]) return Edges(node_ids1, node_ids2, affinities=affinities, areas=areas) + def __len__(self): + return len(self.node_ids1) + def get_pairs(self): """ return numpy array of edge pairs [[sv1, sv2] ... 
] From a95ebca0dde0fc993d1dbd10edd52ded647152a9 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 29 Aug 2019 15:46:43 -0400 Subject: [PATCH 0173/1097] fix: apply filter to fake edges as well --- pychunkedgraph/backend/chunkedgraph.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 017fd5300..9d48f21c9 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3154,24 +3154,19 @@ def _read_edges( node_ids=chunk_ids, columns=column_keys.Connectivity.FakeEdges) fake_edges = np.concatenate([list(chunk_fake_edges_d.values())]) - if fake_edges: - fake_edges = Edges(fake_edges[:,0], fake_edges[:,1]) edges_dict = concatenate_chunk_edges(chunk_edge_dicts) children_d = self.get_children(level2_ids) sv_ids = np.concatenate(list(children_d.values())) edges = reduce(lambda x, y: x+y, edges_dict.values()) + if fake_edges: + fake_edges = Edges(fake_edges[:,0], fake_edges[:,1]) + edges = edges + fake_edges edges = filter_edges(sv_ids, edges) if active_edges: edges = get_active_edges(edges, children_d) - - if fake_edges: - all_edges = np.concatenate([edges.get_pairs(), fake_edges.get_pairs()]) - all_affinities = np.concatenate([edges.affinities, fake_edges.affinities]) - all_areas = np.concatenate([edges.areas, fake_edges.areas]) - return all_edges, all_affinities, all_areas return edges.get_pairs(), edges.affinities, edges.areas def get_subgraph_nodes(self, agglomeration_id: np.uint64, From e3880a691b6062d8c52f03c62760c97202629dcc Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 29 Aug 2019 16:05:20 -0400 Subject: [PATCH 0174/1097] formatting and documentation --- pychunkedgraph/backend/utils/edge_utils.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index db4677468..1e6383fab 
100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -33,10 +33,8 @@ def concatenate_chunk_edges(chunk_edge_dicts: list) -> dict: affinities = np.concatenate(affinities) areas = np.concatenate(areas) edges_dict[edge_type] = Edges( - sv_ids1, - sv_ids2, - affinities=affinities, - areas=areas) + sv_ids1, sv_ids2, affinities=affinities, areas=areas + ) return edges_dict @@ -79,7 +77,7 @@ def filter_fake_edges(added_edges, subgraph_edges) -> List: """run bfs to check if a path exists""" self_edges = np.array([[node_id, node_id] for node_id in np.unique(added_edges)]) subgraph_edges = np.concatenate([subgraph_edges, self_edges]) - + graph, _, _, original_ids = build_gt_graph(subgraph_edges, is_directed=False) reachable = check_reachability( graph, added_edges[:, 0], added_edges[:, 1], original_ids @@ -87,7 +85,13 @@ def filter_fake_edges(added_edges, subgraph_edges) -> List: return added_edges[~reachable] -def map_edges_to_chunks(edges, chunk_ids, r_indices) -> Dict: +def map_edges_to_chunks( + edges: np.ndarray, chunk_ids: np.ndarray, r_indices: np.ndarray +) -> Dict: + """ + maps a list of edges to corresponding chunks + returns a dictionary {chuunk_id: [edges that are part of this chunk]} + """ chunk_ids_d = defaultdict(list) for i, r_index in enumerate(r_indices): sv1_index, sv2_index = r_index From 3f740340d30c30fc1d88b64035f586e3d5b9db86 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 29 Aug 2019 18:57:41 -0400 Subject: [PATCH 0175/1097] += operator in Edges class --- pychunkedgraph/backend/chunkedgraph.py | 2 +- pychunkedgraph/backend/definitions/edges.py | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 9d48f21c9..ad331af0c 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3162,7 +3162,7 @@ def _read_edges( edges = reduce(lambda x, y: x+y, 
edges_dict.values()) if fake_edges: fake_edges = Edges(fake_edges[:,0], fake_edges[:,1]) - edges = edges + fake_edges + edges += fake_edges edges = filter_edges(sv_ids, edges) if active_edges: diff --git a/pychunkedgraph/backend/definitions/edges.py b/pychunkedgraph/backend/definitions/edges.py index b0410b33c..0c2c1ff7a 100644 --- a/pychunkedgraph/backend/definitions/edges.py +++ b/pychunkedgraph/backend/definitions/edges.py @@ -47,6 +47,13 @@ def __add__(self, other): areas = np.concatenate([self.node_ids1, other.node_ids1]) return Edges(node_ids1, node_ids2, affinities=affinities, areas=areas) + def __iadd__(self, other): + self.node_ids1 = np.concatenate([self.node_ids1, other.node_ids1]) + self.node_ids2 = np.concatenate([self.node_ids2, other.node_ids2]) + self.affinities = np.concatenate([self.affinities, other.affinities]) + self.areas = np.concatenate([self.areas, other.areas]) + return self + def __len__(self): return len(self.node_ids1) From aded0a8c80ca3de93aa7cddf61706a73c29946dc Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 29 Aug 2019 19:01:16 -0400 Subject: [PATCH 0176/1097] rearrange for cleaner code --- pychunkedgraph/backend/chunkedgraph.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index ad331af0c..080b17851 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3149,17 +3149,17 @@ def _read_edges( n_threads=cg_threads, debug=False, ) - # include fake edges - chunk_fake_edges_d = self.read_node_id_rows( - node_ids=chunk_ids, - columns=column_keys.Connectivity.FakeEdges) - fake_edges = np.concatenate([list(chunk_fake_edges_d.values())]) edges_dict = concatenate_chunk_edges(chunk_edge_dicts) children_d = self.get_children(level2_ids) sv_ids = np.concatenate(list(children_d.values())) - edges = reduce(lambda x, y: x+y, edges_dict.values()) + + # include fake edges + 
chunk_fake_edges_d = self.read_node_id_rows( + node_ids=chunk_ids, + columns=column_keys.Connectivity.FakeEdges) + fake_edges = np.concatenate([list(chunk_fake_edges_d.values())]) if fake_edges: fake_edges = Edges(fake_edges[:,0], fake_edges[:,1]) edges += fake_edges From 5d1829855b52c80878daaccfbd87d7b9c9ace47e Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 29 Aug 2019 19:10:00 -0400 Subject: [PATCH 0177/1097] remove old TODO --- pychunkedgraph/backend/chunkedgraph.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 080b17851..4a6f12326 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3111,13 +3111,12 @@ def get_subgraph_edges_v2( """ 1. get level 2 children ids belonging to the agglomerations 2. get relevant chunk ids from level 2 ids - 3. read edges from cloud storage + 3. read edges from cloud storage (include fake edges from big table) 4. get supervoxel ids from level 2 ids 5. filter the edges with supervoxel ids 6. optionally for each edge (v1,v2) active if parent(v1) == parent(v2) inactive otherwise 7. 
return the edges - TODO read fake edges """ def _read_edges( @@ -3139,8 +3138,8 @@ def _read_edges( verbose=False ) level2_ids.append(layer_nodes_d[2]) - level2_ids = np.concatenate(level2_ids) + chunk_ids = self.get_chunk_ids_from_node_ids(level2_ids) cg_threads = 1 chunk_edge_dicts = mu.multithread_func( @@ -3149,12 +3148,8 @@ def _read_edges( n_threads=cg_threads, debug=False, ) - edges_dict = concatenate_chunk_edges(chunk_edge_dicts) - children_d = self.get_children(level2_ids) - sv_ids = np.concatenate(list(children_d.values())) edges = reduce(lambda x, y: x+y, edges_dict.values()) - # include fake edges chunk_fake_edges_d = self.read_node_id_rows( node_ids=chunk_ids, @@ -3163,8 +3158,10 @@ def _read_edges( if fake_edges: fake_edges = Edges(fake_edges[:,0], fake_edges[:,1]) edges += fake_edges + + children_d = self.get_children(level2_ids) + sv_ids = np.concatenate(list(children_d.values())) edges = filter_edges(sv_ids, edges) - if active_edges: edges = get_active_edges(edges, children_d) return edges.get_pairs(), edges.affinities, edges.areas From 37c52945461ee70fbd455d3dba023cfb1df5f4d1 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 29 Aug 2019 20:05:40 -0400 Subject: [PATCH 0178/1097] type annotations and better docs --- pychunkedgraph/backend/chunkedgraph.py | 10 +++------- pychunkedgraph/backend/utils/edge_utils.py | 8 ++++---- pychunkedgraph/io/edges.py | 10 ++++------ 3 files changed, 11 insertions(+), 17 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 4a6f12326..465c25c1f 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -3116,24 +3116,20 @@ def get_subgraph_edges_v2( 5. filter the edges with supervoxel ids 6. optionally for each edge (v1,v2) active if parent(v1) == parent(v2) inactive otherwise - 7. 
return the edges """ - def _read_edges( - chunk_ids - ) -> Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: + def _read_edges(chunk_ids) -> dict: return get_chunk_edges( self._edge_dir, [self.get_chunk_coordinates(chunk_id) for chunk_id in chunk_ids], cv_threads, ) - bounding_box = self.normalize_bounding_box(bbox, bbox_is_coordinate) level2_ids = [] for agglomeration_id in agglomeration_ids: layer_nodes_d = self._get_subgraph_higher_layer_nodes( node_id=agglomeration_id, - bounding_box=bounding_box, + bounding_box=self.normalize_bounding_box(bbox, bbox_is_coordinate), return_layers=[2], verbose=False ) @@ -3155,7 +3151,7 @@ def _read_edges( node_ids=chunk_ids, columns=column_keys.Connectivity.FakeEdges) fake_edges = np.concatenate([list(chunk_fake_edges_d.values())]) - if fake_edges: + if fake_edges.size: fake_edges = Edges(fake_edges[:,0], fake_edges[:,1]) edges += fake_edges diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index 1e6383fab..733d781ea 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -13,7 +13,7 @@ from ..flatgraph_utils import build_gt_graph -def concatenate_chunk_edges(chunk_edge_dicts: list) -> dict: +def concatenate_chunk_edges(chunk_edge_dicts: List) -> Dict: """combine edge_dicts of multiple chunks into one edge_dict""" edges_dict = {} for edge_type in [IN_CHUNK, BT_CHUNK, CX_CHUNK]: @@ -51,7 +51,7 @@ def filter_edges(node_ids: np.ndarray, edges: Edges) -> Edges: return Edges(ids1, ids2, affinities=affinities, areas=areas) -def get_active_edges(edges: Edges, parent_children_d: dict) -> Edges: +def get_active_edges(edges: Edges, parent_children_d: Dict) -> Edges: """ get edges [(v1, v2) ...] 
where parent(v1) == parent(v2) -> assume active if v1 and v2 belong to same connected component @@ -73,7 +73,7 @@ def get_active_edges(edges: Edges, parent_children_d: dict) -> Edges: return Edges(sv_ids1, sv_ids2, affinities=affinities, areas=areas) -def filter_fake_edges(added_edges, subgraph_edges) -> List: +def filter_fake_edges(added_edges: np.ndarray, subgraph_edges: np.ndarray) -> List: """run bfs to check if a path exists""" self_edges = np.array([[node_id, node_id] for node_id in np.unique(added_edges)]) subgraph_edges = np.concatenate([subgraph_edges, self_edges]) @@ -90,7 +90,7 @@ def map_edges_to_chunks( ) -> Dict: """ maps a list of edges to corresponding chunks - returns a dictionary {chuunk_id: [edges that are part of this chunk]} + returns a dictionary {chunk_id: [edges that are part of this chunk]} """ chunk_ids_d = defaultdict(list) for i, r_index in enumerate(r_indices): diff --git a/pychunkedgraph/io/edges.py b/pychunkedgraph/io/edges.py index e4e542e06..ad959884a 100644 --- a/pychunkedgraph/io/edges.py +++ b/pychunkedgraph/io/edges.py @@ -17,12 +17,10 @@ from .protobuf.chunkEdges_pb2 import EdgesMsg, ChunkEdgesMsg -def serialize(edges: Edges, only_ids: bool = False) -> EdgesMsg: +def serialize(edges: Edges) -> EdgesMsg: edges_proto = EdgesMsg() edges_proto.node_ids1 = edges.node_ids1.astype(basetypes.NODE_ID).tobytes() edges_proto.node_ids2 = edges.node_ids2.astype(basetypes.NODE_ID).tobytes() - if only_ids: - return edges_proto edges_proto.affinities = edges.affinities.astype(basetypes.EDGE_AFFINITY).tobytes() edges_proto.areas = edges.areas.astype(basetypes.EDGE_AREA).tobytes() return edges_proto @@ -36,7 +34,7 @@ def deserialize(edges_message: EdgesMsg) -> Tuple[np.ndarray, np.ndarray, np.nda return Edges(sv_ids1, sv_ids2, affinities=affinities, areas=areas) -def _decompress_edges(content: bytes) -> dict: +def _decompress_edges(content: bytes) -> Dict: """ :param content: zstd compressed bytes :type bytes: @@ -59,7 +57,7 @@ def 
_decompress_edges(content: bytes) -> dict: def get_chunk_edges( edges_dir: str, chunks_coordinates: List[np.ndarray], cv_threads: int = 1 -) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: +) -> Dict: """ :param edges_dir: cloudvolume storage path :type str: @@ -67,7 +65,7 @@ def get_chunk_edges( :type List[np.ndarray]: :param cv_threads: cloudvolume storage client thread count :type int: - :return: edges, affinities, areas + :return: dictionary {"edge_type": Edges} :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray] """ fnames = [] From 85da300987ab9e7c81a862a06bedc721e3780d77 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 29 Aug 2019 21:12:29 -0400 Subject: [PATCH 0179/1097] more type annotations and docs --- pychunkedgraph/backend/connectivity/search.py | 5 ++++- pychunkedgraph/backend/definitions/edges.py | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/pychunkedgraph/backend/connectivity/search.py b/pychunkedgraph/backend/connectivity/search.py index cff4b1251..bd3faf227 100644 --- a/pychunkedgraph/backend/connectivity/search.py +++ b/pychunkedgraph/backend/connectivity/search.py @@ -20,8 +20,11 @@ def discover_vertex(self, u): raise StopSearch -def check_reachability(g, sv1s, sv2s, original_ids) -> np.ndarray: +def check_reachability(g, sv1s: np.ndarray, sv2s: np.ndarray, original_ids: np.ndarray) -> np.ndarray: """ + g: graph tool Graph instance with ids 0 to N-1 where N = vertex count + original_ids: sorted ChunkedGraph supervoxel ids + (to identify corresponding ids in graph tool) for each pair (sv1, sv2) check if a path exists (BFS) """ # mapping from original ids to graph tool ids diff --git a/pychunkedgraph/backend/definitions/edges.py b/pychunkedgraph/backend/definitions/edges.py index 0c2c1ff7a..6fc3e2ca2 100644 --- a/pychunkedgraph/backend/definitions/edges.py +++ b/pychunkedgraph/backend/definitions/edges.py @@ -57,7 +57,7 @@ def __iadd__(self, other): def __len__(self): return len(self.node_ids1) - def get_pairs(self): 
+ def get_pairs(self) -> np.ndarray: """ return numpy array of edge pairs [[sv1, sv2] ... ] """ From bacad5d7f8cc49fc8724b9c1de0063203c3f325d Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 3 Sep 2019 10:15:55 -0400 Subject: [PATCH 0180/1097] code review changes and remove examples package --- pychunkedgraph/examples/__init__.py | 76 -------- .../examples/parallel_test/__init__.py | 0 pychunkedgraph/examples/parallel_test/main.py | 37 ---- .../examples/parallel_test/tasks.py | 21 --- .../ingest/initialization/__init__.py | 3 + .../ingest/initialization/create.py | 175 ++++++++++++++++++ pychunkedgraph/ingest/ran_ingestion_v2.py | 4 +- pychunkedgraph/meshing/meshgen.py | 2 +- pychunkedgraph/utils/general.py | 33 ---- pychunkedgraph/utils/redis.py | 35 ++++ run_dev_cli.py | 10 - 11 files changed, 216 insertions(+), 180 deletions(-) delete mode 100644 pychunkedgraph/examples/__init__.py delete mode 100644 pychunkedgraph/examples/parallel_test/__init__.py delete mode 100644 pychunkedgraph/examples/parallel_test/main.py delete mode 100644 pychunkedgraph/examples/parallel_test/tasks.py create mode 100644 pychunkedgraph/ingest/initialization/__init__.py create mode 100644 pychunkedgraph/ingest/initialization/create.py create mode 100644 pychunkedgraph/utils/redis.py delete mode 100644 run_dev_cli.py diff --git a/pychunkedgraph/examples/__init__.py b/pychunkedgraph/examples/__init__.py deleted file mode 100644 index 5223e6144..000000000 --- a/pychunkedgraph/examples/__init__.py +++ /dev/null @@ -1,76 +0,0 @@ -from flask import Flask -from flask.logging import default_handler -from flask_cors import CORS -import sys -import logging -import os -import time -import json -import numpy as np -import datetime -from pychunkedgraph.app import config -import redis -from rq import Queue - -from pychunkedgraph.examples.parallel_test.main import init_parallel_test_cmds -from pychunkedgraph.meshing.meshing_test_temp import init_mesh_cmds - -# from pychunkedgraph.app import 
app_blueprint -from pychunkedgraph.app import cg_app_blueprint, meshing_app_blueprint -from pychunkedgraph.logging import jsonformatter -# from pychunkedgraph.app import manifest_app_blueprint -os.environ['TRAVIS_BRANCH'] = "IDONTKNOWWHYINEEDTHIS" - - -class CustomJsonEncoder(json.JSONEncoder): - def default(self, obj): - if isinstance(obj, np.ndarray): - return obj.tolist() - elif isinstance(obj, datetime.datetime): - return obj.__str__() - return json.JSONEncoder.default(self, obj) - - -def create_example_app(test_config=None): - app = Flask(__name__) - app.json_encoder = CustomJsonEncoder - - configure_app(app) - - app.register_blueprint(cg_app_blueprint.bp) - app.register_blueprint(meshing_app_blueprint.bp) - # app.register_blueprint(manifest_app_blueprint.bp) - - with app.app_context(): - init_parallel_test_cmds(app) - init_mesh_cmds(app) - - return app - - -def configure_app(app): - # Load logging scheme from config.py - app_settings = os.getenv('APP_SETTINGS') - if not app_settings: - app.config.from_object(config.BaseConfig) - else: - app.config.from_object(app_settings) - - - # Configure logging - # handler = logging.FileHandler(app.config['LOGGING_LOCATION']) - handler = logging.StreamHandler(sys.stdout) - handler.setLevel(app.config['LOGGING_LEVEL']) - formatter = jsonformatter.JsonFormatter( - fmt=app.config['LOGGING_FORMAT'], - datefmt=app.config['LOGGING_DATEFORMAT']) - formatter.converter = time.gmtime - handler.setFormatter(formatter) - app.logger.removeHandler(default_handler) - app.logger.addHandler(handler) - app.logger.setLevel(app.config['LOGGING_LEVEL']) - app.logger.propagate = False - - if app.config['USE_REDIS_JOBS']: - app.redis = redis.Redis.from_url(app.config['REDIS_URL']) - app.test_q = Queue('test' ,connection=app.redis) \ No newline at end of file diff --git a/pychunkedgraph/examples/parallel_test/__init__.py b/pychunkedgraph/examples/parallel_test/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git 
a/pychunkedgraph/examples/parallel_test/main.py b/pychunkedgraph/examples/parallel_test/main.py deleted file mode 100644 index 18aef6c6b..000000000 --- a/pychunkedgraph/examples/parallel_test/main.py +++ /dev/null @@ -1,37 +0,0 @@ -import click -import redis - -from flask import current_app -from flask.cli import AppGroup -from pychunkedgraph.examples.parallel_test.tasks import independent_task - -ingest_cli = AppGroup('parallel') - -def handler(*args, **kwargs): - ''' - Message handler function, called by redis - when a message is received on pubsub channel - ''' - print(args) - print(kwargs) - - -@ingest_cli.command('test') -@click.argument('n', type=int) -@click.argument('size', type=int) -def create_atomic_chunks(n, size): - print(f'Queueing {n} items of size {size} ...') - chunk_pubsub = current_app.redis.pubsub() - chunk_pubsub.subscribe(**{'test-channel': handler}) - - for item_id in range(n): - current_app.test_q.enqueue( - independent_task, - args=(item_id, size)) - - thread = chunk_pubsub.run_in_thread(sleep_time=0.1) - return 'Queued' - - -def init_parallel_test_cmds(app): - app.cli.add_command(ingest_cli) \ No newline at end of file diff --git a/pychunkedgraph/examples/parallel_test/tasks.py b/pychunkedgraph/examples/parallel_test/tasks.py deleted file mode 100644 index 46b956009..000000000 --- a/pychunkedgraph/examples/parallel_test/tasks.py +++ /dev/null @@ -1,21 +0,0 @@ -import os -import time -from flask import current_app -from pychunkedgraph.utils.general import redis_job - -# not a good solution -# figure out how to use app context - -REDIS_HOST = os.environ.get('REDIS_SERVICE_HOST', 'localhost') -REDIS_PORT = os.environ.get('REDIS_SERVICE_PORT', '6379') -REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD', 'dev') -REDIS_URL = f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0' - -@redis_job(REDIS_URL, 'test-channel') -def independent_task(chunk_id, chunk_size): - print(f' Working on chunk id: {chunk_id}, size {chunk_size}') - i = 0 - while i < 
chunk_size: - i += 1 - print('Done') - return chunk_id \ No newline at end of file diff --git a/pychunkedgraph/ingest/initialization/__init__.py b/pychunkedgraph/ingest/initialization/__init__.py new file mode 100644 index 000000000..009da30ed --- /dev/null +++ b/pychunkedgraph/ingest/initialization/__init__.py @@ -0,0 +1,3 @@ +""" +modules for chunkedgraph initialization/creation +""" diff --git a/pychunkedgraph/ingest/initialization/create.py b/pychunkedgraph/ingest/initialization/create.py new file mode 100644 index 000000000..7ccf29760 --- /dev/null +++ b/pychunkedgraph/ingest/initialization/create.py @@ -0,0 +1,175 @@ +""" +Module for stuff related to creating the initial chunkedgraph +""" + +import datetime +from typing import Optional, Sequence, Dict + +import pytz +import numpy as np + +from ...backend.chunkedgraph import ChunkedGraph +from ...backend.utils import basetypes, serializers, column_keys +from ...backend.definitions.edges import Edges, IN_CHUNK, BT_CHUNK, CX_CHUNK, TYPES as EDGE_TYPES +from ...backend.chunkedgraph_utils import compute_indices_pandas, get_google_compatible_time_stamp +from ...backend.flatgraph_utils import build_gt_graph, connected_components + + +def add_atomic_edges( + cg_instance: ChunkedGraph, + chunk_coord: np.ndarray, + chunk_edges_d: Dict[str, Edges], + isolated: Sequence[int], + time_stamp: Optional[datetime.datetime] = None, +): + """ + Creates atomic nodes in first abstraction layer for a SINGLE chunk + and all abstract nodes in the second for the same chunk. + All the edges (edge_ids) need to be from one chunk and no nodes should + exist for this chunk prior to calling this function. 
All cross edges + (cross_edge_ids) have to point out the chunk (first entry is the id + within the chunk) + + :param cg_instance: + :param chunk_coord: [x,y,z] + :param chunk_edges_d: dict of {"edge_type": Edges} + :param isolated: list of isolated node ids + :param time_stamp: datetime + """ + + chunk_node_ids, chunk_edge_ids = _get_chunk_nodes_and_edges(chunk_edges_d, isolated) + if not chunk_node_ids.size: + return 0 + + chunk_ids = cg_instance.get_chunk_ids_from_node_ids(chunk_node_ids) + assert len(np.unique(chunk_ids)) == 1 + + graph, _, _, unique_ids = build_gt_graph(chunk_edge_ids, make_directed=True) + ccs = connected_components(graph) + + parent_chunk_id = cg_instance.get_chunk_id( + layer=2, x=chunk_coord[0], y=chunk_coord[1], z=chunk_coord[2] + ) + parent_ids = cg_instance.get_unique_node_id_range(parent_chunk_id, step=len(ccs)) + + sparse_indices, remapping = _get_remapping(chunk_edges_d) + time_stamp = _get_valid_timestamp(time_stamp) + rows = [] + for i_cc, component in enumerate(ccs): + _rows = _process_component( + cg_instance, + chunk_edges_d, + parent_ids[i_cc], + unique_ids[component], + sparse_indices, + remapping, + time_stamp, + ) + rows.extend(_rows) + + if len(rows) > 100000: + cg_instance.bulk_write(rows) + rows = [] + cg_instance.bulk_write(rows) + + +def _get_chunk_nodes_and_edges(chunk_edges_d: dict, isolated_ids: Sequence[int]): + """ + returns IN_CHUNK edges and nodes_ids + """ + isolated_nodes_self_edges = np.vstack([isolated_ids, isolated_ids]).T + node_ids = [isolated_ids] + edge_ids = [isolated_nodes_self_edges] + for edge_type in EDGE_TYPES: + edges = chunk_edges_d[edge_type] + node_ids.append(edges.node_ids1) + if edge_type == IN_CHUNK: + node_ids.append(edges.node_ids2) + edge_ids.append(edges.get_pairs()) + + chunk_node_ids = np.unique(np.concatenate(node_ids)) + chunk_edge_ids = np.concatenate(edge_ids) + + return (chunk_node_ids, chunk_edge_ids) + + +def _get_remapping(chunk_edges_d: dict): + """ + TODO add logic explanation 
+ """ + sparse_indices = {} + remapping = {} + for edge_type in [BT_CHUNK, CX_CHUNK]: + edges = chunk_edges_d[edge_type].get_pairs() + u_ids, inv_ids = np.unique(edges, return_inverse=True) + mapped_ids = np.arange(len(u_ids), dtype=np.int32) + remapped_arr = mapped_ids[inv_ids].reshape(edges.shape) + sparse_indices[edge_type] = compute_indices_pandas(remapped_arr) + remapping[edge_type] = dict(zip(u_ids, mapped_ids)) + return sparse_indices, remapping + + +def _get_valid_timestamp(timestamp): + if timestamp is None: + timestamp = datetime.datetime.utcnow() + + if timestamp.tzinfo is None: + timestamp = pytz.UTC.localize(timestamp) + + # Comply to resolution of BigTables TimeRange + return get_google_compatible_time_stamp(timestamp, round_up=False) + + +def _process_component( + cg_instance, + chunk_edges_d, + parent_id, + node_ids, + sparse_indices, + remapping, + time_stamp, +): + rows = [] + chunk_out_edges = [] # out = between + cross + for node_id in node_ids: + _edges = _get_out_edges(node_id, chunk_edges_d, sparse_indices, remapping) + chunk_out_edges.append(_edges) + val_dict = {column_keys.Hierarchy.Parent: parent_id} + + r_key = serializers.serialize_uint64(node_id) + rows.append(cg_instance.mutate_row(r_key, val_dict, time_stamp=time_stamp)) + + chunk_out_edges = np.concatenate(chunk_out_edges) + cce_layers = cg_instance.get_cross_chunk_edges_layer(chunk_out_edges) + u_cce_layers = np.unique(cce_layers) + + val_dict = {column_keys.Hierarchy.Child: node_ids} + for cc_layer in u_cce_layers: + layer_out_edges = chunk_out_edges[cce_layers == cc_layer] + if layer_out_edges.size: + col = column_keys.Connectivity.CrossChunkEdge[cc_layer] + val_dict[col] = layer_out_edges + + r_key = serializers.serialize_uint64(parent_id) + rows.append(cg_instance.mutate_row(r_key, val_dict, time_stamp=time_stamp)) + return rows + + +def _get_out_edges(node_id, chunk_edges_d, sparse_indices, remapping): + """ + TODO add docs + returns edges of node_id pointing outside the chunk 
(between and cross) + """ + chunk_out_edges = np.array([], dtype=basetypes.NODE_ID).reshape(0, 2) + for edge_type in remapping: + if node_id in remapping[edge_type]: + edges_obj = chunk_edges_d[edge_type] + edges = edges_obj.get_pairs() + + row_ids, column_ids = sparse_indices[edge_type][ + remapping[edge_type][node_id] + ] + row_ids = row_ids[column_ids == 0] + # edges that this node is part of + chunk_out_edges = np.concatenate([chunk_out_edges, edges[row_ids]]) + return chunk_out_edges diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 7dbcf9a9a..350c77a93 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -18,9 +18,9 @@ from rq import Queue from redis import Redis -from ..utils.general import redis_job, REDIS_URL +from ..utils.redis import redis_job, REDIS_URL from . import ingestionmanager, ingestion_utils as iu -from ..backend.initialization.create import add_atomic_edges +from .initialization.create import add_atomic_edges from ..backend.definitions.edges import Edges, CX_CHUNK, TYPES as EDGE_TYPES from ..backend.utils import basetypes from ..io.edges import put_chunk_edges diff --git a/pychunkedgraph/meshing/meshgen.py b/pychunkedgraph/meshing/meshgen.py index f7e741a90..298b39246 100644 --- a/pychunkedgraph/meshing/meshgen.py +++ b/pychunkedgraph/meshing/meshgen.py @@ -1,4 +1,3 @@ -from pychunkedgraph.utils.general import redis_job from typing import Sequence import sys import os @@ -28,6 +27,7 @@ from pychunkedgraph.backend import chunkedgraph # noqa from pychunkedgraph.backend.utils import serializers, column_keys # noqa from pychunkedgraph.meshing import meshgen_utils # noqa +from pychunkedgraph.utils.redis import redis_job # Change below to true if debugging and want to see results in stdout PRINT_FOR_DEBUGGING = False diff --git a/pychunkedgraph/utils/general.py b/pychunkedgraph/utils/general.py index 65643db7a..b2825f671 100644 --- 
a/pychunkedgraph/utils/general.py +++ b/pychunkedgraph/utils/general.py @@ -2,40 +2,7 @@ generic helper funtions """ -import os -import functools - import numpy as np -import redis - - -REDIS_HOST = os.environ.get("REDIS_SERVICE_HOST", "localhost") -REDIS_PORT = os.environ.get("REDIS_SERVICE_PORT", "6379") -REDIS_PASSWORD = os.environ.get("REDIS_PASSWORD", "dev") -REDIS_URL = f"redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0" - - -def redis_job(redis_url, redis_channel): - """ - Decorator factory - Returns a decorator that connects to a redis instance - and publish a message (return value of the function) when the job is done. - """ - - def redis_job_decorator(func): - r = redis.Redis.from_url(redis_url) - - @functools.wraps(func) - def wrapper(*args, **kwargs): - job_result = func(*args, **kwargs) - if not job_result: - job_result = str(job_result) - r.publish(redis_channel, job_result) - - return wrapper - - return redis_job_decorator - def reverse_dictionary(dictionary): """ diff --git a/pychunkedgraph/utils/redis.py b/pychunkedgraph/utils/redis.py new file mode 100644 index 000000000..79aa7daea --- /dev/null +++ b/pychunkedgraph/utils/redis.py @@ -0,0 +1,35 @@ +""" +generic helper funtions +""" + +import os +import functools + +import redis + +REDIS_HOST = os.environ.get("REDIS_SERVICE_HOST", "localhost") +REDIS_PORT = os.environ.get("REDIS_SERVICE_PORT", "6379") +REDIS_PASSWORD = os.environ.get("REDIS_PASSWORD", "dev") +REDIS_URL = f"redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0" + + +def redis_job(redis_url, redis_channel): + """ + Decorator factory + Returns a decorator that connects to a redis instance + and publish a message (return value of the function) when the job is done. 
+ """ + + def redis_job_decorator(func): + r = redis.Redis.from_url(redis_url) + + @functools.wraps(func) + def wrapper(*args, **kwargs): + job_result = func(*args, **kwargs) + if not job_result: + job_result = str(job_result) + r.publish(redis_channel, job_result) + + return wrapper + + return redis_job_decorator diff --git a/run_dev_cli.py b/run_dev_cli.py deleted file mode 100644 index 328179dbc..000000000 --- a/run_dev_cli.py +++ /dev/null @@ -1,10 +0,0 @@ -from flask.cli import FlaskGroup -from pychunkedgraph.examples import create_example_app - - -app = create_example_app() -cli = FlaskGroup(create_app=create_example_app) - - -if __name__ == '__main__': - cli() \ No newline at end of file From eb8a0e203636b22c07cdca6f64c0d662dbb548c8 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 3 Sep 2019 10:17:18 -0400 Subject: [PATCH 0181/1097] updates --- pychunkedgraph/app/redis_cli.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pychunkedgraph/app/redis_cli.py b/pychunkedgraph/app/redis_cli.py index d628a15cf..6431d14b9 100644 --- a/pychunkedgraph/app/redis_cli.py +++ b/pychunkedgraph/app/redis_cli.py @@ -10,9 +10,9 @@ from flask import current_app from flask.cli import AppGroup -from ..utils.general import REDIS_HOST -from ..utils.general import REDIS_PORT -from ..utils.general import REDIS_PASSWORD +from ..utils.redis import REDIS_HOST +from ..utils.redis import REDIS_PORT +from ..utils.redis import REDIS_PASSWORD redis_cli = AppGroup("redis") From d9f322f5625cc12bfa886ad3b70594bc68712b3c Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 3 Sep 2019 10:22:12 -0400 Subject: [PATCH 0182/1097] remove old initialization package --- .../backend/initialization/__init__.py | 3 - .../backend/initialization/create.py | 176 ------------------ 2 files changed, 179 deletions(-) delete mode 100644 pychunkedgraph/backend/initialization/__init__.py delete mode 100644 pychunkedgraph/backend/initialization/create.py diff --git 
a/pychunkedgraph/backend/initialization/__init__.py b/pychunkedgraph/backend/initialization/__init__.py deleted file mode 100644 index 009da30ed..000000000 --- a/pychunkedgraph/backend/initialization/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -""" -modules for chunkedgraph initialization/creation -""" diff --git a/pychunkedgraph/backend/initialization/create.py b/pychunkedgraph/backend/initialization/create.py deleted file mode 100644 index c433c9bc0..000000000 --- a/pychunkedgraph/backend/initialization/create.py +++ /dev/null @@ -1,176 +0,0 @@ -""" -Module for stuff related to creating the initial chunkedgraph -""" - -import datetime -from typing import Optional, Sequence, Dict - -import pytz -import numpy as np - -from ..chunkedgraph import ChunkedGraph -from ..utils import basetypes -from ..definitions.edges import Edges, IN_CHUNK, BT_CHUNK, CX_CHUNK, TYPES as EDGE_TYPES -from ..chunkedgraph_utils import compute_indices_pandas, get_google_compatible_time_stamp -from ..flatgraph_utils import build_gt_graph, connected_components -from ..utils import serializers, column_keys - - -def add_atomic_edges( - cg_instance: ChunkedGraph, - chunk_coord: np.ndarray, - chunk_edges_d: Dict[str, Edges], - isolated: Sequence[int], - time_stamp: Optional[datetime.datetime] = None, -): - """ - Creates atomic nodes in first abstraction layer for a SINGLE chunk - and all abstract nodes in the second for the same chunk. - All the edges (edge_ids) need to be from one chunk and no nodes should - exist for this chunk prior to calling this function. 
All cross edges - (cross_edge_ids) have to point out the chunk (first entry is the id - within the chunk) - - :param cg_instance: - :param chunk_coord: [x,y,z] - :param chunk_edges_d: dict of {"edge_type": Edges} - :param isolated: list of isolated node ids - :param time_stamp: datetime - """ - - chunk_node_ids, chunk_edge_ids = _get_chunk_nodes_and_edges(chunk_edges_d, isolated) - if not chunk_node_ids.size: - return 0 - - chunk_ids = cg_instance.get_chunk_ids_from_node_ids(chunk_node_ids) - assert len(np.unique(chunk_ids)) == 1 - - graph, _, _, unique_ids = build_gt_graph(chunk_edge_ids, make_directed=True) - ccs = connected_components(graph) - - parent_chunk_id = cg_instance.get_chunk_id( - layer=2, x=chunk_coord[0], y=chunk_coord[1], z=chunk_coord[2] - ) - parent_ids = cg_instance.get_unique_node_id_range(parent_chunk_id, step=len(ccs)) - - sparse_indices, remapping = _get_remapping(chunk_edges_d) - time_stamp = _get_valid_timestamp(time_stamp) - rows = [] - for i_cc, component in enumerate(ccs): - _rows = _process_component( - cg_instance, - chunk_edges_d, - parent_ids[i_cc], - unique_ids[component], - sparse_indices, - remapping, - time_stamp, - ) - rows.extend(_rows) - - if len(rows) > 100000: - cg_instance.bulk_write(rows) - rows = [] - cg_instance.bulk_write(rows) - - -def _get_chunk_nodes_and_edges(chunk_edges_d: dict, isolated_ids: Sequence[int]): - """ - returns IN_CHUNK edges and nodes_ids - """ - isolated_nodes_self_edges = np.vstack([isolated_ids, isolated_ids]).T - node_ids = [isolated_ids] - edge_ids = [isolated_nodes_self_edges] - for edge_type in EDGE_TYPES: - edges = chunk_edges_d[edge_type] - node_ids.append(edges.node_ids1) - if edge_type == IN_CHUNK: - node_ids.append(edges.node_ids2) - edge_ids.append(edges.get_pairs()) - - chunk_node_ids = np.unique(np.concatenate(node_ids)) - chunk_edge_ids = np.concatenate(edge_ids) - - return (chunk_node_ids, chunk_edge_ids) - - -def _get_remapping(chunk_edges_d: dict): - """ - TODO add logic explanation 
- """ - sparse_indices = {} - remapping = {} - for edge_type in [BT_CHUNK, CX_CHUNK]: - edges = chunk_edges_d[edge_type].get_pairs() - u_ids, inv_ids = np.unique(edges, return_inverse=True) - mapped_ids = np.arange(len(u_ids), dtype=np.int32) - remapped_arr = mapped_ids[inv_ids].reshape(edges.shape) - sparse_indices[edge_type] = compute_indices_pandas(remapped_arr) - remapping[edge_type] = dict(zip(u_ids, mapped_ids)) - return sparse_indices, remapping - - -def _get_valid_timestamp(timestamp): - if timestamp is None: - timestamp = datetime.datetime.utcnow() - - if timestamp.tzinfo is None: - timestamp = pytz.UTC.localize(timestamp) - - # Comply to resolution of BigTables TimeRange - return get_google_compatible_time_stamp(timestamp, round_up=False) - - -def _process_component( - cg_instance, - chunk_edges_d, - parent_id, - node_ids, - sparse_indices, - remapping, - time_stamp, -): - rows = [] - chunk_out_edges = [] # out = between + cross - for node_id in node_ids: - _edges = _get_out_edges(node_id, chunk_edges_d, sparse_indices, remapping) - chunk_out_edges.append(_edges) - val_dict = {column_keys.Hierarchy.Parent: parent_id} - - r_key = serializers.serialize_uint64(node_id) - rows.append(cg_instance.mutate_row(r_key, val_dict, time_stamp=time_stamp)) - - chunk_out_edges = np.concatenate(chunk_out_edges) - cce_layers = cg_instance.get_cross_chunk_edges_layer(chunk_out_edges) - u_cce_layers = np.unique(cce_layers) - - val_dict = {column_keys.Hierarchy.Child: node_ids} - for cc_layer in u_cce_layers: - layer_out_edges = chunk_out_edges[cce_layers == cc_layer] - if layer_out_edges.size: - col = column_keys.Connectivity.CrossChunkEdge[cc_layer] - val_dict[col] = layer_out_edges - - r_key = serializers.serialize_uint64(parent_id) - rows.append(cg_instance.mutate_row(r_key, val_dict, time_stamp=time_stamp)) - return rows - - -def _get_out_edges(node_id, chunk_edges_d, sparse_indices, remapping): - """ - TODO add docs - returns edges of node_id pointing outside the chunk 
(between and cross) - """ - chunk_out_edges = np.array([], dtype=basetypes.NODE_ID).reshape(0, 2) - for edge_type in remapping: - if node_id in remapping[edge_type]: - edges_obj = chunk_edges_d[edge_type] - edges = edges_obj.get_pairs() - - row_ids, column_ids = sparse_indices[edge_type][ - remapping[edge_type][node_id] - ] - row_ids = row_ids[column_ids == 0] - # edges that this node is part of - chunk_out_edges = np.concatenate([chunk_out_edges, edges[row_ids]]) - return chunk_out_edges From 333e255f818abeaf1583c72c47f7cee63ae95d16 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 3 Sep 2019 15:21:08 -0400 Subject: [PATCH 0183/1097] remove old subpackages --- pychunkedgraph/creator/__init__.py | 0 pychunkedgraph/creator/buildgraph.md | 53 -- pychunkedgraph/creator/chunkcreator.py | 489 ------------------- pychunkedgraph/creator/creator_utils.py | 116 ----- pychunkedgraph/creator/data_test.py | 51 -- pychunkedgraph/creator/graph_tests.py | 133 ----- pychunkedgraph/edge_gen/Dockerfile | 38 -- pychunkedgraph/edge_gen/edgetask.py | 597 ----------------------- pychunkedgraph/edge_gen/requirements.txt | 3 - pychunkedgraph/rechunking/__init__.py | 0 pychunkedgraph/rechunking/transformer.py | 209 -------- 11 files changed, 1689 deletions(-) delete mode 100644 pychunkedgraph/creator/__init__.py delete mode 100644 pychunkedgraph/creator/buildgraph.md delete mode 100644 pychunkedgraph/creator/chunkcreator.py delete mode 100644 pychunkedgraph/creator/creator_utils.py delete mode 100644 pychunkedgraph/creator/data_test.py delete mode 100644 pychunkedgraph/creator/graph_tests.py delete mode 100644 pychunkedgraph/edge_gen/Dockerfile delete mode 100644 pychunkedgraph/edge_gen/edgetask.py delete mode 100644 pychunkedgraph/edge_gen/requirements.txt delete mode 100644 pychunkedgraph/rechunking/__init__.py delete mode 100644 pychunkedgraph/rechunking/transformer.py diff --git a/pychunkedgraph/creator/__init__.py b/pychunkedgraph/creator/__init__.py deleted file mode 100644 
index e69de29bb..000000000 diff --git a/pychunkedgraph/creator/buildgraph.md b/pychunkedgraph/creator/buildgraph.md deleted file mode 100644 index bfefb2742..000000000 --- a/pychunkedgraph/creator/buildgraph.md +++ /dev/null @@ -1,53 +0,0 @@ -# Creating a ChunkedGraph - -There are two steps to creating a ChunkedGraph for a region graph: - -0. Creating the table and BigTable family -1. Downloading files from `cloudvolume` and storing them on disk -2. Creating the ChunkedGraph from these files - -## Creating the table and family - -Deleting the current table: - -``` -from src.pychunkedgraph import chunkedgraph -cg = chunkedgraph.ChunkedGraph(table_id="mytableid") - -cg.table.delete() -``` - -Creating a new table and family: - -``` -cg = chunkedgraph.ChunkedGraph(table_id="mytableid") - -cg.table.create() -f = cg.table.column_family(cg.family_id) -f.create() -``` - -## Downloading all files from cloudvolume - -To download all relevant friles from a cloudvolume directory do - -``` -from src.pychunkedgraph import chunkcreator - -chunkcreator.download_and_store_cv_files(cv_url) -``` -The files are stored as h5's in a directory in `home`. The directory name is chosen to be the layer name. - - -## Building the ChunkedGraph - -``` -chunkcreator.create_chunked_graph(cv_url, table_id="mytableid", nb_cpus=1) -``` - -`nb_cpus` -allows the user to run this process in parallel using `subprocesses` (see [multiprocessing.md](https://github.com/seung-lab/PyChunkedGraph/blob/master/src/pychunkedgraph/multiprocessing.md)). 
- - - - diff --git a/pychunkedgraph/creator/chunkcreator.py b/pychunkedgraph/creator/chunkcreator.py deleted file mode 100644 index 58a038160..000000000 --- a/pychunkedgraph/creator/chunkcreator.py +++ /dev/null @@ -1,489 +0,0 @@ -import glob -import numpy as np -import os -import re -import time -import itertools -import random - -from cloudvolume import storage - -# from chunkedgraph import ChunkedGraph -import pychunkedgraph.backend.chunkedgraph_utils -from pychunkedgraph.backend import chunkedgraph -from multiwrapper import multiprocessing_utils as mu -from pychunkedgraph.creator import creator_utils - - -def download_and_store_cv_files(dataset_name="basil", - n_threads=10, olduint32=False): - """ Downloads files from google cloud using cloud-volume - - :param dataset_name: str - :param n_threads: int - :param olduint32: bool - """ - if "basil" == dataset_name: - cv_url = "gs://nkem/basil_4k_oldnet/region_graph/" - elif "pinky40" == dataset_name: - cv_url = "gs://nkem/pinky40_v11/mst_trimmed_sem_remap/region_graph/" - elif "pinky100" == dataset_name: - cv_url = "gs://nkem/pinky100_v0/region_graph/" - else: - raise Exception("Could not identify region graph ressource") - - with storage.SimpleStorage(cv_url) as cv_st: - dir_path = creator_utils.dir_from_layer_name( - creator_utils.layer_name_from_cv_url(cv_st.layer_path)) - - if not os.path.exists(dir_path): - os.makedirs(dir_path) - - file_paths = list(cv_st.list_files()) - - file_chunks = np.array_split(file_paths, n_threads * 3) - multi_args = [] - for i_file_chunk, file_chunk in enumerate(file_chunks): - multi_args.append([i_file_chunk, cv_url, file_chunk, olduint32]) - - # Run parallelizing - if n_threads == 1: - mu.multiprocess_func(_download_and_store_cv_files_thread, - multi_args, n_threads=n_threads, - verbose=True, debug=n_threads==1) - else: - mu.multisubprocess_func(_download_and_store_cv_files_thread, - multi_args, n_threads=n_threads) - - -def _download_and_store_cv_files_thread(args): - """ Helper 
thread to download files from google cloud """ - chunk_id, cv_url, file_paths, olduint32 = args - - # Reset connection pool to make cloud-volume compatible with parallelizing - storage.reset_connection_pools() - - n_file_paths = len(file_paths) - time_start = time.time() - with storage.SimpleStorage(cv_url) as cv_st: - for i_fp, fp in enumerate(file_paths): - if i_fp % 100 == 1: - dt = time.time() - time_start - eta = dt / i_fp * n_file_paths - dt - print("%d: %d / %d - dt: %.3fs - eta: %.3fs" % ( - chunk_id, i_fp, n_file_paths, dt, eta)) - - creator_utils.download_and_store_edge_file(cv_st, fp) - - -def check_stored_cv_files(dataset_name="basil"): - """ Tests if all files were downloaded - - :param dataset_name: str - """ - if "basil" == dataset_name: - cv_url = "gs://nkem/basil_4k_oldnet/region_graph/" - elif "pinky40" == dataset_name: - cv_url = "gs://nkem/pinky40_v11/mst_trimmed_sem_remap/region_graph/" - elif "pinky100" == dataset_name: - cv_url = "gs://nkem/pinky100_v0/region_graph/" - else: - raise Exception("Could not identify region graph ressource") - - with storage.SimpleStorage(cv_url) as cv_st: - dir_path = creator_utils.dir_from_layer_name( - creator_utils.layer_name_from_cv_url(cv_st.layer_path)) - - file_paths = list(cv_st.list_files()) - - c = 0 - n_file_paths = len(file_paths) - time_start = time.time() - for i_fp, fp in enumerate(file_paths): - if i_fp % 1000 == 1: - dt = time.time() - time_start - eta = dt / i_fp * n_file_paths - dt - print("%d / %d - dt: %.3fs - eta: %.3fs" % ( - i_fp, n_file_paths, dt, eta)) - - if not os.path.exists(dir_path + fp[:-4] + ".h5"): - print(dir_path + fp[:-4] + ".h5") - c += 1 - - print("%d files were missing" % c) - - -def _sort_arrays(coords, paths): - sorting = np.lexsort((coords[..., 2], coords[..., 1], coords[..., 0])) - return coords[sorting], paths[sorting] - -def create_chunked_graph(table_id=None, cv_url=None, ws_url=None, fan_out=2, - bbox=None, chunk_size=(512, 512, 128), verbose=False, - n_threads=1): 
- """ Creates chunked graph from downloaded files - - :param table_id: str - :param cv_url: str - :param ws_url: str - :param fan_out: int - :param bbox: [[x_, y_, z_], [_x, _y, _z]] - :param chunk_size: tuple - :param verbose: bool - :param n_threads: int - """ - if cv_url is None or ws_url is None: - if "basil" in table_id: - cv_url = "gs://nkem/basil_4k_oldnet/region_graph/" - ws_url = "gs://neuroglancer/svenmd/basil_4k_oldnet_cg/watershed/" - elif "pinky40" in table_id: - cv_url = "gs://nkem/pinky40_v11/mst_trimmed_sem_remap/region_graph/" - ws_url = "gs://neuroglancer/svenmd/pinky40_v11/watershed/" - elif "pinky100" in table_id: - cv_url = "gs://nkem/pinky100_v0/region_graph/" - ws_url = "gs://neuroglancer/nkem/pinky100_v0/ws/lost_no-random/bbox1_0/" - else: - raise Exception("Could not identify region graph ressource") - - times = [] - time_start = time.time() - - chunk_size = np.array(list(chunk_size)) - - file_paths = np.sort(glob.glob(creator_utils.dir_from_layer_name( - creator_utils.layer_name_from_cv_url(cv_url)) + "/*")) - - file_path_blocks = np.array_split(file_paths, n_threads * 3) - - multi_args = [] - for fp_block in file_path_blocks: - multi_args.append([fp_block, table_id, chunk_size, bbox]) - - if n_threads == 1: - results = mu.multiprocess_func( - _preprocess_chunkedgraph_data_thread, multi_args, - n_threads=n_threads, - verbose=True, debug=n_threads == 1) - else: - results = mu.multisubprocess_func( - _preprocess_chunkedgraph_data_thread, multi_args, - n_threads=n_threads) - - in_chunk_connected_paths = np.array([]) - in_chunk_connected_ids = np.array([], dtype=np.uint64).reshape(-1, 3) - in_chunk_disconnected_paths = np.array([]) - in_chunk_disconnected_ids = np.array([], dtype=np.uint64).reshape(-1, 3) - between_chunk_paths = np.array([]) - between_chunk_ids = np.array([], dtype=np.uint64).reshape(-1, 2, 3) - isolated_paths = np.array([]) - isolated_ids = np.array([], dtype=np.uint64).reshape(-1, 3) - - for result in results: - 
in_chunk_connected_paths = np.concatenate([in_chunk_connected_paths, result[0]]) - in_chunk_connected_ids = np.concatenate([in_chunk_connected_ids, result[1]]) - in_chunk_disconnected_paths = np.concatenate([in_chunk_disconnected_paths, result[2]]) - in_chunk_disconnected_ids = np.concatenate([in_chunk_disconnected_ids, result[3]]) - between_chunk_paths = np.concatenate([between_chunk_paths, result[4]]) - between_chunk_ids = np.concatenate([between_chunk_ids, result[5]]) - isolated_paths = np.concatenate([isolated_paths, result[6]]) - isolated_ids = np.concatenate([isolated_ids, result[7]]) - - assert len(in_chunk_connected_ids) == len(in_chunk_connected_paths) == \ - len(in_chunk_disconnected_ids) == len(in_chunk_disconnected_paths) == \ - len(isolated_ids) == len(isolated_paths) - - in_chunk_connected_ids, in_chunk_connected_paths = \ - _sort_arrays(in_chunk_connected_ids, in_chunk_connected_paths) - - in_chunk_disconnected_ids, in_chunk_disconnected_paths = \ - _sort_arrays(in_chunk_disconnected_ids, in_chunk_disconnected_paths) - - isolated_ids, isolated_paths = \ - _sort_arrays(isolated_ids, isolated_paths) - - times.append(["Preprocessing", time.time() - time_start]) - - print("Preprocessing took %.3fs = %.2fh" % (times[-1][1], times[-1][1]/3600)) - - time_start = time.time() - - multi_args = [] - - in_chunk_id_blocks = np.array_split(in_chunk_connected_ids, max(1, n_threads)) - cumsum = 0 - - for in_chunk_id_block in in_chunk_id_blocks: - multi_args.append([between_chunk_ids, between_chunk_paths, - in_chunk_id_block, cumsum]) - cumsum += len(in_chunk_id_block) - - # Run parallelizing - if n_threads == 1: - results = mu.multiprocess_func( - _between_chunk_masks_thread, multi_args, n_threads=n_threads, - verbose=True, debug=n_threads == 1) - else: - results = mu.multisubprocess_func( - _between_chunk_masks_thread, multi_args, n_threads=n_threads) - - times.append(["Data sorting", time.time() - time_start]) - - print("Data sorting took %.3fs = %.2fh" % 
(times[-1][1], times[-1][1]/3600)) - - time_start = time.time() - - n_layers = int(np.ceil(pychunkedgraph.backend.chunkedgraph_utils.log_n(np.max(in_chunk_connected_ids) + 1, fan_out))) + 2 - - print("N layers: %d" % n_layers) - - cg = chunkedgraph.ChunkedGraph(table_id=table_id, n_layers=np.uint64(n_layers), - fan_out=np.uint64(fan_out), - chunk_size=np.array(chunk_size, dtype=np.uint64), - cv_path=ws_url, is_new=True) - - # Fill lowest layer and create first abstraction layer - # Create arguments for parallelizing - - multi_args = [] - for result in results: - offset, between_chunk_paths_out_masked, between_chunk_paths_in_masked = result - - for i_chunk in range(len(between_chunk_paths_out_masked)): - multi_args.append([table_id, - in_chunk_connected_paths[offset + i_chunk], - in_chunk_disconnected_paths[offset + i_chunk], - isolated_paths[offset + i_chunk], - between_chunk_paths_in_masked[i_chunk], - between_chunk_paths_out_masked[i_chunk], - verbose]) - - random.shuffle(multi_args) - - print("%d jobs for creating layer 1 + 2" % len(multi_args)) - - # Run parallelizing - if n_threads == 1: - mu.multiprocess_func( - _create_atomic_layer_thread, multi_args, n_threads=n_threads, - verbose=True, debug=n_threads == 1) - else: - mu.multisubprocess_func( - _create_atomic_layer_thread, multi_args, n_threads=n_threads) - - times.append(["Layers 1 + 2", time.time() - time_start]) - - # Fill higher abstraction layers - child_chunk_ids = in_chunk_connected_ids.copy() - for layer_id in range(3, n_layers + 1): - - time_start = time.time() - - print("\n\n\n --- LAYER %d --- \n\n\n" % layer_id) - - parent_chunk_ids = child_chunk_ids // cg.fan_out - parent_chunk_ids = parent_chunk_ids.astype(np.int) - - u_pcids, inds = np.unique(parent_chunk_ids, - axis=0, return_inverse=True) - - if len(u_pcids) > n_threads: - n_threads_per_process = 1 - else: - n_threads_per_process = int(np.ceil(n_threads / len(u_pcids))) - - multi_args = [] - for ind in range(len(u_pcids)): - 
multi_args.append([table_id, layer_id, - child_chunk_ids[inds == ind].astype(np.int), - n_threads_per_process]) - - child_chunk_ids = u_pcids - - # Run parallelizing - if n_threads == 1: - mu.multiprocess_func( - _add_layer_thread, multi_args, n_threads=n_threads, - verbose=True, - debug=n_threads == 1) - else: - mu.multisubprocess_func( - _add_layer_thread, multi_args, n_threads=n_threads, - suffix=str(layer_id)) - - times.append(["Layer %d" % layer_id, time.time() - time_start]) - - for time_entry in times: - print("%s: %.2fs = %.2fmin = %.2fh" % (time_entry[0], time_entry[1], - time_entry[1] / 60, - time_entry[1] / 3600)) - - -def _preprocess_chunkedgraph_data_thread(args): - """ Reads downloaded files and sorts them in _in_ and _between_ chunks """ - - file_paths, table_id, chunk_size, bbox = args - - if bbox is None: - bbox = [[0, 0, 0], [np.inf, np.inf, np.inf]] - - bbox = np.array(bbox) - - in_chunk_connected_paths = np.array([]) - in_chunk_connected_ids = np.array([], dtype=np.uint64).reshape(-1, 3) - in_chunk_disconnected_paths = np.array([]) - in_chunk_disconnected_ids = np.array([], dtype=np.uint64).reshape(-1, 3) - between_chunk_paths = np.array([]) - between_chunk_ids = np.array([], dtype=np.uint64).reshape(-1, 2, 3) - isolated_paths = np.array([]) - isolated_ids = np.array([], dtype=np.uint64).reshape(-1, 3) - - # Read file paths - gather chunk ids and in / out properties - for i_fp, fp in enumerate(file_paths): - file_name = os.path.basename(fp).split(".")[0] - - # Read coordinates from file path - x1, x2, y1, y2, z1, z2 = np.array(re.findall("[\d]+", file_name), dtype=np.int)[:6] - - if np.any((bbox[0] - np.array([x2, y2, z2])) >= 0) or \ - np.any((bbox[1] - np.array([x1, y1, z1])) <= 0): - continue - - dx = x2 - x1 - dy = y2 - y1 - dz = z2 - z1 - - d = np.array([dx, dy, dz]) - c = np.array([x1, y1, z1]) - - # if there is a 2 in d then the file contains edges that cross chunks - gap = 2 - - if gap in d: - s_c = np.where(d == gap)[0] - chunk_coord = 
c.copy() - - chunk1_id = np.array(chunk_coord / chunk_size, dtype=np.int) - chunk_coord[s_c] += chunk_size[s_c] - chunk2_id = np.array(chunk_coord / chunk_size, dtype=np.int) - - between_chunk_ids = np.concatenate([between_chunk_ids, - np.array([chunk1_id, chunk2_id])[None]]) - between_chunk_paths = np.concatenate([between_chunk_paths, [fp]]) - else: - chunk_coord = np.array(c / chunk_size, dtype=np.int) - - if "disconnected" in file_name: - in_chunk_disconnected_ids = np.concatenate([in_chunk_disconnected_ids, chunk_coord[None]]) - in_chunk_disconnected_paths = np.concatenate([in_chunk_disconnected_paths, [fp]]) - elif "isolated" in file_name: - isolated_ids = np.concatenate([isolated_ids, chunk_coord[None]]) - isolated_paths = np.concatenate([isolated_paths, [fp]]) - else: - in_chunk_connected_ids = np.concatenate([in_chunk_connected_ids, chunk_coord[None]]) - in_chunk_connected_paths = np.concatenate([in_chunk_connected_paths, [fp]]) - - return in_chunk_connected_paths, in_chunk_connected_ids, \ - in_chunk_disconnected_paths, in_chunk_disconnected_ids, \ - between_chunk_paths, between_chunk_ids, \ - isolated_paths, isolated_ids - - -def _between_chunk_masks_thread(args): - """""" - between_chunk_ids, between_chunk_paths, in_chunk_id_block, offset = args - - between_chunk_paths_out_masked = [] - between_chunk_paths_in_masked = [] - - for i_in_chunk_id, in_chunk_id in enumerate(in_chunk_id_block): - out_paths_mask = np.sum(np.abs(between_chunk_ids[:, 0] - in_chunk_id), axis=1) == 0 - in_paths_masks = np.sum(np.abs(between_chunk_ids[:, 1] - in_chunk_id), axis=1) == 0 - - between_chunk_paths_out_masked.append(between_chunk_paths[out_paths_mask]) - between_chunk_paths_in_masked.append(between_chunk_paths[in_paths_masks]) - - return offset, between_chunk_paths_out_masked, between_chunk_paths_in_masked - - -def _create_atomic_layer_thread(args): - """ Fills lowest layer and create first abstraction layer """ - # Load args - table_id, chunk_connected_path, 
chunk_disconnected_path, isolated_path,\ - in_paths, out_paths, verbose = args - - # Load edge information - edge_ids = {"in_connected": np.array([], dtype=np.uint64).reshape(0, 2), - "in_disconnected": np.array([], dtype=np.uint64).reshape(0, 2), - "cross": np.array([], dtype=np.uint64).reshape(0, 2), - "between_connected": np.array([], dtype=np.uint64).reshape(0, 2), - "between_disconnected": np.array([], dtype=np.uint64).reshape(0, 2)} - edge_affs = {"in_connected": np.array([], dtype=np.float32), - "in_disconnected": np.array([], dtype=np.float32), - "between_connected": np.array([], dtype=np.float32), - "between_disconnected": np.array([], dtype=np.float32)} - edge_areas = {"in_connected": np.array([], dtype=np.float32), - "in_disconnected": np.array([], dtype=np.float32), - "between_connected": np.array([], dtype=np.float32), - "between_disconnected": np.array([], dtype=np.float32)} - - in_connected_dict = creator_utils.read_edge_file_h5(chunk_connected_path) - in_disconnected_dict = creator_utils.read_edge_file_h5(chunk_disconnected_path) - - edge_ids["in_connected"] = in_connected_dict["edge_ids"] - edge_affs["in_connected"] = in_connected_dict["edge_affs"] - edge_areas["in_connected"] = in_connected_dict["edge_areas"] - - edge_ids["in_disconnected"] = in_disconnected_dict["edge_ids"] - edge_affs["in_disconnected"] = in_disconnected_dict["edge_affs"] - edge_areas["in_disconnected"] = in_disconnected_dict["edge_areas"] - - if os.path.exists(isolated_path): - isolated_ids = creator_utils.read_edge_file_h5(isolated_path)["node_ids"] - else: - isolated_ids = np.array([], dtype=np.uint64) - - for fp in in_paths: - edge_dict = creator_utils.read_edge_file_h5(fp) - - # Cross edges are always ordered to point OUT of the chunk - if "unbreakable" in fp: - edge_ids["cross"] = np.concatenate([edge_ids["cross"], edge_dict["edge_ids"][:, [1, 0]]]) - elif "disconnected" in fp: - edge_ids["between_disconnected"] = np.concatenate([edge_ids["between_disconnected"], 
edge_dict["edge_ids"][:, [1, 0]]]) - edge_affs["between_disconnected"] = np.concatenate([edge_affs["between_disconnected"], edge_dict["edge_affs"]]) - edge_areas["between_disconnected"] = np.concatenate([edge_areas["between_disconnected"], edge_dict["edge_areas"]]) - else: - # connected - edge_ids["between_connected"] = np.concatenate([edge_ids["between_connected"], edge_dict["edge_ids"][:, [1, 0]]]) - edge_affs["between_connected"] = np.concatenate([edge_affs["between_connected"], edge_dict["edge_affs"]]) - edge_areas["between_connected"] = np.concatenate([edge_areas["between_connected"], edge_dict["edge_areas"]]) - - for fp in out_paths: - edge_dict = creator_utils.read_edge_file_h5(fp) - - if "unbreakable" in fp: - edge_ids["cross"] = np.concatenate([edge_ids["cross"], edge_dict["edge_ids"]]) - elif "disconnected" in fp: - edge_ids["between_disconnected"] = np.concatenate([edge_ids["between_disconnected"], edge_dict["edge_ids"]]) - edge_affs["between_disconnected"] = np.concatenate([edge_affs["between_disconnected"], edge_dict["edge_affs"]]) - edge_areas["between_disconnected"] = np.concatenate([edge_areas["between_disconnected"], edge_dict["edge_areas"]]) - else: - # connected - edge_ids["between_connected"] = np.concatenate([edge_ids["between_connected"], edge_dict["edge_ids"]]) - edge_affs["between_connected"] = np.concatenate([edge_affs["between_connected"], edge_dict["edge_affs"]]) - edge_areas["between_connected"] = np.concatenate([edge_areas["between_connected"], edge_dict["edge_areas"]]) - - # Initialize an ChunkedGraph instance and write to it - cg = chunkedgraph.ChunkedGraph(table_id=table_id) - - cg.add_atomic_edges_in_chunks(edge_ids, edge_affs, edge_areas, - isolated_node_ids=isolated_ids, - verbose=verbose) - - -def _add_layer_thread(args): - """ Creates abstraction layer """ - table_id, layer_id, chunk_coords, n_threads_per_process = args - - cg = chunkedgraph.ChunkedGraph(table_id=table_id) - cg.add_layer(layer_id, chunk_coords, 
n_threads=n_threads_per_process) - diff --git a/pychunkedgraph/creator/creator_utils.py b/pychunkedgraph/creator/creator_utils.py deleted file mode 100644 index 4bca81629..000000000 --- a/pychunkedgraph/creator/creator_utils.py +++ /dev/null @@ -1,116 +0,0 @@ -import numpy as np -import h5py -import os - -HOME = os.path.expanduser("~") - - -def layer_name_from_cv_url(cv_url): - return cv_url.strip("/").split("/")[-2] - - -def dir_from_layer_name(layer_name): - return HOME + "/" + layer_name + "/" - - -def read_edge_file_cv(cv_st, path): - """ Reads the edge ids and affinities from an edge file """ - - if 'unbreakable' in path: - dt = 'uint64, uint64' - elif 'isolated' in path: - dt = 'uint64' - else: - dt = 'uint64, uint64, float32, uint64' - - buf = cv_st.get_file(path) - edge_data = np.frombuffer(buf, dtype=dt) - - if len(edge_data) == 0: - if len(dt.split(",")) == 1: - edge_data = np.array([], dtype=np.uint64) - else: - edge_data = {"f0": np.array([], dtype=np.uint64), - "f1": np.array([], dtype=np.uint64), - "f2": np.array([], dtype=np.float32), - "f3": np.array([], dtype=np.uint64)} - - if 'isolated' in path: - edge_dict = {"node_ids": edge_data} - else: - edge_ids = np.concatenate([edge_data["f0"].reshape(-1, 1), - edge_data["f1"].reshape(-1, 1)], axis=1) - - edge_dict = {"edge_ids": edge_ids} - - if "connected" in path: - edge_dict['edge_affs'] = edge_data['f2'] - edge_dict['edge_areas'] = edge_data['f3'] - - return edge_dict - - -def read_edge_file_h5(path, layer_name=None): - if not path.endswith(".h5"): - path = path[:-4] + ".h5" - - if layer_name is not None: - path = dir_from_layer_name(layer_name) + path - - edge_dict = {} - with h5py.File(path, "r") as f: - for k in f.keys(): - edge_dict[k] = f[k].value - - return edge_dict - - -def download_and_store_edge_file(cv_st, path, create_dir=True): - edge_dict = read_edge_file_cv(cv_st, path) - - dir_path = dir_from_layer_name(layer_name_from_cv_url(cv_st.layer_path)) - - if not os.path.exists(dir_path) and 
create_dir: - os.makedirs(dir_path) - - with h5py.File(dir_path + path[:-4] + ".h5", "w") as f: - for k in edge_dict.keys(): - f.create_dataset(k, data=edge_dict[k], compression="gzip") - - -# def read_mapping_cv(cv_st, path, olduint32=False): -# """ Reads the mapping information from a file """ -# -# if olduint32: -# mapping = np.frombuffer(cv_st.get_file(path), -# dtype=np.uint64).reshape(-1, 2) -# mapping_to = mapping[:, 1] -# mapping_from = np.frombuffer(np.ascontiguousarray(mapping[:, 0]), dtype=np.uint32)[::2].astype(np.uint64) -# return np.concatenate([mapping_from[:, None], mapping_to[:, None]], axis=1) -# else: -# return np.frombuffer(cv_st.get_file(path), dtype=np.uint64).reshape(-1, 2) -# -# -# def read_mapping_h5(path, layer_name=None): -# if not path.endswith(".h5"): -# path = path[:-4] + ".h5" -# -# if layer_name is not None: -# path = dir_from_layer_name(layer_name) + path -# -# with h5py.File(path, "r") as f: -# mapping = f["mapping"].value -# -# return mapping -# -# -# def download_and_store_mapping_file(cv_st, path, olduint32=False): -# mapping = read_mapping_cv(cv_st, path, olduint32=olduint32) -# -# dir_path = dir_from_layer_name(layer_name_from_cv_url(cv_st.layer_path)) -# -# if not os.path.exists(dir_path): -# os.makedirs(dir_path) -# -# with h5py.File(dir_path + path[:-4] + ".h5", "w") as f: -# f["mapping"] = mapping \ No newline at end of file diff --git a/pychunkedgraph/creator/data_test.py b/pychunkedgraph/creator/data_test.py deleted file mode 100644 index 48af7b51b..000000000 --- a/pychunkedgraph/creator/data_test.py +++ /dev/null @@ -1,51 +0,0 @@ -import glob -import collections -import numpy as np - -from pychunkedgraph.creator import creator_utils -from multiwrapper import multiprocessing_utils as mu - - -def _test_unique_edge_assignment_thread(args): - paths = args[0] - - id_dict = collections.Counter() - for path in paths: - try: - ids = creator_utils.read_edge_file_h5(path)["edge_ids"] - except: - ids = 
creator_utils.read_edge_file_h5(path)["node_ids"] - u_ids = np.unique(ids) - - u_id_d = dict(zip(u_ids, np.ones(len(u_ids), dtype=np.int))) - add_counter = collections.Counter(u_id_d) - - id_dict += add_counter - - # return np.array(list(id_dict.items())) - return id_dict - - -def test_unique_edge_assignment(dir, n_threads=128): - file_paths = glob.glob(dir + "/*") - - file_chunks = np.array_split(file_paths, n_threads * 3) - multi_args = [] - for i_file_chunk, file_chunk in enumerate(file_chunks): - multi_args.append([file_chunk]) - - # Run parallelizing - if n_threads == 1: - results = mu.multiprocess_func(_test_unique_edge_assignment_thread, - multi_args, n_threads=n_threads, - verbose=True, debug=n_threads==1) - else: - results = mu.multiprocess_func(_test_unique_edge_assignment_thread, - multi_args, n_threads=n_threads) - - id_dict = collections.Counter() - for result in results: - # id_dict += collections.Counter(dict(result)) - id_dict += result - - return id_dict \ No newline at end of file diff --git a/pychunkedgraph/creator/graph_tests.py b/pychunkedgraph/creator/graph_tests.py deleted file mode 100644 index 54d51411a..000000000 --- a/pychunkedgraph/creator/graph_tests.py +++ /dev/null @@ -1,133 +0,0 @@ -import itertools -import numpy as np -import time - -import pychunkedgraph.backend.chunkedgraph_utils -from pychunkedgraph.backend.utils import column_keys -from pychunkedgraph.backend import chunkedgraph -from multiwrapper import multiprocessing_utils as mu - - -def _family_consistency_test_thread(args): - """ Helper to test family consistency """ - - table_id, coord, layer_id = args - - x, y, z = coord - - cg = chunkedgraph.ChunkedGraph(table_id) - - rows = cg.range_read_chunk(layer_id, x, y, z) - parent_column = column_keys.Hierarchy.Parent - - failed_node_ids = [] - - time_start = time.time() - for i_k, node_id in enumerate(rows.keys()): - if i_k % 100 == 1: - dt = time.time() - time_start - eta = dt / i_k * len(rows) - dt - print("%d / %d - %.3fs -> 
%.3fs " % (i_k, len(rows), dt, eta), - end="\r") - - parent_id = rows[node_id][parent_column][0].value - - if node_id not in cg.get_children(parent_id): - failed_node_ids.append([node_id, parent_id]) - - return failed_node_ids - - -def family_consistency_test(table_id, n_threads=64): - """ Runs a simple test on the WHOLE graph - - tests: id in children(parent(id)) - - :param table_id: str - :param n_threads: int - :return: dict - n x 2 per layer - each failed pair: (node_id, parent_id) - """ - - cg = chunkedgraph.ChunkedGraph(table_id) - - failed_node_id_dict = {} - for layer_id in range(1, cg.n_layers): - print("\n\n Layer %d \n\n" % layer_id) - - step = int(cg.fan_out ** np.max([0, layer_id - 2])) - coords = list(itertools.product(range(0, 8, step), - range(0, 8, step), - range(0, 4, step))) - - multi_args = [] - for coord in coords: - multi_args.append([table_id, coord, layer_id]) - - collected_failed_node_ids = mu.multisubprocess_func( - _family_consistency_test_thread, multi_args, n_threads=n_threads) - - failed_node_ids = [] - for _failed_node_ids in collected_failed_node_ids: - failed_node_ids.extend(_failed_node_ids) - - failed_node_id_dict[layer_id] = np.array(failed_node_ids) - - print("\n%d nodes rows failed\n" % len(failed_node_ids)) - - return failed_node_id_dict - - -def children_test(table_id, layer, coord_list): - - cg = chunkedgraph.ChunkedGraph(table_id) - child_column = column_keys.Hierarchy.Child - - for coords in coord_list: - x, y, z = coords - - node_ids = cg.range_read_chunk(layer, x, y, z, columns=child_column) - all_children = [] - children_chunks = [] - for children in node_ids.values(): - children = children[0].value - for child in children: - all_children.append(child) - children_chunks.append(cg.get_chunk_id(child)) - - u_children_chunks, c_children_chunks = np.unique(children_chunks, - return_counts=True) - u_chunk_coords = [cg.get_chunk_coordinates(c) for c in u_children_chunks] - - print("\n--- Layer %d ---- [%d, %d, %d] ---" % 
(layer, x, y, z)) - print("N(all children): %d" % len(all_children)) - print("N(unique children): %d" % len(np.unique(all_children))) - print("N(unique children chunks): %d" % len(u_children_chunks)) - print("Unique children chunk coords", u_chunk_coords) - print("N(ids per unique children chunk):", c_children_chunks) - - -def root_cross_edge_test(node_id, table_id=None, cg=None): - if cg is None: - assert isinstance(table_id, str) - cg = chunkedgraph.ChunkedGraph(table_id) - - cross_edge_dict_layers = {} - cross_edge_dict_children = {} - for layer in range(2, cg.n_layers): - child_ids = cg.get_subgraph_nodes(node_id, return_layers=[layer]) - - cross_edge_dict = {} - child_reference_ids = [] - for child_id in child_ids: - cross_edge_dict = pychunkedgraph.backend.chunkedgraph_utils.combine_cross_chunk_edge_dicts(cross_edge_dict, cg.read_cross_chunk_edges(child_id)) - - cross_edge_dict_layers[layer] = cross_edge_dict - - for layer in cross_edge_dict_layers.keys(): - print("\n--------\n") - for i_layer in cross_edge_dict_layers[layer].keys(): - print(layer, i_layer, len(cross_edge_dict_layers[layer][i_layer])) - - return cross_edge_dict_layers diff --git a/pychunkedgraph/edge_gen/Dockerfile b/pychunkedgraph/edge_gen/Dockerfile deleted file mode 100644 index 33e07707f..000000000 --- a/pychunkedgraph/edge_gen/Dockerfile +++ /dev/null @@ -1,38 +0,0 @@ -FROM python:3-alpine - -RUN apk add --no-cache --virtual .build-deps \ - curl \ - libc6-compat \ - git \ - gcc \ - g++ \ - linux-headers \ - jpeg-dev \ - mariadb-dev \ - && apk add --no-cache \ - libstdc++ \ - libjpeg-turbo \ - mariadb-connector-c \ - \ - # separate numpy install fixes cloudvolume bug - && pip install --no-cache-dir \ - numpy \ - && pip install --no-cache-dir --upgrade \ - cloud-volume \ - tenacity \ - networkx \ - google-cloud-bigtable \ - zstandard \ - mysqlclient \ - && mkdir /root/.cloudvolume \ - && ln -s /secrets /root/.cloudvolume/secrets \ - \ - && git clone 
"https://github.com/seung-lab/pychunkedgraph.git" /usr/local/pychunkedgraph \ - && rm -rf /usr/local/pychunkedgraph/.git \ - && apk del .build-deps \ - && find /usr/local -depth \ - \( \ - \( -type d -a \( -name __pycache__ \) \) \ - -o \ - \( -type f -a \( -name '*.pyc' -o -name '*.pyo' \) \) \ - \) -exec rm -rf '{}' + diff --git a/pychunkedgraph/edge_gen/edgetask.py b/pychunkedgraph/edge_gen/edgetask.py deleted file mode 100644 index 478b03434..000000000 --- a/pychunkedgraph/edge_gen/edgetask.py +++ /dev/null @@ -1,597 +0,0 @@ -import json -import os -import re -import sys -from copy import deepcopy -from functools import lru_cache -from itertools import chain -from operator import itemgetter -from typing import Iterable, Mapping, Tuple, Union - -from cloudvolume import CloudVolume, Storage - -import MySQLdb -import numpy as np -import zstandard as zstd - -sys.path.insert(0, os.path.join(sys.path[0], '..')) -from backend import chunkedgraph # noqa - -UINT64_ZERO = np.uint64(0) -UINT64_ONE = np.uint64(1) - - -class EdgeTask: - def __init__(self, - cgraph: chunkedgraph.ChunkedGraph, - mysql_conn: any, - agglomeration_input: CloudVolume, - watershed_input: CloudVolume, - regiongraph_input: Storage, - regiongraph_output: Storage, - regiongraph_chunksize: Tuple[int, int, int], - roi: Tuple[slice, slice, slice]): - self.__cgraph = cgraph - self.__mysql_conn = mysql_conn - self.__watershed = { - "cv_input": watershed_input, - "original": np.array([], dtype=np.uint64, ndmin=3), - "relabeled": np.array([], dtype=np.uint64, ndmin=3), - "rg2cg_complete": {}, - "rg2cg_boundary": {} - } - self.__agglomeration = { - "cv": agglomeration_input, - "original": np.array([], dtype=np.uint64, ndmin=3) - } - self.__regiongraph = { - "storage_in": regiongraph_input, - "storage_out": regiongraph_output, - "edges": {}, - "chunksize": regiongraph_chunksize, - "offset": self.__watershed["cv_input"].voxel_offset, - "maxlevel": int(np.ceil(np.log2(np.max(np.floor_divide( - 
self.__watershed["cv_input"].volume_size, regiongraph_chunksize))))) - } - self.__roi = roi - self.__watershed["original"] = self.__watershed["cv_input"][self.__roi] - self.__watershed["relabeled"] = np.empty_like(self.__watershed["original"]) - self.__agglomeration["original"] = \ - self.__agglomeration["cv"][self.__roi] - - def execute(self): - self.__relabel_cutout() - - self.__compute_cutout_regiongraph() - - return - - def get_relabeled_watershed(self): - return self.__watershed["relabeled"][0:-1, 0:-1, 0:-1, :] - - def __load_rg_chunkhierarchy_affinities(self): - """ - Collect all weighted edges from the Region Graph chunk hierarchy - within the ROI. - """ - - # Convert ROI (in voxel) to Region Graph chunk indices - chunk_range = tuple(map( - lambda x: - np.floor_divide( - np.maximum(0, np.subtract(x, self.__regiongraph["offset"])), - self.__regiongraph["chunksize"]), - ((self.__roi[0].start, self.__roi[1].start, self.__roi[2].start), - (self.__roi[0].stop, self.__roi[1].stop, self.__roi[2].stop)) - )) - - # TODO: Possible speedup by skipping high level chunks that don't - # intersect with ROI - edges = [] - for l in range(self.__regiongraph["maxlevel"] + 1): - for x in range(chunk_range[0][0], chunk_range[1][0] + 1): - for y in range(chunk_range[0][1], chunk_range[1][1] + 1): - for z in range(chunk_range[0][2], chunk_range[1][2] + 1): - print("Loading layer %i: (%i,%i,%i)" % (l, x, y, z)) - chunk_path = "edges_%i_%i_%i_%i.data.zst" % (l, x, y, z) - edges.append(load_rg_chunk_affinities( - self.__regiongraph["storage_in"], chunk_path) - ) - - chunk_range = (chunk_range[0] // 2, chunk_range[1] // 2) - - print("Converting to Set") - return {e.item()[0:2]: e for e in chain(*edges)} - - def __load_cutout_labels_from_db(self): - chunks_to_fetch = [] - for x in range(self.__roi[0].start, self.__roi[0].stop, self.__cgraph.chunk_size[0]): - for y in range(self.__roi[1].start, self.__roi[1].stop, self.__cgraph.chunk_size[1]): - for z in range(self.__roi[2].start, 
self.__roi[2].stop, self.__cgraph.chunk_size[2]): - chunks_to_fetch.append(self.__cgraph.get_chunk_id_from_coord(1, x, y, z)) - - self.__mysql_conn.query("SELECT id, edges FROM chunkedges WHERE id IN (%s);" % ",".join(str(x) for x in chunks_to_fetch)) - res = self.__mysql_conn.store_result() - - chunk_labels = {x: {} for x in chunks_to_fetch} - for row in res.fetch_row(maxrows=0): - edges_iter = iter(np.frombuffer(row[1], dtype=np.uint64)) - chunk_labels[row[0]] = dict(zip(edges_iter, edges_iter)) - - self.__watershed["rg2cg_boundary"] = chunk_labels - self.__watershed["rg2cg_complete"] = deepcopy(self.__watershed["rg2cg_boundary"]) - - def __save_cutout_labels_to_db(self): - self.__mysql_conn.query("START TRANSACTION;") - - chunk_labels = self.__watershed["rg2cg_boundary"] - for chunk_id, mappings in chunk_labels.items(): - if len(mappings) > 0: - flat_binary_mapping = np.fromiter((item for k in mappings for item in (k, mappings[k])), dtype=np.uint64).tobytes() - flat_binary_mapping_escaped = self.__mysql_conn.escape_string(flat_binary_mapping) - self.__mysql_conn.query(b"INSERT INTO chunkedges (id, edges) VALUES (%i, \"%s\") ON DUPLICATE KEY UPDATE edges = VALUES(edges);" % (chunk_id, flat_binary_mapping_escaped)) - - self.__mysql_conn.query("COMMIT;") - - def __relabel_cutout(self): - # Load existing labels of center + neighboring chunks - self.__load_cutout_labels_from_db() - assigned_node_ids = {node_id for chunk_edges in self.__watershed["rg2cg_complete"].values() for node_id in chunk_edges.values()} - - def relabel_chunk(chunk_id: np.uint64, view_range: Tuple[slice, slice, slice]): - next_segment_id = UINT64_ONE - - original = np.nditer( - self.__watershed["original"][view_range], flags=['multi_index']) - relabeled = np.nditer(self.__watershed["relabeled"][view_range], flags=['multi_index'], op_flags=['writeonly']) - - print("Starting Loop for chunk %i" % chunk_id) - while not original.finished: - original_val = np.uint64(original[0]) - - if original_val == 
UINT64_ZERO: - # Don't relabel cell boundary (ID 0) - relabeled[0] = UINT64_ZERO - elif original_val in self.__watershed["rg2cg_complete"][chunk_id]: - # Already encountered this ID before. - relabeled[0] = relabeled_val = self.__watershed["rg2cg_complete"][chunk_id][original_val] - if original.multi_index[0] == 0 or \ - original.multi_index[1] == 0 or \ - original.multi_index[2] == 0: - self.__watershed["rg2cg_boundary"][chunk_id][original_val] = relabeled_val - else: - # Find new, unused node ID for this chunk. - while self.__cgraph.get_node_id( - segment_id=next_segment_id, - chunk_id=chunk_id) in assigned_node_ids: - next_segment_id += UINT64_ONE - - relabeled_val = self.__cgraph.get_node_id( - segment_id=next_segment_id, - chunk_id=chunk_id) - - relabeled[0] = relabeled_val - next_segment_id += UINT64_ONE - assigned_node_ids.add(relabeled_val) - - self.__watershed["rg2cg_complete"][chunk_id][original_val] = relabeled_val - if original.multi_index[0] == 0 or \ - original.multi_index[1] == 0 or \ - original.multi_index[2] == 0: - self.__watershed["rg2cg_boundary"][chunk_id][original_val] = relabeled_val - - original.iternext() - relabeled.iternext() - - for x_start in (0, int(self.__cgraph.chunk_size[0])): - for y_start in (0, int(self.__cgraph.chunk_size[1])): - for z_start in (0, int(self.__cgraph.chunk_size[2])): - x_end = x_start + int(self.__cgraph.chunk_size[0]) - y_end = y_start + int(self.__cgraph.chunk_size[1]) - z_end = z_start + int(self.__cgraph.chunk_size[2]) - - chunk_id = self.__cgraph.get_chunk_id_from_coord( - layer=1, - x=self.__roi[0].start + x_start, - y=self.__roi[1].start + y_start, - z=self.__roi[2].start + z_start) - - relabel_chunk(chunk_id, (slice(x_start, x_end), slice(y_start, y_end), slice(z_start, z_end))) - - self.__save_cutout_labels_to_db() - - def __compute_cutout_regiongraph(self): - edges_center_connected = np.array([]) - edges_center_disconnected = np.array([]) - isolated_sv = np.array([]) - edges_xplus_connected = 
np.array([]) - edges_xplus_disconnected = np.array([]) - edges_xplus_unbreakable = np.array([]) - edges_yplus_connected = np.array([]) - edges_yplus_disconnected = np.array([]) - edges_yplus_unbreakable = np.array([]) - edges_zplus_connected = np.array([]) - edges_zplus_disconnected = np.array([]) - edges_zplus_unbreakable = np.array([]) - - if np.any(self.__watershed["original"]): - # Download all region graph edges covering this part of the dataset - regiongraph_edges = self.__load_rg_chunkhierarchy_affinities() - - print("Calculating RegionGraph...") - - original = self.__watershed["original"] - agglomeration = self.__agglomeration["original"] - - # Shortcut to Original -> Relabeled supervoxel lookup table for the - # center chunk - rg2cg_center = self.__watershed["rg2cg_complete"][ - self.__cgraph.get_chunk_id_from_coord( - layer=1, - x=self.__roi[0].start, - y=self.__roi[1].start, - z=self.__roi[2].start)] - - # Original -> Relabeled supervoxel lookup table for chunk in X+ dir - rg2cg_xplus = self.__watershed["rg2cg_complete"][ - self.__cgraph.get_chunk_id_from_coord( - layer=1, - x=self.__roi[0].start + int(self.__cgraph.chunk_size[0]), - y=self.__roi[1].start, - z=self.__roi[2].start)] - - # Original -> Relabeled supervoxel lookup table for chunk in Y+ dir - rg2cg_yplus = self.__watershed["rg2cg_complete"][ - self.__cgraph.get_chunk_id_from_coord( - layer=1, - x=self.__roi[0].start, - y=self.__roi[1].start + int(self.__cgraph.chunk_size[1]), - z=self.__roi[2].start)] - - # Original -> Relabeled supervoxel lookup table for chunk in Z+ dir - rg2cg_zplus = self.__watershed["rg2cg_complete"][ - self.__cgraph.get_chunk_id_from_coord( - layer=1, - x=self.__roi[0].start, - y=self.__roi[1].start, - z=self.__roi[2].start + int(self.__cgraph.chunk_size[2]))] - - # Mask unsegmented voxel (ID=0) and voxel not at a supervoxel - # boundary in X-direction - sv_boundaries_x = \ - (original[:-1, :, :] != UINT64_ZERO) & (original[1:, :, :] != UINT64_ZERO) & \ - (original[:-1, 
:, :] != original[1:, :, :]) - - # Mask voxel that are not at an agglomeration boundary in X-direction - agg_boundaries_x = (agglomeration[:-1, :, :] == agglomeration[1:, :, :]) - - # Mask unsegmented voxel (ID=0) and voxel not at a supervoxel - # boundary in Y-direction - sv_boundaries_y = \ - (original[:, :-1, :] != UINT64_ZERO) & (original[:, 1:, :] != UINT64_ZERO) & \ - (original[:, :-1, :] != original[:, 1:, :]) - - # Mask voxel that are not at an agglomeration boundary in Y-direction - agg_boundaries_y = (agglomeration[:, :-1, :] == agglomeration[:, 1:, :]) - - # Mask unsegmented voxel (ID=0) and voxel not at a supervoxel - # boundary in Z-direction - sv_boundaries_z = \ - (original[:, :, :-1] != UINT64_ZERO) & (original[:, :, 1:] != UINT64_ZERO) & \ - (original[:, :, :-1] != original[:, :, 1:]) - - # Mask voxel that are not at an agglomeration boundary in Z-direction - agg_boundaries_z = (agglomeration[:, :, :-1] == agglomeration[:, :, 1:]) - - # Center Chunk: - # Collect all unique pairs of adjacent supervoxel IDs from the original - # watershed labeling that are part of the same agglomeration. - # Note that edges are sorted (lower supervoxel ID comes first). 
- edges_center_connected = {x if x[0] < x[1] else (x[1], x[0]) for x in chain( - zip(original[:-2, :-1, :-1][sv_boundaries_x[:-1, :-1, :-1] & agg_boundaries_x[:-1, :-1, :-1]], - original[1:-1, :-1, :-1][sv_boundaries_x[:-1, :-1, :-1] & agg_boundaries_x[:-1, :-1, :-1]]), - zip(original[:-1, :-2, :-1][sv_boundaries_y[:-1, :-1, :-1] & agg_boundaries_y[:-1, :-1, :-1]], - original[:-1, 1:-1, :-1][sv_boundaries_y[:-1, :-1, :-1] & agg_boundaries_y[:-1, :-1, :-1]]), - zip(original[:-1, :-1, :-2][sv_boundaries_z[:-1, :-1, :-1] & agg_boundaries_z[:-1, :-1, :-1]], - original[:-1, :-1, 1:-1][sv_boundaries_z[:-1, :-1, :-1] & agg_boundaries_z[:-1, :-1, :-1]]))} - - # Look up the affinity information for each edge and replace - # original supervoxel IDs with relabeled IDs - if edges_center_connected: - edges_center_connected = np.array([ - (*sorted(itemgetter(x[0], x[1])(rg2cg_center)), x[2], x[3]) - for x in [regiongraph_edges[e] for e in edges_center_connected] - ], dtype='uint64, uint64, float32, uint64') - else: - edges_center_connected = np.array([], dtype='uint64, uint64, float32, uint64') - - # Collect all unique pairs of adjacent supervoxel IDs from the original - # watershed labeling that are NOT part of the same agglomeration. 
- edges_center_disconnected = {x if x[0] < x[1] else (x[1], x[0]) for x in chain( - zip(original[:-2, :-1, :-1][sv_boundaries_x[:-1, :-1, :-1] & ~agg_boundaries_x[:-1, :-1, :-1]], - original[1:-1, :-1, :-1][sv_boundaries_x[:-1, :-1, :-1] & ~agg_boundaries_x[:-1, :-1, :-1]]), - zip(original[:-1, :-2, :-1][sv_boundaries_y[:-1, :-1, :-1] & ~agg_boundaries_y[:-1, :-1, :-1]], - original[:-1, 1:-1, :-1][sv_boundaries_y[:-1, :-1, :-1] & ~agg_boundaries_y[:-1, :-1, :-1]]), - zip(original[:-1, :-1, :-2][sv_boundaries_z[:-1, :-1, :-1] & ~agg_boundaries_z[:-1, :-1, :-1]], - original[:-1, :-1, 1:-1][sv_boundaries_z[:-1, :-1, :-1] & ~agg_boundaries_z[:-1, :-1, :-1]]))} - - # Look up the affinity information for each edge and replace - # original supervoxel IDs with relabeled IDs - if edges_center_disconnected: - edges_center_disconnected = np.array([ - (*sorted(itemgetter(x[0], x[1])(rg2cg_center)), x[2], x[3]) - for x in [regiongraph_edges[e] for e in edges_center_disconnected] - ], dtype='uint64, uint64, float32, uint64') - else: - edges_center_disconnected = np.array([], dtype='uint64, uint64, float32, uint64') - - # Check if there are supervoxel that are not connected to any other - # supervoxel - surrounded by ID 0 - isolated_sv = set(rg2cg_center.values()) - for e in chain(edges_center_connected, edges_center_disconnected): - isolated_sv.discard(e[0]) - isolated_sv.discard(e[1]) - isolated_sv = np.array(list(isolated_sv), dtype=np.uint64) - - # XPlus Chunk: - # Collect edges between center chunk and the chunk in X+ direction. - # Slightly different approach because the relabeling lookup needs - # to be done for two different dictionaries. Slower, but fast enough - # due to far fewer edges near the boundary. - # Node ID layout guarantees that center chunk IDs are always smaller - # than IDs of positive neighboring chunks. 
- edges_xplus_connected = np.array(list({ - (rg2cg_center[x[0]], - rg2cg_xplus[x[1]], - *regiongraph_edges[x if x[0] < x[1] else (x[1], x[0])].item()[2:]) for x in zip( - original[-2:-1, :-1, :-1][sv_boundaries_x[-1:, :-1, :-1] & agg_boundaries_x[-1:, :-1, :-1]], - original[-1:, :-1, :-1][sv_boundaries_x[-1:, :-1, :-1] & agg_boundaries_x[-1:, :-1, :-1]]) - }), dtype='uint64, uint64, float32, uint64') - - edges_xplus_disconnected = np.array(list({ - (rg2cg_center[x[0]], - rg2cg_xplus[x[1]], - *regiongraph_edges[x if x[0] < x[1] else (x[1], x[0])].item()[2:]) for x in zip( - original[-2:-1, :-1, :-1][sv_boundaries_x[-1:, :-1, :-1] & ~agg_boundaries_x[-1:, :-1, :-1]], - original[-1:, :-1, :-1][sv_boundaries_x[-1:, :-1, :-1] & ~agg_boundaries_x[-1:, :-1, :-1]]) - }), dtype='uint64, uint64, float32, uint64') - - # Unbreakable edges (caused by relabeling and chunking) don't have - # sum of area or affinity values - edges_xplus_unbreakable = np.array(list({ - (rg2cg_center[x], rg2cg_xplus[x]) for x in np.unique( - original[-2:-1, :-1, :-1][(original[-2:-1, :-1, :-1] != UINT64_ZERO) & - (original[-2:-1, :-1, :-1] == original[-1:, :-1, :-1])]) - }), dtype='uint64, uint64') - - # YPlus Chunk: - # Collect edges between center chunk and the chunk in Y+ direction. 
- edges_yplus_connected = np.array(list({ - (rg2cg_center[x[0]], - rg2cg_yplus[x[1]], - *regiongraph_edges[x if x[0] < x[1] else (x[1], x[0])].item()[2:]) for x in zip( - original[:-1, -2:-1, :-1][sv_boundaries_y[:-1, -1:, :-1] & agg_boundaries_y[:-1, -1:, :-1]], - original[:-1, -1:, :-1][sv_boundaries_y[:-1, -1:, :-1] & agg_boundaries_y[:-1, -1:, :-1]]) - }), dtype='uint64, uint64, float32, uint64') - - edges_yplus_disconnected = np.array(list({ - (rg2cg_center[x[0]], - rg2cg_yplus[x[1]], - *regiongraph_edges[x if x[0] < x[1] else (x[1], x[0])].item()[2:]) for x in zip( - original[:-1, -2:-1, :-1][sv_boundaries_y[:-1, -1:, :-1] & ~agg_boundaries_y[:-1, -1:, :-1]], - original[:-1, -1:, :-1][sv_boundaries_y[:-1, -1:, :-1] & ~agg_boundaries_y[:-1, -1:, :-1]]) - }), dtype='uint64, uint64, float32, uint64') - - edges_yplus_unbreakable = np.array(list({ - (rg2cg_center[x], rg2cg_yplus[x]) for x in np.unique( - original[:-1, -2:-1, :-1][(original[:-1, -2:-1, :-1] != UINT64_ZERO) & - (original[:-1, -2:-1, :-1] == original[:-1, -1:, :-1])]) - }), dtype='uint64, uint64') - - # ZPlus Chunk - # Collect edges between center chunk and the chunk in Z+ direction. 
- edges_zplus_connected = np.array(list({ - (rg2cg_center[x[0]], - rg2cg_zplus[x[1]], - *regiongraph_edges[x if x[0] < x[1] else (x[1], x[0])].item()[2:]) for x in zip( - original[:-1, :-1, -2:-1][sv_boundaries_z[:-1, :-1, -1:] & agg_boundaries_z[:-1, :-1, -1:]], - original[:-1, :-1, -1:][sv_boundaries_z[:-1, :-1, -1:] & agg_boundaries_z[:-1, :-1, -1:]]) - }), dtype='uint64, uint64, float32, uint64') - - edges_zplus_disconnected = np.array(list({ - (rg2cg_center[x[0]], - rg2cg_zplus[x[1]], - *regiongraph_edges[x if x[0] < x[1] else (x[1], x[0])].item()[2:]) for x in zip( - original[:-1, :-1, -2:-1][sv_boundaries_z[:-1, :-1, -1:] & ~agg_boundaries_z[:-1, :-1, -1:]], - original[:-1, :-1, -1:][sv_boundaries_z[:-1, :-1, -1:] & ~agg_boundaries_z[:-1, :-1, -1:]]) - }), dtype='uint64, uint64, float32, uint64') - - edges_zplus_unbreakable = np.array(list({ - (rg2cg_center[x], rg2cg_zplus[x]) for x in np.unique( - original[:-1, :-1, -2:-1][(original[:-1, :-1, -2:-1] != UINT64_ZERO) & - (original[:-1, :-1, -2:-1] == original[:-1, :-1, -1:])]) - }), dtype='uint64, uint64') - else: - print("Fast skipping Regiongraph calculation - empty block") - - # Prepare upload - rg2cg_center_str = slice_to_str( - slice(self.__roi[x].start, - self.__roi[x].start + int(self.__cgraph.chunk_size[x])) for x in range(3)) - - rg2cg_xplus_str = slice_to_str(( - slice(self.__roi[0].start + int(self.__cgraph.chunk_size[0]) - 1, - self.__roi[0].start + int(self.__cgraph.chunk_size[0]) + 1), - slice(self.__roi[1].start, self.__roi[1].start + int(self.__cgraph.chunk_size[1])), - slice(self.__roi[2].start, self.__roi[2].start + int(self.__cgraph.chunk_size[2])))) - - rg2cg_yplus_str = slice_to_str(( - slice(self.__roi[0].start, self.__roi[0].start + int(self.__cgraph.chunk_size[0])), - slice(self.__roi[1].start + int(self.__cgraph.chunk_size[1]) - 1, - self.__roi[1].start + int(self.__cgraph.chunk_size[1]) + 1), - slice(self.__roi[2].start, self.__roi[2].start + int(self.__cgraph.chunk_size[2])))) - - 
rg2cg_zplus_str = slice_to_str(( - slice(self.__roi[0].start, self.__roi[0].start + int(self.__cgraph.chunk_size[0])), - slice(self.__roi[1].start, self.__roi[1].start + int(self.__cgraph.chunk_size[1])), - slice(self.__roi[2].start + int(self.__cgraph.chunk_size[2]) - 1, - self.__roi[2].start + int(self.__cgraph.chunk_size[2]) + 1))) - - print("Uploading edges") - self.__regiongraph["storage_out"].put_files( - files=[(rg2cg_center_str + '_connected.bin', edges_center_connected.tobytes()), - (rg2cg_center_str + '_disconnected.bin', edges_center_disconnected.tobytes()), - (rg2cg_center_str + '_isolated.bin', isolated_sv.tobytes()), - (rg2cg_xplus_str + '_connected.bin', edges_xplus_connected.tobytes()), - (rg2cg_xplus_str + '_disconnected.bin', edges_xplus_disconnected.tobytes()), - (rg2cg_xplus_str + '_unbreakable.bin', edges_xplus_unbreakable.tobytes()), - (rg2cg_yplus_str + '_connected.bin', edges_yplus_connected.tobytes()), - (rg2cg_yplus_str + '_disconnected.bin', edges_yplus_disconnected.tobytes()), - (rg2cg_yplus_str + '_unbreakable.bin', edges_yplus_unbreakable.tobytes()), - (rg2cg_zplus_str + '_connected.bin', edges_zplus_connected.tobytes()), - (rg2cg_zplus_str + '_disconnected.bin', edges_zplus_disconnected.tobytes()), - (rg2cg_zplus_str + '_unbreakable.bin', edges_zplus_unbreakable.tobytes())], - content_type='application/octet-stream') - print("Done") - - -@lru_cache(maxsize=32) -def load_rg_chunk_affinities(regiongraph_storage: Storage, chunk_path: str) -> np.ndarray: - """ - Extract weighted supervoxel edges from zstd compressed Region Graph - file `chunk_path`. 
- The unversioned, custom binary file format shall be called RanStruct, - which, as of 2018-08-03, looks like this: - - struct RanStruct # Little Endian, not aligned -> 56 Byte - segA1::UInt64 - segB1::UInt64 - sum_aff1::Float32 - sum_area1::UInt64 - segA2::UInt64 # same as segA1 - segB2::UInt64 # same as segB1 - sum_aff2::Float32 # same as sum_aff1 - sum_area2::UInt64 # same as sum_area1 - end - - The big top level Region Graph chunks get requested almost every time, - thus the memoization. - """ - - f = regiongraph_storage.get_file(chunk_path) - if not f: - Warning("%s doesn't exist" % chunk_path) - return np.array([], dtype='uint64, uint64, float32, uint64') - - dctx = zstd.ZstdDecompressor() - decompressed = dctx.decompress(f) - - buf = np.frombuffer(decompressed, dtype='uint64, uint64, float32, uint64') - return np.lib.stride_tricks.as_strided( - buf, - shape=tuple(x//2 for x in buf.shape), - strides=tuple(x*2 for x in buf.strides), - writeable=False - ) - - -def str_to_slice(slice_str: str) -> Tuple[slice, slice, slice]: - match = re.match(r"(\d+)-(\d+)_(\d+)-(\d+)_(\d+)-(\d+)", slice_str) - return (slice(int(match.group(1)), int(match.group(2))), - slice(int(match.group(3)), int(match.group(4))), - slice(int(match.group(5)), int(match.group(6)))) - - -def slice_to_str(slices: Union[slice, Iterable[slice]]) -> str: - if isinstance(slices, slice): - return "%d-%d" % (slices.start, slices.stop) - else: - return '_'.join(map(slice_to_str, slices)) - - -def run_task_bundle(settings: Mapping, roi: Tuple[slice, slice, slice]): - # Remember: DB must be cleared before starting a whole new run - with open("/secrets/mysql") as passwd: - mysql_conn = MySQLdb.connect( - host=settings["mysql"]["host"], - user=settings["mysql"]["user"], - db=settings["mysql"]["db"], - passwd=passwd.read().strip() - ) - - cgraph = chunkedgraph.ChunkedGraph( - table_id=settings["chunkedgraph"]["table_id"], - instance_id=settings["chunkedgraph"]["instance_id"] - ) - - # Things to check: - # - 
Agglomeration and Input Watershed have the same offset/size - # - Taskbundle Offset and ROI is a multiple of cgraph.chunksize - # - Output Watershed chunksize must be a multiple of cgraph.chunksize - - agglomeration_input = CloudVolume( - settings["layers"]["agglomeration_path_input"], bounded=False) - watershed_input = CloudVolume( - settings["layers"]["watershed_path_input"], bounded=False) - watershed_output = CloudVolume( - settings["layers"]["watershed_path_output"], bounded=False, autocrop=True) - regiongraph_input = Storage( - settings["regiongraph"]["regiongraph_path_input"]) - regiongraph_output = Storage( - settings["regiongraph"]["regiongraph_path_output"]) - regiongraph_chunksize = tuple(settings["regiongraph"]["chunksize"]) - - chunkgraph_chunksize = np.array(cgraph.chunk_size, dtype=np.int) - output_watershed_chunksize = np.array(watershed_output.underlying, dtype=np.int) - outer_chunksize = np.maximum(chunkgraph_chunksize, output_watershed_chunksize, dtype=np.int) - - # Iterate through TaskBundle using a minimal chunk size that is a multiple - # of the output watershed chunk size and the Chunked Graph chunk size. 
- for ox in range(roi[0].start, roi[0].stop, outer_chunksize[0]): - for oy in range(roi[1].start, roi[1].stop, outer_chunksize[1]): - for oz in range(roi[2].start, roi[2].stop, outer_chunksize[2]): - - watershed_output_buffer = np.zeros((*outer_chunksize, 1), dtype=np.uint64) - - # Iterate through ChunkGraph chunk-sized tasks: - for ix_start in range(0, outer_chunksize[0], chunkgraph_chunksize[0]): - for iy_start in range(0, outer_chunksize[1], chunkgraph_chunksize[1]): - for iz_start in range(0, outer_chunksize[2], chunkgraph_chunksize[2]): - ix_end = ix_start + chunkgraph_chunksize[0] - iy_end = iy_start + chunkgraph_chunksize[1] - iz_end = iz_start + chunkgraph_chunksize[2] - - # One voxel overlap in each dimension to get - # consistent labeling across chunks - edgetask_roi = (slice(ox + ix_start, ox + ix_end + 1), - slice(oy + iy_start, oy + iy_end + 1), - slice(oz + iz_start, oz + iz_end + 1)) - - edgetask = EdgeTask( - cgraph=cgraph, - mysql_conn=mysql_conn, - agglomeration_input=agglomeration_input, - watershed_input=watershed_input, - regiongraph_input=regiongraph_input, - regiongraph_output=regiongraph_output, - regiongraph_chunksize=regiongraph_chunksize, - roi=edgetask_roi - ) - edgetask.execute() - - # Write relabeled ChunkGraph chunk to (possibly larger) - # watershed-chunk aligned buffer - watershed_output_buffer[ix_start:ix_end, - iy_start:iy_end, - iz_start:iz_end, :] = \ - edgetask.get_relabeled_watershed() - - watershed_output[ox:ox + outer_chunksize[0], - oy:oy + outer_chunksize[1], - oz:oz + outer_chunksize[2], :] = \ - watershed_output_buffer - - -if __name__ == "__main__": - params = json.loads(sys.argv[1]) - run_task_bundle(params, str_to_slice(sys.argv[2])) diff --git a/pychunkedgraph/edge_gen/requirements.txt b/pychunkedgraph/edge_gen/requirements.txt deleted file mode 100644 index a35e3b27a..000000000 --- a/pychunkedgraph/edge_gen/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -cloud-volume -mysqlclient -zstandard diff --git 
a/pychunkedgraph/rechunking/__init__.py b/pychunkedgraph/rechunking/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/pychunkedgraph/rechunking/transformer.py b/pychunkedgraph/rechunking/transformer.py deleted file mode 100644 index 5f4ed3707..000000000 --- a/pychunkedgraph/rechunking/transformer.py +++ /dev/null @@ -1,209 +0,0 @@ -import itertools -import numpy as np -import glob -import os -import time - -import cloudvolume - -from pychunkedgraph.creator import creator_utils -from multiwrapper import multiprocessing_utils as mu - - -def _rewrite_segmentation_thread(args): - file_paths, from_url, to_url = args - - from_cv = cloudvolume.CloudVolume(from_url) - to_cv = cloudvolume.CloudVolume(to_url, bounded=False) - - assert 'svenmd' in to_url - - n_file_paths = len(file_paths) - - time_start = time.time() - for i_fp, fp in enumerate(file_paths): - if i_fp % 10 == 5: - dt = time.time() - time_start - eta = dt / i_fp * n_file_paths - dt - print("%d / %d - dt: %.3fs - eta: %.3fs" % - (i_fp, n_file_paths, dt, eta)) - - rewrite_single_segmentation_block(fp, from_cv=from_cv, to_cv=to_cv) - - -def rewrite_single_segmentation_block(file_path, from_cv=None, to_cv=None, - from_url=None, to_url=None): - if from_cv is None: - assert from_url is not None - from_cv = cloudvolume.CloudVolume(from_url) - - if to_cv is None: - assert to_url is not None - assert 'svenmd' in to_url - to_cv = cloudvolume.CloudVolume(to_url, bounded=False) - - dx, dy, dz, _ = os.path.basename(file_path).split("_") - - x_start, x_end = np.array(dx.split("-"), dtype=np.int) - y_start, y_end = np.array(dy.split("-"), dtype=np.int) - z_start, z_end = np.array(dz.split("-"), dtype=np.int) - - bbox = to_cv.bounds.to_list()[3:] - if x_end > bbox[0]: - x_end = bbox[0] - - if y_end > bbox[1]: - y_end = bbox[1] - - if z_end > bbox[2]: - z_end = bbox[2] - - seg = from_cv[x_start: x_end, y_start: y_end, z_start: z_end] - mapping = creator_utils.read_mapping_h5(file_path) - - if 0 in seg 
and not 0 in mapping[:, 0]: - mapping = np.concatenate(([np.array([[0, 0]], dtype=np.uint64), mapping])) - - sort_idx = np.argsort(mapping[:, 0]) - idx = np.searchsorted(mapping[:, 0], seg, sorter=sort_idx) - out = np.asarray(mapping[:, 1])[sort_idx][idx] - - # print(out.shape, x_start, x_end, y_start, y_end, z_start, z_end) - to_cv[x_start: x_end, y_start: y_end, z_start: z_end] = out - - -def rewrite_segmentation(dataset_name, n_threads=64, n_units_per_thread=None): - if dataset_name == "pinky": - cv_url = "gs://nkem/pinky40_v11/mst_trimmed_sem_remap/region_graph/" - from_url = "gs://neuroglancer/pinky40_v11/watershed/" - to_url = "gs://neuroglancer/svenmd/pinky40_v11/watershed/" - elif dataset_name == "basil": - cv_url = "gs://nkem/basil_4k_oldnet/region_graph/" - from_url = "gs://neuroglancer/ranl/basil_4k_oldnet/ws/" - to_url = "gs://neuroglancer/svenmd/basil_4k_oldnet_cg/watershed/" - else: - raise Exception("Dataset unknown") - - file_paths = np.sort(glob.glob(creator_utils.dir_from_layer_name( - creator_utils.layer_name_from_cv_url(cv_url)) + "/*rg2cg*")) - - if n_units_per_thread is None: - file_path_blocks = np.array_split(file_paths, n_threads*3) - else: - n_blocks = int(np.ceil(len(file_paths) / n_units_per_thread)) - file_path_blocks = np.array_split(file_paths, n_blocks) - - multi_args = [] - for fp_block in file_path_blocks: - multi_args.append([fp_block, from_url, to_url]) - - # Run parallelizing - if n_threads == 1: - mu.multiprocess_func(_rewrite_segmentation_thread, multi_args, - n_threads=n_threads, verbose=True, - debug=n_threads == 1) - else: - mu.multisubprocess_func(_rewrite_segmentation_thread, multi_args, - n_threads=n_threads) - - -def _rewrite_image_thread(args): - start_coordinates, end_coordinates, block_size, from_url, to_url, mip = args - - from_cv = cloudvolume.CloudVolume(from_url, mip=mip) - to_cv = cloudvolume.CloudVolume(to_url, bounded=False, mip=mip) - - assert 'svenmd' in to_url - - coordinate_iter = 
itertools.product(np.arange(start_coordinates[0], end_coordinates[0], block_size[0]), - np.arange(start_coordinates[1], end_coordinates[1], block_size[1]), - np.arange(start_coordinates[2], end_coordinates[2], block_size[2])) - - for coordinate in coordinate_iter: - rewrite_single_image_block(coordinate, block_size, from_cv=from_cv, - to_cv=to_cv) - - -def rewrite_single_image_block(coordinate, block_size, from_cv=None, to_cv=None, - from_url=None, to_url=None, mip=None): - if from_cv is None: - assert from_url is not None and mip is not None - from_cv = cloudvolume.CloudVolume(from_url, mip=mip) - - if to_cv is None: - assert to_url is not None and mip is not None - assert 'svenmd' in to_url - to_cv = cloudvolume.CloudVolume(to_url, bounded=False, mip=mip, - compress=False) - - x_start = coordinate[0] - x_end = coordinate[0] + block_size[0] - y_start = coordinate[1] - y_end = coordinate[1] + block_size[1] - z_start = coordinate[2] - z_end = coordinate[2] + block_size[2] - - bbox = to_cv.bounds.to_list()[3:] - if x_end > bbox[0]: - x_end = bbox[0] - - if y_end > bbox[1]: - y_end = bbox[1] - - if z_end > bbox[2]: - z_end = bbox[2] - - print(x_start, y_start, z_start, x_end, y_end, z_end) - - img = from_cv[x_start: x_end, y_start: y_end, z_start: z_end] - to_cv[x_start: x_end, y_start: y_end, z_start: z_end] = img - - -def rechunk_dataset(dataset_name, block_size=(1024, 1024, 64), n_threads=64, - mip=0): - if dataset_name == "pinky40em": - from_url = "gs://neuroglancer/pinky40_v11/image_rechunked/" - to_url = "gs://neuroglancer/svenmd/pinky40_v11/image_512_512_32/" - elif dataset_name == "pinky100seg": - from_url = "gs://neuroglancer/nkem/pinky100_v0/ws/lost_no-random/bbox1_0/" - to_url = "gs://neuroglancer/svenmd/pinky100_v0/ws/lost_no-random/bbox1_0_64_64_16/" - elif dataset_name == "basil": - raise() - else: - raise Exception("Dataset unknown") - - from_cv = cloudvolume.CloudVolume(from_url, mip=mip) - - dataset_bounds = np.array(from_cv.bounds.to_list()) - 
block_size = np.array(list(block_size)) - - super_block_size = block_size * 2 - - coordinate_iter = itertools.product(np.arange(dataset_bounds[0], - dataset_bounds[3], - super_block_size[0]), - np.arange(dataset_bounds[1], - dataset_bounds[4], - super_block_size[1]), - np.arange(dataset_bounds[2], - dataset_bounds[5], - super_block_size[2])) - coordinates = np.array(list(coordinate_iter)) - - multi_args = [] - for coordinate in coordinates: - end_coordinate = coordinate + super_block_size - m = end_coordinate > dataset_bounds[3:] - end_coordinate[m] = dataset_bounds[3:][m] - - multi_args.append([coordinate, end_coordinate, block_size, - from_url, to_url, mip]) - - # Run parallelizing - if n_threads == 1: - mu.multiprocess_func(_rewrite_image_thread, multi_args, - n_threads=n_threads, verbose=True, - debug=n_threads == 1) - else: - mu.multisubprocess_func(_rewrite_image_thread, multi_args, - n_threads=n_threads) \ No newline at end of file From 1a17ec562278e6f47b459c05c0d12aefcb886536 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 4 Sep 2019 16:50:12 +0000 Subject: [PATCH 0184/1097] .devcontainer to gitignore and rename module --- .gitignore | 3 +- .../initialization/{create.py => base.py} | 2 +- .../ingest/initialization/hierarchy.py | 345 ++++++++++++++++++ pychunkedgraph/ingest/ran_ingestion_v2.py | 2 +- 4 files changed, 349 insertions(+), 3 deletions(-) rename pychunkedgraph/ingest/initialization/{create.py => base.py} (98%) create mode 100644 pychunkedgraph/ingest/initialization/hierarchy.py diff --git a/.gitignore b/.gitignore index ef8107b32..b16998b36 100644 --- a/.gitignore +++ b/.gitignore @@ -111,4 +111,5 @@ venv.bak/ # local dev stuff output.txt -src/ \ No newline at end of file +src/ +/.devcontainer \ No newline at end of file diff --git a/pychunkedgraph/ingest/initialization/create.py b/pychunkedgraph/ingest/initialization/base.py similarity index 98% rename from pychunkedgraph/ingest/initialization/create.py rename to 
pychunkedgraph/ingest/initialization/base.py index 7ccf29760..c358b42f0 100644 --- a/pychunkedgraph/ingest/initialization/create.py +++ b/pychunkedgraph/ingest/initialization/base.py @@ -1,5 +1,5 @@ """ -Module for stuff related to creating the initial chunkedgraph +Functions for creating atomic nodes and their level 2 abstract parents """ import datetime diff --git a/pychunkedgraph/ingest/initialization/hierarchy.py b/pychunkedgraph/ingest/initialization/hierarchy.py new file mode 100644 index 000000000..3e0f0f992 --- /dev/null +++ b/pychunkedgraph/ingest/initialization/hierarchy.py @@ -0,0 +1,345 @@ +""" +Functions for creating parents in level 3 and above +""" + +import collections +import time +import datetime +from typing import Optional, Sequence + +import numpy as np +from multiwrapper import multiprocessing_utils as mu + +from pychunkedgraph.backend import flatgraph_utils +from pychunkedgraph.backend.chunkedgraph_utils import get_google_compatible_time_stamp +from pychunkedgraph.backend.utils import serializers, column_keys + + +def add_layer( + self, + layer_id: int, + child_chunk_coords: Sequence[Sequence[int]], + time_stamp: Optional[datetime.datetime] = None, + verbose: bool = True, + n_threads: int = 20, +) -> None: + """ Creates the abstract nodes for a given chunk in a given layer + :param layer_id: int + :param child_chunk_coords: int array of length 3 + coords in chunk space + :param time_stamp: datetime + :param verbose: bool + :param n_threads: in + """ + + def _read_subchunks_thread(chunk_coord): + # Get start and end key + x, y, z = chunk_coord + + columns = [column_keys.Hierarchy.Child] + [ + column_keys.Connectivity.CrossChunkEdge[l] + for l in range(layer_id - 1, self.n_layers) + ] + range_read = self.range_read_chunk(layer_id - 1, x, y, z, columns=columns) + + # Due to restarted jobs some nodes in the layer below might be + # duplicated. We want to ignore the earlier created node(s) because + # they belong to the failed job. 
We can find these duplicates only + # by comparing their children because each node has a unique id. + # However, we can use that more recently created nodes have higher + # segment ids (not true on root layer but we do not have that here. + # We are only interested in the latest version of any duplicated + # parents. + + # Deserialize row keys and store child with highest id for + # comparison + row_cell_dict = {} + segment_ids = [] + row_ids = [] + max_child_ids = [] + for row_id, row_data in range_read.items(): + segment_id = self.get_segment_id(row_id) + + cross_edge_columns = { + k: v + for (k, v) in row_data.items() + if k.family_id == self.cross_edge_family_id + } + if cross_edge_columns: + row_cell_dict[row_id] = cross_edge_columns + + node_child_ids = row_data[column_keys.Hierarchy.Child][0].value + + max_child_ids.append(np.max(node_child_ids)) + segment_ids.append(segment_id) + row_ids.append(row_id) + + segment_ids = np.array(segment_ids, dtype=np.uint64) + row_ids = np.array(row_ids) + max_child_ids = np.array(max_child_ids, dtype=np.uint64) + + sorting = np.argsort(segment_ids)[::-1] + row_ids = row_ids[sorting] + max_child_ids = max_child_ids[sorting] + + counter = collections.defaultdict(int) + max_child_ids_occ_so_far = np.zeros(len(max_child_ids), dtype=np.int) + for i_row in range(len(max_child_ids)): + max_child_ids_occ_so_far[i_row] = counter[max_child_ids[i_row]] + counter[max_child_ids[i_row]] += 1 + + # Filter last occurences (we inverted the list) of each node + m = max_child_ids_occ_so_far == 0 + row_ids = row_ids[m] + ll_node_ids.extend(row_ids) + + # Loop through nodes from this chunk + for row_id in row_ids: + if row_id in row_cell_dict: + cross_edge_dict[row_id] = {} + + cell_family = row_cell_dict[row_id] + + for l in range(layer_id - 1, self.n_layers): + row_key = column_keys.Connectivity.CrossChunkEdge[l] + if row_key in cell_family: + cross_edge_dict[row_id][l] = cell_family[row_key][0].value + + if int(layer_id - 1) in 
cross_edge_dict[row_id]: + atomic_cross_edges = cross_edge_dict[row_id][layer_id - 1] + + if len(atomic_cross_edges) > 0: + atomic_partner_id_dict[row_id] = atomic_cross_edges[:, 1] + + new_pairs = zip( + atomic_cross_edges[:, 0], [row_id] * len(atomic_cross_edges) + ) + atomic_child_id_dict_pairs.extend(new_pairs) + + def _resolve_cross_chunk_edges_thread(args) -> None: + start, end = args + + for i_child_key, child_key in enumerate(atomic_partner_id_dict_keys[start:end]): + this_atomic_partner_ids = atomic_partner_id_dict[child_key] + + partners = { + atomic_child_id_dict[atomic_cross_id] + for atomic_cross_id in this_atomic_partner_ids + if atomic_child_id_dict[atomic_cross_id] != 0 + } + + if len(partners) > 0: + partners = np.array(list(partners), dtype=np.uint64)[:, None] + + this_ids = np.array([child_key] * len(partners), dtype=np.uint64)[ + :, None + ] + these_edges = np.concatenate([this_ids, partners], axis=1) + + edge_ids.extend(these_edges) + + def _write_out_connected_components(args) -> None: + start, end = args + + # Collect cc info + parent_layer_ids = range(layer_id, self.n_layers + 1) + cc_connections = {l: [] for l in parent_layer_ids} + for i_cc, cc in enumerate(ccs[start:end]): + node_ids = unique_graph_ids[cc] + + parent_cross_edges = collections.defaultdict(list) + + # Collect row info for nodes that are in this chunk + for node_id in node_ids: + if node_id in cross_edge_dict: + # Extract edges relevant to this node + for l in range(layer_id, self.n_layers): + if ( + l in cross_edge_dict[node_id] + and len(cross_edge_dict[node_id][l]) > 0 + ): + parent_cross_edges[l].append(cross_edge_dict[node_id][l]) + + if self.use_skip_connections and len(node_ids) == 1: + for l in parent_layer_ids: + if l == self.n_layers or len(parent_cross_edges[l]) > 0: + cc_connections[l].append([node_ids, parent_cross_edges]) + break + else: + cc_connections[layer_id].append([node_ids, parent_cross_edges]) + + # Write out cc info + rows = [] + + # Iterate through 
layers + for parent_layer_id in parent_layer_ids: + if len(cc_connections[parent_layer_id]) == 0: + continue + + parent_chunk_id = parent_chunk_id_dict[parent_layer_id] + reserved_parent_ids = self.get_unique_node_id_range( + parent_chunk_id, step=len(cc_connections[parent_layer_id]) + ) + + for i_cc, cc_info in enumerate(cc_connections[parent_layer_id]): + node_ids, parent_cross_edges = cc_info + + parent_id = reserved_parent_ids[i_cc] + val_dict = {column_keys.Hierarchy.Parent: parent_id} + + for node_id in node_ids: + rows.append( + self.mutate_row( + serializers.serialize_uint64(node_id), + val_dict, + time_stamp=time_stamp, + ) + ) + + val_dict = {column_keys.Hierarchy.Child: node_ids} + for l in range(parent_layer_id, self.n_layers): + if l in parent_cross_edges and len(parent_cross_edges[l]) > 0: + val_dict[ + column_keys.Connectivity.CrossChunkEdge[l] + ] = np.concatenate(parent_cross_edges[l]) + + rows.append( + self.mutate_row( + serializers.serialize_uint64(parent_id), + val_dict, + time_stamp=time_stamp, + ) + ) + + if len(rows) > 100000: + self.bulk_write(rows) + rows = [] + + if len(rows) > 0: + self.bulk_write(rows) + + if time_stamp is None: + time_stamp = datetime.datetime.utcnow() + + if time_stamp.tzinfo is None: + time_stamp = UTC.localize(time_stamp) + + # Comply to resolution of BigTables TimeRange + time_stamp = get_google_compatible_time_stamp(time_stamp, round_up=False) + + # 1 -------------------------------------------------------------------- + # The first part is concerned with reading data from the child nodes + # of this layer and pre-processing it for the second part + + time_start = time.time() + + atomic_partner_id_dict = {} + cross_edge_dict = {} + atomic_child_id_dict_pairs = [] + ll_node_ids = [] + + multi_args = child_chunk_coords + n_jobs = np.min([n_threads, len(multi_args)]) + + if n_jobs > 0: + mu.multithread_func(_read_subchunks_thread, multi_args, n_threads=n_jobs) + + d = dict(atomic_child_id_dict_pairs) + 
atomic_child_id_dict = collections.defaultdict(np.uint64, d) + ll_node_ids = np.array(ll_node_ids, dtype=np.uint64) + + if verbose: + self.logger.debug( + "Time iterating through subchunks: %.3fs" % (time.time() - time_start) + ) + time_start = time.time() + + # Extract edges from remaining cross chunk edges + # and maintain unused cross chunk edges + edge_ids = [] + # u_atomic_child_ids = np.unique(atomic_child_ids) + atomic_partner_id_dict_keys = np.array( + list(atomic_partner_id_dict.keys()), dtype=np.uint64 + ) + + if n_threads > 1: + n_jobs = n_threads * 3 # Heuristic + else: + n_jobs = 1 + + n_jobs = np.min([n_jobs, len(atomic_partner_id_dict_keys)]) + + if n_jobs > 0: + spacing = np.linspace(0, len(atomic_partner_id_dict_keys), n_jobs + 1).astype( + np.int + ) + starts = spacing[:-1] + ends = spacing[1:] + + multi_args = list(zip(starts, ends)) + + mu.multithread_func( + _resolve_cross_chunk_edges_thread, multi_args, n_threads=n_threads + ) + + if verbose: + self.logger.debug( + "Time resolving cross chunk edges: %.3fs" % (time.time() - time_start) + ) + time_start = time.time() + + # 2 -------------------------------------------------------------------- + # The second part finds connected components, writes the parents to + # BigTable and updates the childs + + # Make parent id creation easier + x, y, z = np.min(child_chunk_coords, axis=0) // self.fan_out + chunk_id = self.get_chunk_id(layer=layer_id, x=x, y=y, z=z) + + parent_chunk_id_dict = self.get_parent_chunk_id_dict(chunk_id) + + # Extract connected components + isolated_node_mask = ~np.in1d(ll_node_ids, np.unique(edge_ids)) + add_node_ids = ll_node_ids[isolated_node_mask].squeeze() + add_edge_ids = np.vstack([add_node_ids, add_node_ids]).T + edge_ids.extend(add_edge_ids) + + graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( + edge_ids, make_directed=True + ) + + ccs = flatgraph_utils.connected_components(graph) + + if verbose: + self.logger.debug( + "Time connected components: %.3fs" % 
(time.time() - time_start) + ) + time_start = time.time() + + # Add rows for nodes that are in this chunk + # a connected component at a time + if n_threads > 1: + n_jobs = n_threads * 3 # Heuristic + else: + n_jobs = 1 + + n_jobs = np.min([n_jobs, len(ccs)]) + + spacing = np.linspace(0, len(ccs), n_jobs + 1).astype(np.int) + starts = spacing[:-1] + ends = spacing[1:] + + multi_args = list(zip(starts, ends)) + + mu.multithread_func( + _write_out_connected_components, multi_args, n_threads=n_threads + ) + + if verbose: + self.logger.debug( + "Time writing %d connected components in layer %d: %.3fs" + % (len(ccs), layer_id, time.time() - time_start) + ) + + # to track worker completion + return str(layer_id) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 350c77a93..036ab96f2 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -20,7 +20,7 @@ from ..utils.redis import redis_job, REDIS_URL from . 
import ingestionmanager, ingestion_utils as iu -from .initialization.create import add_atomic_edges +from .initialization.base import add_atomic_edges from ..backend.definitions.edges import Edges, CX_CHUNK, TYPES as EDGE_TYPES from ..backend.utils import basetypes from ..io.edges import put_chunk_edges From 5ba819a00b46a2a535e5337f417211b2a5b60188 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 4 Sep 2019 20:22:01 +0000 Subject: [PATCH 0185/1097] wip: refactor add_layer --- .../{hierarchy.py => abstract_layers.py} | 64 ++++--------------- .../{base.py => atomic_layer.py} | 17 +---- pychunkedgraph/ingest/ran_ingestion_v2.py | 2 +- 3 files changed, 14 insertions(+), 69 deletions(-) rename pychunkedgraph/ingest/initialization/{hierarchy.py => abstract_layers.py} (89%) rename pychunkedgraph/ingest/initialization/{base.py => atomic_layer.py} (89%) diff --git a/pychunkedgraph/ingest/initialization/hierarchy.py b/pychunkedgraph/ingest/initialization/abstract_layers.py similarity index 89% rename from pychunkedgraph/ingest/initialization/hierarchy.py rename to pychunkedgraph/ingest/initialization/abstract_layers.py index 3e0f0f992..1639f9bed 100644 --- a/pychunkedgraph/ingest/initialization/hierarchy.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -3,7 +3,6 @@ """ import collections -import time import datetime from typing import Optional, Sequence @@ -20,18 +19,8 @@ def add_layer( layer_id: int, child_chunk_coords: Sequence[Sequence[int]], time_stamp: Optional[datetime.datetime] = None, - verbose: bool = True, n_threads: int = 20, ) -> None: - """ Creates the abstract nodes for a given chunk in a given layer - :param layer_id: int - :param child_chunk_coords: int array of length 3 - coords in chunk space - :param time_stamp: datetime - :param verbose: bool - :param n_threads: in - """ - def _read_subchunks_thread(chunk_coord): # Get start and end key x, y, z = chunk_coord @@ -218,21 +207,11 @@ def _write_out_connected_components(args) -> 
None: if len(rows) > 0: self.bulk_write(rows) - if time_stamp is None: - time_stamp = datetime.datetime.utcnow() - - if time_stamp.tzinfo is None: - time_stamp = UTC.localize(time_stamp) - - # Comply to resolution of BigTables TimeRange - time_stamp = get_google_compatible_time_stamp(time_stamp, round_up=False) + time_stamp = _get_valid_timestamp(time_stamp) # 1 -------------------------------------------------------------------- # The first part is concerned with reading data from the child nodes # of this layer and pre-processing it for the second part - - time_start = time.time() - atomic_partner_id_dict = {} cross_edge_dict = {} atomic_child_id_dict_pairs = [] @@ -248,12 +227,6 @@ def _write_out_connected_components(args) -> None: atomic_child_id_dict = collections.defaultdict(np.uint64, d) ll_node_ids = np.array(ll_node_ids, dtype=np.uint64) - if verbose: - self.logger.debug( - "Time iterating through subchunks: %.3fs" % (time.time() - time_start) - ) - time_start = time.time() - # Extract edges from remaining cross chunk edges # and maintain unused cross chunk edges edge_ids = [] @@ -268,26 +241,17 @@ def _write_out_connected_components(args) -> None: n_jobs = 1 n_jobs = np.min([n_jobs, len(atomic_partner_id_dict_keys)]) - if n_jobs > 0: spacing = np.linspace(0, len(atomic_partner_id_dict_keys), n_jobs + 1).astype( np.int ) starts = spacing[:-1] ends = spacing[1:] - multi_args = list(zip(starts, ends)) - mu.multithread_func( _resolve_cross_chunk_edges_thread, multi_args, n_threads=n_threads ) - if verbose: - self.logger.debug( - "Time resolving cross chunk edges: %.3fs" % (time.time() - time_start) - ) - time_start = time.time() - # 2 -------------------------------------------------------------------- # The second part finds connected components, writes the parents to # BigTable and updates the childs @@ -310,12 +274,6 @@ def _write_out_connected_components(args) -> None: ccs = flatgraph_utils.connected_components(graph) - if verbose: - self.logger.debug( - 
"Time connected components: %.3fs" % (time.time() - time_start) - ) - time_start = time.time() - # Add rows for nodes that are in this chunk # a connected component at a time if n_threads > 1: @@ -328,18 +286,20 @@ def _write_out_connected_components(args) -> None: spacing = np.linspace(0, len(ccs), n_jobs + 1).astype(np.int) starts = spacing[:-1] ends = spacing[1:] - multi_args = list(zip(starts, ends)) - mu.multithread_func( _write_out_connected_components, multi_args, n_threads=n_threads ) - - if verbose: - self.logger.debug( - "Time writing %d connected components in layer %d: %.3fs" - % (len(ccs), layer_id, time.time() - time_start) - ) - # to track worker completion return str(layer_id) + + +def _get_valid_timestamp(timestamp): + if timestamp is None: + timestamp = datetime.datetime.utcnow() + + if timestamp.tzinfo is None: + timestamp = pytz.UTC.localize(timestamp) + + # Comply to resolution of BigTables TimeRange + return get_google_compatible_time_stamp(timestamp, round_up=False) diff --git a/pychunkedgraph/ingest/initialization/base.py b/pychunkedgraph/ingest/initialization/atomic_layer.py similarity index 89% rename from pychunkedgraph/ingest/initialization/base.py rename to pychunkedgraph/ingest/initialization/atomic_layer.py index c358b42f0..c5c191688 100644 --- a/pychunkedgraph/ingest/initialization/base.py +++ b/pychunkedgraph/ingest/initialization/atomic_layer.py @@ -3,7 +3,7 @@ """ import datetime -from typing import Optional, Sequence, Dict +from typing import Optional, Sequence, Dict, List import pytz import numpy as np @@ -22,21 +22,6 @@ def add_atomic_edges( isolated: Sequence[int], time_stamp: Optional[datetime.datetime] = None, ): - """ - Creates atomic nodes in first abstraction layer for a SINGLE chunk - and all abstract nodes in the second for the same chunk. - All the edges (edge_ids) need to be from one chunk and no nodes should - exist for this chunk prior to calling this function. 
All cross edges - (cross_edge_ids) have to point out the chunk (first entry is the id - within the chunk) - - :param cg_instance: - :param chunk_coord: [x,y,z] - :param chunk_edges_d: dict of {"edge_type": Edges} - :param isolated: list of isolated node ids - :param time_stamp: datetime - """ - chunk_node_ids, chunk_edge_ids = _get_chunk_nodes_and_edges(chunk_edges_d, isolated) if not chunk_node_ids.size: return 0 diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 036ab96f2..f03e542f3 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -20,7 +20,7 @@ from ..utils.redis import redis_job, REDIS_URL from . import ingestionmanager, ingestion_utils as iu -from .initialization.base import add_atomic_edges +from .initialization.atomic_layer import add_atomic_edges from ..backend.definitions.edges import Edges, CX_CHUNK, TYPES as EDGE_TYPES from ..backend.utils import basetypes from ..io.edges import put_chunk_edges From d455c55e75a2772643da9becf2e6f0769e72dea8 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 4 Sep 2019 20:29:00 +0000 Subject: [PATCH 0186/1097] wip: refactor add_layer simplify n_jobs calculation --- .../ingest/initialization/abstract_layers.py | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 1639f9bed..fe275107d 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -235,12 +235,7 @@ def _write_out_connected_components(args) -> None: list(atomic_partner_id_dict.keys()), dtype=np.uint64 ) - if n_threads > 1: - n_jobs = n_threads * 3 # Heuristic - else: - n_jobs = 1 - - n_jobs = np.min([n_jobs, len(atomic_partner_id_dict_keys)]) + n_jobs = np.min([n_threads * 3 if n_threads > 1 else 1, 
len(atomic_partner_id_dict_keys)]) if n_jobs > 0: spacing = np.linspace(0, len(atomic_partner_id_dict_keys), n_jobs + 1).astype( np.int @@ -276,12 +271,7 @@ def _write_out_connected_components(args) -> None: # Add rows for nodes that are in this chunk # a connected component at a time - if n_threads > 1: - n_jobs = n_threads * 3 # Heuristic - else: - n_jobs = 1 - - n_jobs = np.min([n_jobs, len(ccs)]) + n_jobs = np.min([n_threads * 3 if n_threads > 1 else 1, len(ccs)]) spacing = np.linspace(0, len(ccs), n_jobs + 1).astype(np.int) starts = spacing[:-1] @@ -302,4 +292,5 @@ def _get_valid_timestamp(timestamp): timestamp = pytz.UTC.localize(timestamp) # Comply to resolution of BigTables TimeRange - return get_google_compatible_time_stamp(timestamp, round_up=False) + return get_google_compatible_time_stamp(timestamp, round_up=False) + From 023c59bdc9fdba489dfae5f4f832a7833e16e446 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 4 Sep 2019 20:36:50 +0000 Subject: [PATCH 0187/1097] wip: refactor remove repeated timestamp calculation to utils --- pychunkedgraph/backend/chunkedgraph.py | 72 +++---------------- pychunkedgraph/backend/chunkedgraph_utils.py | 11 +++ .../ingest/initialization/abstract_layers.py | 18 +---- .../ingest/initialization/atomic_layer.py | 15 +--- 4 files changed, 24 insertions(+), 92 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 465c25c1f..3221c760f 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -16,7 +16,7 @@ from multiwrapper import multiprocessing_utils as mu from pychunkedgraph.backend import cutting, chunkedgraph_comp, flatgraph_utils from pychunkedgraph.backend.chunkedgraph_utils import compute_indices_pandas, \ - compute_bitmasks, get_google_compatible_time_stamp, \ + compute_bitmasks, get_valid_timestamp, \ get_time_range_filter, get_time_range_and_column_filter, get_max_time, \ combine_cross_chunk_edge_dicts, 
get_min_time, partial_row_data_to_column_dict from pychunkedgraph.backend.utils import serializers, column_keys, row_keys, basetypes @@ -1495,16 +1495,7 @@ def add_atomic_edges_in_chunks(self, edge_id_dict: dict, :param verbose: bool :param time_stamp: datetime """ - if time_stamp is None: - time_stamp = datetime.datetime.utcnow() - - if time_stamp.tzinfo is None: - time_stamp = UTC.localize(time_stamp) - - # Comply to resolution of BigTables TimeRange - time_stamp = get_google_compatible_time_stamp(time_stamp, - round_up=False) - + time_stamp = get_valid_timestamp(time_stamp) edge_id_keys = ["in_connected", "in_disconnected", "cross", "between_connected", "between_disconnected"] edge_aff_keys = ["in_connected", "in_disconnected", "between_connected", @@ -1957,16 +1948,7 @@ def _write_out_connected_components(args) -> None: if len(rows) > 0: self.bulk_write(rows) - if time_stamp is None: - time_stamp = datetime.datetime.utcnow() - - if time_stamp.tzinfo is None: - time_stamp = UTC.localize(time_stamp) - - # Comply to resolution of BigTables TimeRange - time_stamp = get_google_compatible_time_stamp(time_stamp, - round_up=False) - + time_stamp = get_valid_timestamp(time_stamp) # 1 -------------------------------------------------------------------- # The first part is concerned with reading data from the child nodes # of this layer and pre-processing it for the second part @@ -2258,18 +2240,8 @@ def get_roots(self, node_ids: Sequence[np.uint64], :param time_stamp: None or datetime :return: np.uint64 """ - if time_stamp is None: - time_stamp = datetime.datetime.utcnow() - - if time_stamp.tzinfo is None: - time_stamp = UTC.localize(time_stamp) - - # Comply to resolution of BigTables TimeRange - time_stamp = get_google_compatible_time_stamp(time_stamp, - round_up=False) - + time_stamp = get_valid_timestamp(time_stamp) parent_ids = np.array(node_ids) - if stop_layer is not None: stop_layer = min(self.n_layers, stop_layer) else: @@ -2310,16 +2282,7 @@ def get_root(self, 
node_id: np.uint64, :param time_stamp: None or datetime :return: np.uint64 """ - if time_stamp is None: - time_stamp = datetime.datetime.utcnow() - - if time_stamp.tzinfo is None: - time_stamp = UTC.localize(time_stamp) - - # Comply to resolution of BigTables TimeRange - time_stamp = get_google_compatible_time_stamp(time_stamp, - round_up=False) - + time_stamp = get_valid_timestamp(time_stamp) parent_id = node_id all_parent_ids = [] @@ -2485,12 +2448,7 @@ def lock_single_root(self, root_id: np.uint64, operation_id: np.uint64 filter_=combined_filter) # Set row lock if condition returns no results (state == False) - time_stamp = datetime.datetime.utcnow() - - # Comply to resolution of BigTables TimeRange - time_stamp = get_google_compatible_time_stamp(time_stamp, - round_up=False) - + time_stamp = get_valid_timestamp(None) root_row.set_cell(lock_column.family_id, lock_column.key, operation_id_b, state=False, timestamp=time_stamp) @@ -2733,15 +2691,8 @@ def get_future_root_ids(self, root_id: np.uint64, None=search whole future :return: array of uint64 """ - if time_stamp.tzinfo is None: - time_stamp = UTC.localize(time_stamp) - - # Comply to resolution of BigTables TimeRange - time_stamp = get_google_compatible_time_stamp(time_stamp, - round_up=False) - + time_stamp = get_valid_timestamp(time_stamp) id_history = [] - next_ids = [root_id] while len(next_ids): temp_next_ids = [] @@ -2783,15 +2734,8 @@ def get_past_root_ids(self, root_id: np.uint64, None=search whole future :return: array of uint64 """ - if time_stamp.tzinfo is None: - time_stamp = UTC.localize(time_stamp) - - # Comply to resolution of BigTables TimeRange - time_stamp = get_google_compatible_time_stamp(time_stamp, - round_up=False) - + time_stamp = get_valid_timestamp(time_stamp) id_history = [] - next_ids = [root_id] while len(next_ids): temp_next_ids = [] diff --git a/pychunkedgraph/backend/chunkedgraph_utils.py b/pychunkedgraph/backend/chunkedgraph_utils.py index 5c2412250..44af98599 100644 --- 
a/pychunkedgraph/backend/chunkedgraph_utils.py +++ b/pychunkedgraph/backend/chunkedgraph_utils.py @@ -229,3 +229,14 @@ def partial_row_data_to_column_dict(partial_row_data: bigtable.row_data.PartialR new_column_dict[column] = column_values return new_column_dict + + +def get_valid_timestamp(timestamp): + if timestamp is None: + timestamp = datetime.datetime.utcnow() + + if timestamp.tzinfo is None: + timestamp = pytz.UTC.localize(timestamp) + + # Comply to resolution of BigTables TimeRange + return get_google_compatible_time_stamp(timestamp, round_up=False) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index fe275107d..061afff02 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -10,7 +10,7 @@ from multiwrapper import multiprocessing_utils as mu from pychunkedgraph.backend import flatgraph_utils -from pychunkedgraph.backend.chunkedgraph_utils import get_google_compatible_time_stamp +from pychunkedgraph.backend.chunkedgraph_utils import get_valid_timestamp from pychunkedgraph.backend.utils import serializers, column_keys @@ -207,7 +207,7 @@ def _write_out_connected_components(args) -> None: if len(rows) > 0: self.bulk_write(rows) - time_stamp = _get_valid_timestamp(time_stamp) + time_stamp = get_valid_timestamp(time_stamp) # 1 -------------------------------------------------------------------- # The first part is concerned with reading data from the child nodes @@ -281,16 +281,4 @@ def _write_out_connected_components(args) -> None: _write_out_connected_components, multi_args, n_threads=n_threads ) # to track worker completion - return str(layer_id) - - -def _get_valid_timestamp(timestamp): - if timestamp is None: - timestamp = datetime.datetime.utcnow() - - if timestamp.tzinfo is None: - timestamp = pytz.UTC.localize(timestamp) - - # Comply to resolution of BigTables TimeRange - return 
get_google_compatible_time_stamp(timestamp, round_up=False) - + return str(layer_id) \ No newline at end of file diff --git a/pychunkedgraph/ingest/initialization/atomic_layer.py b/pychunkedgraph/ingest/initialization/atomic_layer.py index c5c191688..a7002c4af 100644 --- a/pychunkedgraph/ingest/initialization/atomic_layer.py +++ b/pychunkedgraph/ingest/initialization/atomic_layer.py @@ -11,7 +11,7 @@ from ...backend.chunkedgraph import ChunkedGraph from ...backend.utils import basetypes, serializers, column_keys from ...backend.definitions.edges import Edges, IN_CHUNK, BT_CHUNK, CX_CHUNK, TYPES as EDGE_TYPES -from ...backend.chunkedgraph_utils import compute_indices_pandas, get_google_compatible_time_stamp +from ...backend.chunkedgraph_utils import compute_indices_pandas, get_valid_timestamp from ...backend.flatgraph_utils import build_gt_graph, connected_components @@ -38,7 +38,7 @@ def add_atomic_edges( parent_ids = cg_instance.get_unique_node_id_range(parent_chunk_id, step=len(ccs)) sparse_indices, remapping = _get_remapping(chunk_edges_d) - time_stamp = _get_valid_timestamp(time_stamp) + time_stamp = get_valid_timestamp(time_stamp) rows = [] for i_cc, component in enumerate(ccs): _rows = _process_component( @@ -94,17 +94,6 @@ def _get_remapping(chunk_edges_d: dict): return sparse_indices, remapping -def _get_valid_timestamp(timestamp): - if timestamp is None: - timestamp = datetime.datetime.utcnow() - - if timestamp.tzinfo is None: - timestamp = pytz.UTC.localize(timestamp) - - # Comply to resolution of BigTables TimeRange - return get_google_compatible_time_stamp(timestamp, round_up=False) - - def _process_component( cg_instance, chunk_edges_d, From 8ee80198ce7ca4306540ca94b951f7b0bffec23d Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 5 Sep 2019 15:34:32 +0000 Subject: [PATCH 0188/1097] wip: refactor add_layer --- .../ingest/initialization/abstract_layers.py | 393 +++++++++--------- pychunkedgraph/ingest/ran_ingestion_v2.py | 3 +- 2 files 
changed, 195 insertions(+), 201 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 061afff02..f2580f49d 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -15,213 +15,23 @@ def add_layer( - self, + cg_instance, layer_id: int, - child_chunk_coords: Sequence[Sequence[int]], + chunk_coords: Sequence[Sequence[int]], + *, time_stamp: Optional[datetime.datetime] = None, n_threads: int = 20, ) -> None: - def _read_subchunks_thread(chunk_coord): - # Get start and end key - x, y, z = chunk_coord - - columns = [column_keys.Hierarchy.Child] + [ - column_keys.Connectivity.CrossChunkEdge[l] - for l in range(layer_id - 1, self.n_layers) - ] - range_read = self.range_read_chunk(layer_id - 1, x, y, z, columns=columns) - - # Due to restarted jobs some nodes in the layer below might be - # duplicated. We want to ignore the earlier created node(s) because - # they belong to the failed job. We can find these duplicates only - # by comparing their children because each node has a unique id. - # However, we can use that more recently created nodes have higher - # segment ids (not true on root layer but we do not have that here. - # We are only interested in the latest version of any duplicated - # parents. 
- - # Deserialize row keys and store child with highest id for - # comparison - row_cell_dict = {} - segment_ids = [] - row_ids = [] - max_child_ids = [] - for row_id, row_data in range_read.items(): - segment_id = self.get_segment_id(row_id) - - cross_edge_columns = { - k: v - for (k, v) in row_data.items() - if k.family_id == self.cross_edge_family_id - } - if cross_edge_columns: - row_cell_dict[row_id] = cross_edge_columns - - node_child_ids = row_data[column_keys.Hierarchy.Child][0].value - - max_child_ids.append(np.max(node_child_ids)) - segment_ids.append(segment_id) - row_ids.append(row_id) - - segment_ids = np.array(segment_ids, dtype=np.uint64) - row_ids = np.array(row_ids) - max_child_ids = np.array(max_child_ids, dtype=np.uint64) - - sorting = np.argsort(segment_ids)[::-1] - row_ids = row_ids[sorting] - max_child_ids = max_child_ids[sorting] - - counter = collections.defaultdict(int) - max_child_ids_occ_so_far = np.zeros(len(max_child_ids), dtype=np.int) - for i_row in range(len(max_child_ids)): - max_child_ids_occ_so_far[i_row] = counter[max_child_ids[i_row]] - counter[max_child_ids[i_row]] += 1 - - # Filter last occurences (we inverted the list) of each node - m = max_child_ids_occ_so_far == 0 - row_ids = row_ids[m] - ll_node_ids.extend(row_ids) - - # Loop through nodes from this chunk - for row_id in row_ids: - if row_id in row_cell_dict: - cross_edge_dict[row_id] = {} - - cell_family = row_cell_dict[row_id] - - for l in range(layer_id - 1, self.n_layers): - row_key = column_keys.Connectivity.CrossChunkEdge[l] - if row_key in cell_family: - cross_edge_dict[row_id][l] = cell_family[row_key][0].value - - if int(layer_id - 1) in cross_edge_dict[row_id]: - atomic_cross_edges = cross_edge_dict[row_id][layer_id - 1] - - if len(atomic_cross_edges) > 0: - atomic_partner_id_dict[row_id] = atomic_cross_edges[:, 1] - - new_pairs = zip( - atomic_cross_edges[:, 0], [row_id] * len(atomic_cross_edges) - ) - atomic_child_id_dict_pairs.extend(new_pairs) - - def 
_resolve_cross_chunk_edges_thread(args) -> None: - start, end = args - - for i_child_key, child_key in enumerate(atomic_partner_id_dict_keys[start:end]): - this_atomic_partner_ids = atomic_partner_id_dict[child_key] - - partners = { - atomic_child_id_dict[atomic_cross_id] - for atomic_cross_id in this_atomic_partner_ids - if atomic_child_id_dict[atomic_cross_id] != 0 - } - - if len(partners) > 0: - partners = np.array(list(partners), dtype=np.uint64)[:, None] - - this_ids = np.array([child_key] * len(partners), dtype=np.uint64)[ - :, None - ] - these_edges = np.concatenate([this_ids, partners], axis=1) - - edge_ids.extend(these_edges) - - def _write_out_connected_components(args) -> None: - start, end = args - - # Collect cc info - parent_layer_ids = range(layer_id, self.n_layers + 1) - cc_connections = {l: [] for l in parent_layer_ids} - for i_cc, cc in enumerate(ccs[start:end]): - node_ids = unique_graph_ids[cc] - - parent_cross_edges = collections.defaultdict(list) - - # Collect row info for nodes that are in this chunk - for node_id in node_ids: - if node_id in cross_edge_dict: - # Extract edges relevant to this node - for l in range(layer_id, self.n_layers): - if ( - l in cross_edge_dict[node_id] - and len(cross_edge_dict[node_id][l]) > 0 - ): - parent_cross_edges[l].append(cross_edge_dict[node_id][l]) - - if self.use_skip_connections and len(node_ids) == 1: - for l in parent_layer_ids: - if l == self.n_layers or len(parent_cross_edges[l]) > 0: - cc_connections[l].append([node_ids, parent_cross_edges]) - break - else: - cc_connections[layer_id].append([node_ids, parent_cross_edges]) - - # Write out cc info - rows = [] - - # Iterate through layers - for parent_layer_id in parent_layer_ids: - if len(cc_connections[parent_layer_id]) == 0: - continue - - parent_chunk_id = parent_chunk_id_dict[parent_layer_id] - reserved_parent_ids = self.get_unique_node_id_range( - parent_chunk_id, step=len(cc_connections[parent_layer_id]) - ) - - for i_cc, cc_info in 
enumerate(cc_connections[parent_layer_id]): - node_ids, parent_cross_edges = cc_info - - parent_id = reserved_parent_ids[i_cc] - val_dict = {column_keys.Hierarchy.Parent: parent_id} - - for node_id in node_ids: - rows.append( - self.mutate_row( - serializers.serialize_uint64(node_id), - val_dict, - time_stamp=time_stamp, - ) - ) - - val_dict = {column_keys.Hierarchy.Child: node_ids} - for l in range(parent_layer_id, self.n_layers): - if l in parent_cross_edges and len(parent_cross_edges[l]) > 0: - val_dict[ - column_keys.Connectivity.CrossChunkEdge[l] - ] = np.concatenate(parent_cross_edges[l]) - - rows.append( - self.mutate_row( - serializers.serialize_uint64(parent_id), - val_dict, - time_stamp=time_stamp, - ) - ) - - if len(rows) > 100000: - self.bulk_write(rows) - rows = [] - - if len(rows) > 0: - self.bulk_write(rows) - time_stamp = get_valid_timestamp(time_stamp) # 1 -------------------------------------------------------------------- # The first part is concerned with reading data from the child nodes # of this layer and pre-processing it for the second part atomic_partner_id_dict = {} - cross_edge_dict = {} atomic_child_id_dict_pairs = [] ll_node_ids = [] - multi_args = child_chunk_coords - n_jobs = np.min([n_threads, len(multi_args)]) - - if n_jobs > 0: - mu.multithread_func(_read_subchunks_thread, multi_args, n_threads=n_jobs) + cross_edge_dict = _read_chunks(cg_instance, layer_id, chunk_coord) d = dict(atomic_child_id_dict_pairs) atomic_child_id_dict = collections.defaultdict(np.uint64, d) @@ -235,7 +45,9 @@ def _write_out_connected_components(args) -> None: list(atomic_partner_id_dict.keys()), dtype=np.uint64 ) - n_jobs = np.min([n_threads * 3 if n_threads > 1 else 1, len(atomic_partner_id_dict_keys)]) + n_jobs = np.min( + [n_threads * 3 if n_threads > 1 else 1, len(atomic_partner_id_dict_keys)] + ) if n_jobs > 0: spacing = np.linspace(0, len(atomic_partner_id_dict_keys), n_jobs + 1).astype( np.int @@ -252,10 +64,10 @@ def 
_write_out_connected_components(args) -> None: # BigTable and updates the childs # Make parent id creation easier - x, y, z = np.min(child_chunk_coords, axis=0) // self.fan_out - chunk_id = self.get_chunk_id(layer=layer_id, x=x, y=y, z=z) + x, y, z = np.min(chunk_coords, axis=0) // cg_instance.fan_out + chunk_id = cg_instance.get_chunk_id(layer=layer_id, x=x, y=y, z=z) - parent_chunk_id_dict = self.get_parent_chunk_id_dict(chunk_id) + parent_chunk_id_dict = cg_instance.get_parent_chunk_id_dict(chunk_id) # Extract connected components isolated_node_mask = ~np.in1d(ll_node_ids, np.unique(edge_ids)) @@ -281,4 +93,187 @@ def _write_out_connected_components(args) -> None: _write_out_connected_components, multi_args, n_threads=n_threads ) # to track worker completion - return str(layer_id) \ No newline at end of file + return str(layer_id) + + +def _read_chunks(cg_instance, layer_id, chunk_coords): + cross_edge_dict = {} + + +def _read_chunk(cg_instance, layer_id, chunk_coord): + x, y, z = chunk_coord + columns = [column_keys.Hierarchy.Child] + [ + column_keys.Connectivity.CrossChunkEdge[l] + for l in range(layer_id - 1, cg_instance.n_layers) + ] + range_read = cg_instance.range_read_chunk(layer_id - 1, x, y, z, columns=columns) + + # Deserialize row keys and store child with highest id for + # comparison + row_ids = np.fromiter(range_read.keys(), dtype=np.uint64) + segment_ids = np.array([cg_instance.get_segment_id(r_id) for r_id in row_ids]) + row_cell_dict = {} + max_child_ids = [] + for row_id, row_data in range_read.items(): + cross_edge_columns = { + k: v + for (k, v) in row_data.items() + if k.family_id == cg_instance.cross_edge_family_id + } + if cross_edge_columns: + row_cell_dict[row_id] = cross_edge_columns + node_child_ids = row_data[column_keys.Hierarchy.Child][0].value + max_child_ids.append(np.max(node_child_ids)) + + max_child_ids = np.array(max_child_ids, dtype=np.uint64) + sorting = np.argsort(segment_ids)[::-1] + row_ids = row_ids[sorting] + 
max_child_ids = max_child_ids[sorting] + + counter = collections.defaultdict(int) + max_child_ids_occ_so_far = np.zeros(len(max_child_ids), dtype=np.int) + for i_row in range(len(max_child_ids)): + max_child_ids_occ_so_far[i_row] = counter[max_child_ids[i_row]] + counter[max_child_ids[i_row]] += 1 + return row_ids + + +def _process_chunk(cg_instance, layer_id, chunk_coord): + """ + Due to restarted jobs some nodes in the layer below might be + duplicated. We want to ignore the earlier created node(s) because + they belong to the failed job. We can find these duplicates only + by comparing their children because each node has a unique id. + However, we can use that more recently created nodes have higher + segment ids (not true on root layer but we do not have that here. + We are only interested in the latest version of any duplicated + parents. + """ + row_ids = _read_chunk(cg_instance, layer_id, chunk_coord) + + # Filter last occurences (we inverted the list) of each node + m = max_child_ids_occ_so_far == 0 + row_ids = row_ids[m] + ll_node_ids.extend(row_ids) + + # Loop through nodes from this chunk + for row_id in row_ids: + if row_id in row_cell_dict: + cross_edge_dict[row_id] = {} + cell_family = row_cell_dict[row_id] + + for l in range(layer_id - 1, cg_instance.n_layers): + row_key = column_keys.Connectivity.CrossChunkEdge[l] + if row_key in cell_family: + cross_edge_dict[row_id][l] = cell_family[row_key][0].value + + if int(layer_id - 1) in cross_edge_dict[row_id]: + atomic_cross_edges = cross_edge_dict[row_id][layer_id - 1] + if len(atomic_cross_edges) > 0: + atomic_partner_id_dict[row_id] = atomic_cross_edges[:, 1] + new_pairs = zip( + atomic_cross_edges[:, 0], [row_id] * len(atomic_cross_edges) + ) + atomic_child_id_dict_pairs.extend(new_pairs) + + +def _resolve_cross_chunk_edges_thread(args) -> None: + start, end = args + + for child_key in atomic_partner_id_dict_keys[start:end]: + this_atomic_partner_ids = atomic_partner_id_dict[child_key] + + partners = 
{ + atomic_child_id_dict[atomic_cross_id] + for atomic_cross_id in this_atomic_partner_ids + if atomic_child_id_dict[atomic_cross_id] != 0 + } + + if len(partners) > 0: + partners = np.array(list(partners), dtype=np.uint64)[:, None] + + this_ids = np.array([child_key] * len(partners), dtype=np.uint64)[:, None] + these_edges = np.concatenate([this_ids, partners], axis=1) + + edge_ids.extend(these_edges) + + +def _write_out_connected_components(args) -> None: + start, end = args + + # Collect cc info + parent_layer_ids = range(layer_id, cg_instance.n_layers + 1) + cc_connections = {l: [] for l in parent_layer_ids} + for i_cc, cc in enumerate(ccs[start:end]): + node_ids = unique_graph_ids[cc] + + parent_cross_edges = collections.defaultdict(list) + + # Collect row info for nodes that are in this chunk + for node_id in node_ids: + if node_id in cross_edge_dict: + # Extract edges relevant to this node + for l in range(layer_id, cg_instance.n_layers): + if ( + l in cross_edge_dict[node_id] + and len(cross_edge_dict[node_id][l]) > 0 + ): + parent_cross_edges[l].append(cross_edge_dict[node_id][l]) + + if cg_instance.use_skip_connections and len(node_ids) == 1: + for l in parent_layer_ids: + if l == cg_instance.n_layers or len(parent_cross_edges[l]) > 0: + cc_connections[l].append([node_ids, parent_cross_edges]) + break + else: + cc_connections[layer_id].append([node_ids, parent_cross_edges]) + + # Write out cc info + rows = [] + + # Iterate through layers + for parent_layer_id in parent_layer_ids: + if len(cc_connections[parent_layer_id]) == 0: + continue + + parent_chunk_id = parent_chunk_id_dict[parent_layer_id] + reserved_parent_ids = cg_instance.get_unique_node_id_range( + parent_chunk_id, step=len(cc_connections[parent_layer_id]) + ) + + for i_cc, cc_info in enumerate(cc_connections[parent_layer_id]): + node_ids, parent_cross_edges = cc_info + + parent_id = reserved_parent_ids[i_cc] + val_dict = {column_keys.Hierarchy.Parent: parent_id} + + for node_id in node_ids: + 
rows.append( + cg_instance.mutate_row( + serializers.serialize_uint64(node_id), + val_dict, + time_stamp=time_stamp, + ) + ) + + val_dict = {column_keys.Hierarchy.Child: node_ids} + for l in range(parent_layer_id, cg_instance.n_layers): + if l in parent_cross_edges and len(parent_cross_edges[l]) > 0: + val_dict[ + column_keys.Connectivity.CrossChunkEdge[l] + ] = np.concatenate(parent_cross_edges[l]) + + rows.append( + cg_instance.mutate_row( + serializers.serialize_uint64(parent_id), + val_dict, + time_stamp=time_stamp, + ) + ) + + if len(rows) > 100000: + cg_instance.bulk_write(rows) + rows = [] + + if len(rows) > 0: + cg_instance.bulk_write(rows) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index f03e542f3..83e79706e 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -14,7 +14,6 @@ import numpy.lib.recfunctions as rfn import zstandard as zstd from flask import current_app -from multiwrapper import multiprocessing_utils as mu from rq import Queue from redis import Redis @@ -165,7 +164,7 @@ def create_atomic_chunk(imanager, coord): sv_ids2 = sv_ids2[active] affinities = affinities[active] areas = areas[active] - chunk_edges[edge_type] = Edges( + chunk_edges_active[edge_type] = Edges( sv_ids1, sv_ids2, affinities=affinities, areas=areas ) no_edges = no_edges and not sv_ids1.size From 296a73f055b0acdef69c258ce851955f39d56fd4 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 5 Sep 2019 15:48:37 +0000 Subject: [PATCH 0189/1097] wip: refactor add_layer --- pychunkedgraph/ingest/initialization/abstract_layers.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index f2580f49d..2290b1322 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ 
-125,16 +125,16 @@ def _read_chunk(cg_instance, layer_id, chunk_coord): node_child_ids = row_data[column_keys.Hierarchy.Child][0].value max_child_ids.append(np.max(node_child_ids)) - max_child_ids = np.array(max_child_ids, dtype=np.uint64) sorting = np.argsort(segment_ids)[::-1] row_ids = row_ids[sorting] - max_child_ids = max_child_ids[sorting] + max_child_ids = np.array(max_child_ids, dtype=np.uint64)[sorting] counter = collections.defaultdict(int) max_child_ids_occ_so_far = np.zeros(len(max_child_ids), dtype=np.int) for i_row in range(len(max_child_ids)): max_child_ids_occ_so_far[i_row] = counter[max_child_ids[i_row]] counter[max_child_ids[i_row]] += 1 + row_ids = row_ids[max_child_ids_occ_so_far == 0] return row_ids @@ -152,8 +152,6 @@ def _process_chunk(cg_instance, layer_id, chunk_coord): row_ids = _read_chunk(cg_instance, layer_id, chunk_coord) # Filter last occurences (we inverted the list) of each node - m = max_child_ids_occ_so_far == 0 - row_ids = row_ids[m] ll_node_ids.extend(row_ids) # Loop through nodes from this chunk From 97aa5760633dd47cf9852842037fc951c54d1fe9 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 5 Sep 2019 18:38:52 +0000 Subject: [PATCH 0190/1097] wip: refactor add_layer --- .../ingest/initialization/abstract_layers.py | 75 +++++++------------ 1 file changed, 28 insertions(+), 47 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 2290b1322..66494f4ce 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -23,19 +23,19 @@ def add_layer( n_threads: int = 20, ) -> None: time_stamp = get_valid_timestamp(time_stamp) - - # 1 -------------------------------------------------------------------- - # The first part is concerned with reading data from the child nodes - # of this layer and pre-processing it for the second part atomic_partner_id_dict = {} 
atomic_child_id_dict_pairs = [] ll_node_ids = [] - cross_edge_dict = _read_chunks(cg_instance, layer_id, chunk_coord) + cross_edge_dict = {} + for chunk_coord in chunk_coords: + ids, cross_edge_d = _process_chunk(cg_instance, layer_id, chunk_coord) + ll_node_ids.append(ids) + cross_edge_dict = {**cross_edge_dict, **cross_edge_d} d = dict(atomic_child_id_dict_pairs) atomic_child_id_dict = collections.defaultdict(np.uint64, d) - ll_node_ids = np.array(ll_node_ids, dtype=np.uint64) + ll_node_ids = np.concatenate(ll_node_ids, dtype=np.uint64) # Extract edges from remaining cross chunk edges # and maintain unused cross chunk edges @@ -96,8 +96,26 @@ def add_layer( return str(layer_id) -def _read_chunks(cg_instance, layer_id, chunk_coords): +def _process_chunk(cg_instance, layer_id, chunk_coord): cross_edge_dict = {} + row_ids, cross_edge_columns_d = _read_chunk(cg_instance, layer_id, chunk_coord) + for row_id in cross_edge_columns_d: + cross_edge_dict[row_id] = {} + cell_family = cross_edge_columns_d[row_id] + for l in range(layer_id - 1, cg_instance.n_layers): + row_key = column_keys.Connectivity.CrossChunkEdge[l] + if row_key in cell_family: + cross_edge_dict[row_id][l] = cell_family[row_key][0].value + + if int(layer_id - 1) in cross_edge_dict[row_id]: + atomic_cross_edges = cross_edge_dict[row_id][layer_id - 1] + if len(atomic_cross_edges) > 0: + atomic_partner_id_dict[row_id] = atomic_cross_edges[:, 1] + new_pairs = zip( + atomic_cross_edges[:, 0], [row_id] * len(atomic_cross_edges) + ) + atomic_child_id_dict_pairs.extend(new_pairs) + return row_ids, cross_edge_dict def _read_chunk(cg_instance, layer_id, chunk_coord): @@ -112,7 +130,7 @@ def _read_chunk(cg_instance, layer_id, chunk_coord): # comparison row_ids = np.fromiter(range_read.keys(), dtype=np.uint64) segment_ids = np.array([cg_instance.get_segment_id(r_id) for r_id in row_ids]) - row_cell_dict = {} + cross_edge_columns_d = {} max_child_ids = [] for row_id, row_data in range_read.items(): cross_edge_columns 
= { @@ -121,7 +139,7 @@ def _read_chunk(cg_instance, layer_id, chunk_coord): if k.family_id == cg_instance.cross_edge_family_id } if cross_edge_columns: - row_cell_dict[row_id] = cross_edge_columns + cross_edge_columns_d[row_id] = cross_edge_columns node_child_ids = row_data[column_keys.Hierarchy.Child][0].value max_child_ids.append(np.max(node_child_ids)) @@ -135,44 +153,7 @@ def _read_chunk(cg_instance, layer_id, chunk_coord): max_child_ids_occ_so_far[i_row] = counter[max_child_ids[i_row]] counter[max_child_ids[i_row]] += 1 row_ids = row_ids[max_child_ids_occ_so_far == 0] - return row_ids - - -def _process_chunk(cg_instance, layer_id, chunk_coord): - """ - Due to restarted jobs some nodes in the layer below might be - duplicated. We want to ignore the earlier created node(s) because - they belong to the failed job. We can find these duplicates only - by comparing their children because each node has a unique id. - However, we can use that more recently created nodes have higher - segment ids (not true on root layer but we do not have that here. - We are only interested in the latest version of any duplicated - parents. 
- """ - row_ids = _read_chunk(cg_instance, layer_id, chunk_coord) - - # Filter last occurences (we inverted the list) of each node - ll_node_ids.extend(row_ids) - - # Loop through nodes from this chunk - for row_id in row_ids: - if row_id in row_cell_dict: - cross_edge_dict[row_id] = {} - cell_family = row_cell_dict[row_id] - - for l in range(layer_id - 1, cg_instance.n_layers): - row_key = column_keys.Connectivity.CrossChunkEdge[l] - if row_key in cell_family: - cross_edge_dict[row_id][l] = cell_family[row_key][0].value - - if int(layer_id - 1) in cross_edge_dict[row_id]: - atomic_cross_edges = cross_edge_dict[row_id][layer_id - 1] - if len(atomic_cross_edges) > 0: - atomic_partner_id_dict[row_id] = atomic_cross_edges[:, 1] - new_pairs = zip( - atomic_cross_edges[:, 0], [row_id] * len(atomic_cross_edges) - ) - atomic_child_id_dict_pairs.extend(new_pairs) + return row_ids, cross_edge_columns_d def _resolve_cross_chunk_edges_thread(args) -> None: From dee9aca2d42924239ea45aa28eccf853701d7b40 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 5 Sep 2019 19:02:36 +0000 Subject: [PATCH 0191/1097] wip: refactor add_layer --- .../ingest/initialization/abstract_layers.py | 27 +++++-------------- 1 file changed, 7 insertions(+), 20 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 66494f4ce..a1a48d7d4 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -22,7 +22,6 @@ def add_layer( time_stamp: Optional[datetime.datetime] = None, n_threads: int = 20, ) -> None: - time_stamp = get_valid_timestamp(time_stamp) atomic_partner_id_dict = {} atomic_child_id_dict_pairs = [] ll_node_ids = [] @@ -80,17 +79,8 @@ def add_layer( ) ccs = flatgraph_utils.connected_components(graph) - - # Add rows for nodes that are in this chunk - # a connected component at a time - n_jobs = np.min([n_threads * 3 if 
n_threads > 1 else 1, len(ccs)]) - - spacing = np.linspace(0, len(ccs), n_jobs + 1).astype(np.int) - starts = spacing[:-1] - ends = spacing[1:] - multi_args = list(zip(starts, ends)) - mu.multithread_func( - _write_out_connected_components, multi_args, n_threads=n_threads + _write_out_connected_components( + cg_instance, layer_id, ccs, cross_edge_dict, time_stamp ) # to track worker completion return str(layer_id) @@ -158,7 +148,6 @@ def _read_chunk(cg_instance, layer_id, chunk_coord): def _resolve_cross_chunk_edges_thread(args) -> None: start, end = args - for child_key in atomic_partner_id_dict_keys[start:end]: this_atomic_partner_ids = atomic_partner_id_dict[child_key] @@ -177,15 +166,14 @@ def _resolve_cross_chunk_edges_thread(args) -> None: edge_ids.extend(these_edges) -def _write_out_connected_components(args) -> None: - start, end = args - - # Collect cc info +def _write_out_connected_components( + cg_instance, layer_id, ccs, cross_edge_dict, time_stamp +) -> None: + time_stamp = get_valid_timestamp(time_stamp) parent_layer_ids = range(layer_id, cg_instance.n_layers + 1) cc_connections = {l: [] for l in parent_layer_ids} - for i_cc, cc in enumerate(ccs[start:end]): + for i_cc, cc in enumerate(ccs): node_ids = unique_graph_ids[cc] - parent_cross_edges = collections.defaultdict(list) # Collect row info for nodes that are in this chunk @@ -209,7 +197,6 @@ def _write_out_connected_components(args) -> None: # Write out cc info rows = [] - # Iterate through layers for parent_layer_id in parent_layer_ids: if len(cc_connections[parent_layer_id]) == 0: From 10bdada63d71453b5b39e8e7adf24da076a789b2 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 5 Sep 2019 20:08:50 +0000 Subject: [PATCH 0192/1097] wip: refactor add_layer --- pychunkedgraph/ingest/initialization/abstract_layers.py | 8 ++++---- pychunkedgraph/ingest/initialization/atomic_layer.py | 1 - pychunkedgraph/ingest/ran_ingestion_v2.py | 2 +- 3 files changed, 5 insertions(+), 6 deletions(-) diff 
--git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index a1a48d7d4..f3c666ae1 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -74,13 +74,13 @@ def add_layer( add_edge_ids = np.vstack([add_node_ids, add_node_ids]).T edge_ids.extend(add_edge_ids) - graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( + graph, _, _, graph_ids = flatgraph_utils.build_gt_graph( edge_ids, make_directed=True ) ccs = flatgraph_utils.connected_components(graph) _write_out_connected_components( - cg_instance, layer_id, ccs, cross_edge_dict, time_stamp + cg_instance, layer_id, ccs, cross_edge_dict, graph_ids, time_stamp ) # to track worker completion return str(layer_id) @@ -167,13 +167,13 @@ def _resolve_cross_chunk_edges_thread(args) -> None: def _write_out_connected_components( - cg_instance, layer_id, ccs, cross_edge_dict, time_stamp + cg_instance, layer_id, ccs, cross_edge_dict, graph_ids, time_stamp ) -> None: time_stamp = get_valid_timestamp(time_stamp) parent_layer_ids = range(layer_id, cg_instance.n_layers + 1) cc_connections = {l: [] for l in parent_layer_ids} for i_cc, cc in enumerate(ccs): - node_ids = unique_graph_ids[cc] + node_ids = graph_ids[cc] parent_cross_edges = collections.defaultdict(list) # Collect row info for nodes that are in this chunk diff --git a/pychunkedgraph/ingest/initialization/atomic_layer.py b/pychunkedgraph/ingest/initialization/atomic_layer.py index a7002c4af..949b7fe7e 100644 --- a/pychunkedgraph/ingest/initialization/atomic_layer.py +++ b/pychunkedgraph/ingest/initialization/atomic_layer.py @@ -109,7 +109,6 @@ def _process_component( _edges = _get_out_edges(node_id, chunk_edges_d, sparse_indices, remapping) chunk_out_edges.append(_edges) val_dict = {column_keys.Hierarchy.Parent: parent_id} - r_key = serializers.serialize_uint64(node_id) rows.append(cg_instance.mutate_row(r_key, val_dict, 
time_stamp=time_stamp)) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 83e79706e..601d453fd 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -148,7 +148,6 @@ def create_atomic_chunk(imanager, coord): chunk_edges_all = {} chunk_edges_active = {} for edge_type in EDGE_TYPES: - active = active_edge_d[edge_type] sv_ids1 = edge_dict[edge_type]["sv1"] sv_ids2 = edge_dict[edge_type]["sv2"] areas = np.ones(len(sv_ids1)) @@ -160,6 +159,7 @@ def create_atomic_chunk(imanager, coord): chunk_edges_all[edge_type] = Edges( sv_ids1, sv_ids2, affinities=affinities, areas=areas ) + active = active_edge_d[edge_type] sv_ids1 = sv_ids1[active] sv_ids2 = sv_ids2[active] affinities = affinities[active] From 0203e61f7e25098028a348cbc23750a880cf5deb Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 5 Sep 2019 20:18:29 +0000 Subject: [PATCH 0193/1097] wip: refactor add_layer --- .../ingest/initialization/abstract_layers.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index f3c666ae1..d0b82153e 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -24,17 +24,17 @@ def add_layer( ) -> None: atomic_partner_id_dict = {} atomic_child_id_dict_pairs = [] - ll_node_ids = [] + descendant_node_ids = [] cross_edge_dict = {} for chunk_coord in chunk_coords: ids, cross_edge_d = _process_chunk(cg_instance, layer_id, chunk_coord) - ll_node_ids.append(ids) + descendant_node_ids.append(ids) cross_edge_dict = {**cross_edge_dict, **cross_edge_d} d = dict(atomic_child_id_dict_pairs) atomic_child_id_dict = collections.defaultdict(np.uint64, d) - ll_node_ids = np.concatenate(ll_node_ids, dtype=np.uint64) + descendant_node_ids = np.concatenate(descendant_node_ids, 
dtype=np.uint64) # Extract edges from remaining cross chunk edges # and maintain unused cross chunk edges @@ -69,8 +69,8 @@ def add_layer( parent_chunk_id_dict = cg_instance.get_parent_chunk_id_dict(chunk_id) # Extract connected components - isolated_node_mask = ~np.in1d(ll_node_ids, np.unique(edge_ids)) - add_node_ids = ll_node_ids[isolated_node_mask].squeeze() + isolated_node_mask = ~np.in1d(descendant_node_ids, np.unique(edge_ids)) + add_node_ids = descendant_node_ids[isolated_node_mask].squeeze() add_edge_ids = np.vstack([add_node_ids, add_node_ids]).T edge_ids.extend(add_edge_ids) From d1d118ed1e6782bbfe94e0762780d3826d6fc047 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 6 Sep 2019 18:41:12 +0000 Subject: [PATCH 0194/1097] wip: refactor add_layer --- .../ingest/initialization/abstract_layers.py | 25 +++++++++++-------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index d0b82153e..86d98857a 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -24,17 +24,12 @@ def add_layer( ) -> None: atomic_partner_id_dict = {} atomic_child_id_dict_pairs = [] - descendant_node_ids = [] - cross_edge_dict = {} - for chunk_coord in chunk_coords: - ids, cross_edge_d = _process_chunk(cg_instance, layer_id, chunk_coord) - descendant_node_ids.append(ids) - cross_edge_dict = {**cross_edge_dict, **cross_edge_d} + cross_edge_dict, node_ids = _process_chunks(cg_instance, layer_id, chunk_coords) d = dict(atomic_child_id_dict_pairs) atomic_child_id_dict = collections.defaultdict(np.uint64, d) - descendant_node_ids = np.concatenate(descendant_node_ids, dtype=np.uint64) + descendant_node_ids = np.concatenate(node_ids, dtype=np.uint64) # Extract edges from remaining cross chunk edges # and maintain unused cross chunk edges @@ -86,6 +81,16 @@ def add_layer( return 
str(layer_id) +def _process_chunks(cg_instance, layer_id, chunk_coords): + node_ids = [] + cross_edge_dict = {} + for chunk_coord in chunk_coords: + ids, cross_edge_d = _process_chunk(cg_instance, layer_id, chunk_coord) + node_ids.append(ids) + cross_edge_dict = {**cross_edge_dict, **cross_edge_d} + return cross_edge_dict, node_ids + + def _process_chunk(cg_instance, layer_id, chunk_coord): cross_edge_dict = {} row_ids, cross_edge_columns_d = _read_chunk(cg_instance, layer_id, chunk_coord) @@ -93,9 +98,9 @@ def _process_chunk(cg_instance, layer_id, chunk_coord): cross_edge_dict[row_id] = {} cell_family = cross_edge_columns_d[row_id] for l in range(layer_id - 1, cg_instance.n_layers): - row_key = column_keys.Connectivity.CrossChunkEdge[l] - if row_key in cell_family: - cross_edge_dict[row_id][l] = cell_family[row_key][0].value + cross_edges_key = column_keys.Connectivity.CrossChunkEdge[l] + if cross_edges_key in cell_family: + cross_edge_dict[row_id][l] = cell_family[cross_edges_key][0].value if int(layer_id - 1) in cross_edge_dict[row_id]: atomic_cross_edges = cross_edge_dict[row_id][layer_id - 1] From 904088fbbb95a6cb525f59f4e4149b4ce9de9c5e Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Sun, 8 Sep 2019 01:17:48 +0000 Subject: [PATCH 0195/1097] wip: refactor add_layer --- .../ingest/initialization/abstract_layers.py | 100 +++++++----------- 1 file changed, 38 insertions(+), 62 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 86d98857a..d5d1da984 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -2,8 +2,8 @@ Functions for creating parents in level 3 and above """ -import collections import datetime +from collections import defaultdict from typing import Optional, Sequence import numpy as np @@ -22,50 +22,15 @@ def add_layer( time_stamp: Optional[datetime.datetime] = None, 
n_threads: int = 20, ) -> None: - atomic_partner_id_dict = {} - atomic_child_id_dict_pairs = [] - - cross_edge_dict, node_ids = _process_chunks(cg_instance, layer_id, chunk_coords) - - d = dict(atomic_child_id_dict_pairs) - atomic_child_id_dict = collections.defaultdict(np.uint64, d) - descendant_node_ids = np.concatenate(node_ids, dtype=np.uint64) - - # Extract edges from remaining cross chunk edges - # and maintain unused cross chunk edges - edge_ids = [] - # u_atomic_child_ids = np.unique(atomic_child_ids) - atomic_partner_id_dict_keys = np.array( - list(atomic_partner_id_dict.keys()), dtype=np.uint64 - ) + cross_edge_dict, child_ids = _process_chunks(cg_instance, layer_id, chunk_coords) + edge_ids = _resolve_cross_chunk_edges_thread(layer_id, child_ids, cross_edge_dict) - n_jobs = np.min( - [n_threads * 3 if n_threads > 1 else 1, len(atomic_partner_id_dict_keys)] - ) - if n_jobs > 0: - spacing = np.linspace(0, len(atomic_partner_id_dict_keys), n_jobs + 1).astype( - np.int - ) - starts = spacing[:-1] - ends = spacing[1:] - multi_args = list(zip(starts, ends)) - mu.multithread_func( - _resolve_cross_chunk_edges_thread, multi_args, n_threads=n_threads - ) - - # 2 -------------------------------------------------------------------- - # The second part finds connected components, writes the parents to - # BigTable and updates the childs - - # Make parent id creation easier x, y, z = np.min(chunk_coords, axis=0) // cg_instance.fan_out - chunk_id = cg_instance.get_chunk_id(layer=layer_id, x=x, y=y, z=z) - - parent_chunk_id_dict = cg_instance.get_parent_chunk_id_dict(chunk_id) + parent_chunk_id = cg_instance.get_chunk_id(layer=layer_id, x=x, y=y, z=z) # Extract connected components - isolated_node_mask = ~np.in1d(descendant_node_ids, np.unique(edge_ids)) - add_node_ids = descendant_node_ids[isolated_node_mask].squeeze() + isolated_node_mask = ~np.in1d(child_ids, np.unique(edge_ids)) + add_node_ids = child_ids[isolated_node_mask].squeeze() add_edge_ids = 
np.vstack([add_node_ids, add_node_ids]).T edge_ids.extend(add_edge_ids) @@ -75,7 +40,13 @@ def add_layer( ccs = flatgraph_utils.connected_components(graph) _write_out_connected_components( - cg_instance, layer_id, ccs, cross_edge_dict, graph_ids, time_stamp + cg_instance, + layer_id, + parent_chunk_id, + ccs, + cross_edge_dict, + graph_ids, + time_stamp, ) # to track worker completion return str(layer_id) @@ -88,28 +59,18 @@ def _process_chunks(cg_instance, layer_id, chunk_coords): ids, cross_edge_d = _process_chunk(cg_instance, layer_id, chunk_coord) node_ids.append(ids) cross_edge_dict = {**cross_edge_dict, **cross_edge_d} - return cross_edge_dict, node_ids + return cross_edge_dict, np.concatenate(node_ids, dtype=np.uint64) def _process_chunk(cg_instance, layer_id, chunk_coord): - cross_edge_dict = {} + cross_edge_dict = defaultdict(dict) row_ids, cross_edge_columns_d = _read_chunk(cg_instance, layer_id, chunk_coord) for row_id in cross_edge_columns_d: - cross_edge_dict[row_id] = {} cell_family = cross_edge_columns_d[row_id] for l in range(layer_id - 1, cg_instance.n_layers): cross_edges_key = column_keys.Connectivity.CrossChunkEdge[l] if cross_edges_key in cell_family: cross_edge_dict[row_id][l] = cell_family[cross_edges_key][0].value - - if int(layer_id - 1) in cross_edge_dict[row_id]: - atomic_cross_edges = cross_edge_dict[row_id][layer_id - 1] - if len(atomic_cross_edges) > 0: - atomic_partner_id_dict[row_id] = atomic_cross_edges[:, 1] - new_pairs = zip( - atomic_cross_edges[:, 0], [row_id] * len(atomic_cross_edges) - ) - atomic_child_id_dict_pairs.extend(new_pairs) return row_ids, cross_edge_dict @@ -142,7 +103,7 @@ def _read_chunk(cg_instance, layer_id, chunk_coord): row_ids = row_ids[sorting] max_child_ids = np.array(max_child_ids, dtype=np.uint64)[sorting] - counter = collections.defaultdict(int) + counter = defaultdict(int) max_child_ids_occ_so_far = np.zeros(len(max_child_ids), dtype=np.int) for i_row in range(len(max_child_ids)): 
max_child_ids_occ_so_far[i_row] = counter[max_child_ids[i_row]] @@ -151,17 +112,30 @@ def _read_chunk(cg_instance, layer_id, chunk_coord): return row_ids, cross_edge_columns_d -def _resolve_cross_chunk_edges_thread(args) -> None: - start, end = args - for child_key in atomic_partner_id_dict_keys[start:end]: - this_atomic_partner_ids = atomic_partner_id_dict[child_key] +def _resolve_cross_chunk_edges_thread(layer_id, node_ids, cross_edge_dict) -> None: + atomic_partner_id_dict = {} + atomic_child_id_dict_pairs = [] + for node_id in node_ids: + if int(layer_id - 1) in cross_edge_dict[node_id]: + atomic_cross_edges = cross_edge_dict[node_id][layer_id - 1] + if len(atomic_cross_edges) > 0: + atomic_partner_id_dict[node_id] = atomic_cross_edges[:, 1] + new_pairs = zip( + atomic_cross_edges[:, 0], [node_id] * len(atomic_cross_edges) + ) + atomic_child_id_dict_pairs.extend(new_pairs) + + d = dict(atomic_child_id_dict_pairs) + atomic_child_id_dict = defaultdict(np.uint64, d) + edge_ids = [] + for child_key in atomic_partner_id_dict: + this_atomic_partner_ids = atomic_partner_id_dict[child_key] partners = { atomic_child_id_dict[atomic_cross_id] for atomic_cross_id in this_atomic_partner_ids if atomic_child_id_dict[atomic_cross_id] != 0 } - if len(partners) > 0: partners = np.array(list(partners), dtype=np.uint64)[:, None] @@ -169,17 +143,18 @@ def _resolve_cross_chunk_edges_thread(args) -> None: these_edges = np.concatenate([this_ids, partners], axis=1) edge_ids.extend(these_edges) + return edge_ids def _write_out_connected_components( - cg_instance, layer_id, ccs, cross_edge_dict, graph_ids, time_stamp + cg_instance, layer_id, parent_chunk_id, ccs, cross_edge_dict, graph_ids, time_stamp ) -> None: time_stamp = get_valid_timestamp(time_stamp) parent_layer_ids = range(layer_id, cg_instance.n_layers + 1) cc_connections = {l: [] for l in parent_layer_ids} for i_cc, cc in enumerate(ccs): node_ids = graph_ids[cc] - parent_cross_edges = collections.defaultdict(list) + 
parent_cross_edges = defaultdict(list) # Collect row info for nodes that are in this chunk for node_id in node_ids: @@ -202,6 +177,7 @@ def _write_out_connected_components( # Write out cc info rows = [] + parent_chunk_id_dict = cg_instance.get_parent_chunk_id_dict(parent_chunk_id) # Iterate through layers for parent_layer_id in parent_layer_ids: if len(cc_connections[parent_layer_id]) == 0: From 0ed9e4f4ec5180450a1b45d71882baf33167a9d6 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Sun, 8 Sep 2019 19:34:14 +0000 Subject: [PATCH 0196/1097] .devcontainer.json --- .devcontainer/devcontainer.json | 36 +++++++++++++++++++++++++++++++++ .gitignore | 3 +-- 2 files changed, 37 insertions(+), 2 deletions(-) create mode 100644 .devcontainer/devcontainer.json diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 000000000..4f33f176d --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,36 @@ +// For format details, see https://aka.ms/vscode-remote/devcontainer.json or the definition README at +// https://github.com/microsoft/vscode-dev-containers/tree/master/containers/docker-existing-dockerfile +{ + // See https://aka.ms/vscode-remote/devcontainer.json for format details. + "name": "Existing Dockerfile", + + // Sets the run context to one level up instead of the .devcontainer folder. + "context": "..", + + // Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename. + "dockerFile": "../Dockerfile", + + // The optional 'runArgs' property can be used to specify additional runtime arguments. + "runArgs": [ + // Uncomment the next line to use Docker from inside the container. See https://aka.ms/vscode-remote/samples/docker-in-docker for details. + // "-v","/var/run/docker.sock:/var/run/docker.sock", + + // Uncomment the next line if you will be using a ptrace-based debugger like C++, Go, and Rust. 
+ // "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined" + + // Uncomment the next line to use a non-root user. See https://aka.ms/vscode-remote/containers/non-root-user. + // "-u", "1000" + ], + + // Uncomment the next line if you want to publish any ports. + // "appPort": [], + + // Uncomment the next line if you want to add in default container specific settings.json values + // "settings": { "workbench.colorTheme": "Quiet Light" }, + + // Uncomment the next line to run commands after the container is created - for example installing git. + // "postCreateCommand": "apt-get update && apt-get install -y git", + + // Add the IDs of any extensions you want installed in the array below. + "extensions": [] +} diff --git a/.gitignore b/.gitignore index b16998b36..ef8107b32 100644 --- a/.gitignore +++ b/.gitignore @@ -111,5 +111,4 @@ venv.bak/ # local dev stuff output.txt -src/ -/.devcontainer \ No newline at end of file +src/ \ No newline at end of file From cdeb6d18dcae55eb50cc10295dddd9046a3a70ce Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 9 Sep 2019 14:50:13 +0000 Subject: [PATCH 0197/1097] refactor, remove old code --- pychunkedgraph/backend/chunkedgraph.py | 579 +----------------- .../ingest/initialization/abstract_layers.py | 1 - pychunkedgraph/ingest/ran_ingestion.py | 556 ----------------- pychunkedgraph/ingest/ran_ingestion_v2.py | 41 +- 4 files changed, 29 insertions(+), 1148 deletions(-) delete mode 100644 pychunkedgraph/ingest/ran_ingestion.py diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 3221c760f..e7bd04fc7 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -10,6 +10,7 @@ import re import itertools import logging +from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, NamedTuple from itertools import chain from functools import reduce @@ -28,8 +29,6 @@ MulticutOperation, SplitOperation, ) -from 
pychunkedgraph.io.edges import get_chunk_edges -# from pychunkedgraph.meshing import meshgen from google.api_core.retry import Retry, if_exception_type from google.api_core.exceptions import Aborted, DeadlineExceeded, \ @@ -43,10 +42,11 @@ from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.column_family import MaxVersionsGCRule -from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, NamedTuple from .definitions.edges import Edges from .utils.edge_utils import ( concatenate_chunk_edges, filter_edges, get_active_edges) +from ..io.edges import get_chunk_edges + HOME = os.path.expanduser("~") N_DIGITS_UINT64 = len(str(np.iinfo(np.uint64).max)) @@ -1479,583 +1479,14 @@ def add_atomic_edges_in_chunks(self, edge_id_dict: dict, isolated_node_ids: Sequence[np.uint64], verbose: bool = True, time_stamp: Optional[datetime.datetime] = None): - """ Creates atomic nodes in first abstraction layer for a SINGLE chunk - and all abstract nodes in the second for the same chunk - - Alle edges (edge_ids) need to be from one chunk and no nodes should - exist for this chunk prior to calling this function. 
All cross edges - (cross_edge_ids) have to point out the chunk (first entry is the id - within the chunk) - - :param edge_id_dict: dict - :param edge_aff_dict: dict - :param edge_area_dict: dict - :param isolated_node_ids: list of uint64s - ids of nodes that have no edge in the chunked graph - :param verbose: bool - :param time_stamp: datetime - """ - time_stamp = get_valid_timestamp(time_stamp) - edge_id_keys = ["in_connected", "in_disconnected", "cross", - "between_connected", "between_disconnected"] - edge_aff_keys = ["in_connected", "in_disconnected", "between_connected", - "between_disconnected"] - - # Check if keys exist and include an empty array if not - n_edge_ids = 0 - chunk_id = None - for edge_id_key in edge_id_keys: - if not edge_id_key in edge_id_dict: - empty_edges = np.array([], dtype=np.uint64).reshape(0, 2) - edge_id_dict[edge_id_key] = empty_edges - else: - n_edge_ids += len(edge_id_dict[edge_id_key]) - - if len(edge_id_dict[edge_id_key]) > 0: - node_id = edge_id_dict[edge_id_key][0, 0] - chunk_id = self.get_chunk_id(node_id) - - for edge_aff_key in edge_aff_keys: - if not edge_aff_key in edge_aff_dict: - edge_aff_dict[edge_aff_key] = np.array([], dtype=np.float32) - - time_start = time.time() - - # Catch trivial case - if n_edge_ids == 0 and len(isolated_node_ids) == 0: - return 0 - - # Make parent id creation easier - if chunk_id is None: - chunk_id = self.get_chunk_id(isolated_node_ids[0]) - - chunk_id_c = self.get_chunk_coordinates(chunk_id) - parent_chunk_id = self.get_chunk_id(layer=2, x=chunk_id_c[0], - y=chunk_id_c[1], z=chunk_id_c[2]) - - # Get connected component within the chunk - chunk_node_ids = np.concatenate([ - isolated_node_ids.astype(np.uint64), - np.unique(edge_id_dict["in_connected"]), - np.unique(edge_id_dict["in_disconnected"]), - np.unique(edge_id_dict["cross"][:, 0]), - np.unique(edge_id_dict["between_connected"][:, 0]), - np.unique(edge_id_dict["between_disconnected"][:, 0])]) - - chunk_node_ids = 
np.unique(chunk_node_ids) - - node_chunk_ids = np.array([self.get_chunk_id(c) - for c in chunk_node_ids], - dtype=np.uint64) - - u_node_chunk_ids, c_node_chunk_ids = np.unique(node_chunk_ids, - return_counts=True) - if len(u_node_chunk_ids) > 1: - raise Exception("%d: %d chunk ids found in node id list. " - "Some edges might be in the wrong order. " - "Number of occurences:" % - (chunk_id, len(u_node_chunk_ids)), c_node_chunk_ids) - - add_edge_ids = np.vstack([chunk_node_ids, chunk_node_ids]).T - edge_ids = np.concatenate([edge_id_dict["in_connected"].copy(), - add_edge_ids]) - - graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( - edge_ids, make_directed=True) - - ccs = flatgraph_utils.connected_components(graph) - - if verbose: - self.logger.debug("CC in chunk: %.3fs" % (time.time() - time_start)) - - # Add rows for nodes that are in this chunk - # a connected component at a time - node_c = 0 # Just a counter for the log / speed measurement - - n_ccs = len(ccs) - - parent_ids = self.get_unique_node_id_range(parent_chunk_id, step=n_ccs) - time_start = time.time() - - time_dict = collections.defaultdict(list) - - time_start_1 = time.time() - sparse_indices = {} - remapping = {} - for k in edge_id_dict.keys(): - # Circumvent datatype issues - - u_ids, inv_ids = np.unique(edge_id_dict[k], return_inverse=True) - mapped_ids = np.arange(len(u_ids), dtype=np.int32) - remapped_arr = mapped_ids[inv_ids].reshape(edge_id_dict[k].shape) - - sparse_indices[k] = compute_indices_pandas(remapped_arr) - remapping[k] = dict(zip(u_ids, mapped_ids)) - - time_dict["sparse_indices"].append(time.time() - time_start_1) - - rows = [] - - for i_cc, cc in enumerate(ccs): - node_ids = unique_graph_ids[cc] - - u_chunk_ids = np.unique([self.get_chunk_id(n) for n in node_ids]) - - if len(u_chunk_ids) > 1: - self.logger.error(f"Found multiple chunk ids: {u_chunk_ids}") - raise Exception() - - # Create parent id - parent_id = parent_ids[i_cc] - - parent_cross_edges = np.array([], 
dtype=np.uint64).reshape(0, 2) - - # Add rows for nodes that are in this chunk - for i_node_id, node_id in enumerate(node_ids): - # Extract edges relevant to this node - - # in chunk + connected - time_start_2 = time.time() - if node_id in remapping["in_connected"]: - row_ids, column_ids = sparse_indices["in_connected"][remapping["in_connected"][node_id]] - - inv_column_ids = (column_ids + 1) % 2 - - connected_ids = edge_id_dict["in_connected"][row_ids, inv_column_ids] - connected_affs = edge_aff_dict["in_connected"][row_ids] - connected_areas = edge_area_dict["in_connected"][row_ids] - time_dict["in_connected"].append(time.time() - time_start_2) - time_start_2 = time.time() - else: - connected_ids = np.array([], dtype=np.uint64) - connected_affs = np.array([], dtype=np.float32) - connected_areas = np.array([], dtype=np.uint64) - - # in chunk + disconnected - if node_id in remapping["in_disconnected"]: - row_ids, column_ids = sparse_indices["in_disconnected"][remapping["in_disconnected"][node_id]] - inv_column_ids = (column_ids + 1) % 2 - - disconnected_ids = edge_id_dict["in_disconnected"][row_ids, inv_column_ids] - disconnected_affs = edge_aff_dict["in_disconnected"][row_ids] - disconnected_areas = edge_area_dict["in_disconnected"][row_ids] - time_dict["in_disconnected"].append(time.time() - time_start_2) - time_start_2 = time.time() - else: - disconnected_ids = np.array([], dtype=np.uint64) - disconnected_affs = np.array([], dtype=np.float32) - disconnected_areas = np.array([], dtype=np.uint64) - - # out chunk + connected - if node_id in remapping["between_connected"]: - row_ids, column_ids = sparse_indices["between_connected"][remapping["between_connected"][node_id]] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["out_connected_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - connected_ids = np.concatenate([connected_ids, 
edge_id_dict["between_connected"][row_ids, inv_column_ids]]) - connected_affs = np.concatenate([connected_affs, edge_aff_dict["between_connected"][row_ids]]) - connected_areas = np.concatenate([connected_areas, edge_area_dict["between_connected"][row_ids]]) - - parent_cross_edges = np.concatenate([parent_cross_edges, edge_id_dict["between_connected"][row_ids]]) - - time_dict["out_connected"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # out chunk + disconnected - if node_id in remapping["between_disconnected"]: - row_ids, column_ids = sparse_indices["between_disconnected"][remapping["between_disconnected"][node_id]] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["out_disconnected_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - disconnected_ids = np.concatenate([disconnected_ids, edge_id_dict["between_disconnected"][row_ids, inv_column_ids]]) - disconnected_affs = np.concatenate([disconnected_affs, edge_aff_dict["between_disconnected"][row_ids]]) - disconnected_areas = np.concatenate([disconnected_areas, edge_area_dict["between_disconnected"][row_ids]]) - - time_dict["out_disconnected"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # cross - if node_id in remapping["cross"]: - row_ids, column_ids = sparse_indices["cross"][remapping["cross"][node_id]] - - row_ids = row_ids[column_ids == 0] - column_ids = column_ids[column_ids == 0] - inv_column_ids = (column_ids + 1) % 2 - time_dict["cross_mask"].append(time.time() - time_start_2) - time_start_2 = time.time() - - connected_ids = np.concatenate([connected_ids, edge_id_dict["cross"][row_ids, inv_column_ids]]) - connected_affs = np.concatenate([connected_affs, np.full((len(row_ids)), np.inf, dtype=np.float32)]) - connected_areas = np.concatenate([connected_areas, np.ones((len(row_ids)), dtype=np.uint64)]) - - parent_cross_edges = 
np.concatenate([parent_cross_edges, edge_id_dict["cross"][row_ids]]) - time_dict["cross"].append(time.time() - time_start_2) - time_start_2 = time.time() - - # Create node - partners = np.concatenate([connected_ids, disconnected_ids]) - affinities = np.concatenate([connected_affs, disconnected_affs]) - areas = np.concatenate([connected_areas, disconnected_areas]) - connected = np.arange(len(connected_ids), dtype=np.int) - - val_dict = {column_keys.Connectivity.Partner: partners, - column_keys.Connectivity.Affinity: affinities, - column_keys.Connectivity.Area: areas, - column_keys.Connectivity.Connected: connected, - column_keys.Hierarchy.Parent: parent_id} - - rows.append(self.mutate_row(serializers.serialize_uint64(node_id), - val_dict, time_stamp=time_stamp)) - node_c += 1 - time_dict["creating_lv1_row"].append(time.time() - time_start_2) - - time_start_1 = time.time() - # Create parent node - rows.append(self.mutate_row(serializers.serialize_uint64(parent_id), - {column_keys.Hierarchy.Child: node_ids}, - time_stamp=time_stamp)) - - time_dict["creating_lv2_row"].append(time.time() - time_start_1) - time_start_1 = time.time() - - cce_layers = self.get_cross_chunk_edges_layer(parent_cross_edges) - u_cce_layers = np.unique(cce_layers) - - val_dict = {} - for cc_layer in u_cce_layers: - layer_cross_edges = parent_cross_edges[cce_layers == cc_layer] - - if len(layer_cross_edges) > 0: - val_dict[column_keys.Connectivity.CrossChunkEdge[cc_layer]] = \ - layer_cross_edges - - if len(val_dict) > 0: - rows.append(self.mutate_row(serializers.serialize_uint64(parent_id), - val_dict, time_stamp=time_stamp)) - node_c += 1 - - time_dict["adding_cross_edges"].append(time.time() - time_start_1) - - if len(rows) > 100000: - time_start_1 = time.time() - self.bulk_write(rows) - time_dict["writing"].append(time.time() - time_start_1) - - if len(rows) > 0: - time_start_1 = time.time() - self.bulk_write(rows) - time_dict["writing"].append(time.time() - time_start_1) - - if verbose: - 
self.logger.debug("Time creating rows: %.3fs for %d ccs with %d nodes" % - (time.time() - time_start, len(ccs), node_c)) - - for k in time_dict.keys(): - self.logger.debug("%s -- %.3fms for %d instances -- avg = %.3fms" % - (k, np.sum(time_dict[k])*1000, len(time_dict[k]), - np.mean(time_dict[k])*1000)) + raise NotImplementedError() def add_layer(self, layer_id: int, child_chunk_coords: Sequence[Sequence[int]], time_stamp: Optional[datetime.datetime] = None, verbose: bool = True, n_threads: int = 20) -> None: - """ Creates the abstract nodes for a given chunk in a given layer - - :param layer_id: int - :param child_chunk_coords: int array of length 3 - coords in chunk space - :param time_stamp: datetime - :param verbose: bool - :param n_threads: int - """ - def _read_subchunks_thread(chunk_coord): - # Get start and end key - x, y, z = chunk_coord - - columns = [column_keys.Hierarchy.Child] + \ - [column_keys.Connectivity.CrossChunkEdge[l] - for l in range(layer_id - 1, self.n_layers)] - range_read = self.range_read_chunk(layer_id - 1, x, y, z, - columns=columns) - - # Due to restarted jobs some nodes in the layer below might be - # duplicated. We want to ignore the earlier created node(s) because - # they belong to the failed job. We can find these duplicates only - # by comparing their children because each node has a unique id. - # However, we can use that more recently created nodes have higher - # segment ids (not true on root layer but we do not have that here. - # We are only interested in the latest version of any duplicated - # parents. 
- - # Deserialize row keys and store child with highest id for - # comparison - row_cell_dict = {} - segment_ids = [] - row_ids = [] - max_child_ids = [] - for row_id, row_data in range_read.items(): - segment_id = self.get_segment_id(row_id) - - cross_edge_columns = {k: v for (k, v) in row_data.items() - if k.family_id == self.cross_edge_family_id} - if cross_edge_columns: - row_cell_dict[row_id] = cross_edge_columns - - node_child_ids = row_data[column_keys.Hierarchy.Child][0].value - - max_child_ids.append(np.max(node_child_ids)) - segment_ids.append(segment_id) - row_ids.append(row_id) - - segment_ids = np.array(segment_ids, dtype=np.uint64) - row_ids = np.array(row_ids) - max_child_ids = np.array(max_child_ids, dtype=np.uint64) - - sorting = np.argsort(segment_ids)[::-1] - row_ids = row_ids[sorting] - max_child_ids = max_child_ids[sorting] - - counter = collections.defaultdict(int) - max_child_ids_occ_so_far = np.zeros(len(max_child_ids), - dtype=np.int) - for i_row in range(len(max_child_ids)): - max_child_ids_occ_so_far[i_row] = counter[max_child_ids[i_row]] - counter[max_child_ids[i_row]] += 1 - - # Filter last occurences (we inverted the list) of each node - m = max_child_ids_occ_so_far == 0 - row_ids = row_ids[m] - ll_node_ids.extend(row_ids) - - # Loop through nodes from this chunk - for row_id in row_ids: - if row_id in row_cell_dict: - cross_edge_dict[row_id] = {} - - cell_family = row_cell_dict[row_id] - - for l in range(layer_id - 1, self.n_layers): - row_key = column_keys.Connectivity.CrossChunkEdge[l] - if row_key in cell_family: - cross_edge_dict[row_id][l] = cell_family[row_key][0].value - - if int(layer_id - 1) in cross_edge_dict[row_id]: - atomic_cross_edges = cross_edge_dict[row_id][layer_id - 1] - - if len(atomic_cross_edges) > 0: - atomic_partner_id_dict[row_id] = \ - atomic_cross_edges[:, 1] - - new_pairs = zip(atomic_cross_edges[:, 0], - [row_id] * len(atomic_cross_edges)) - atomic_child_id_dict_pairs.extend(new_pairs) - - def 
_resolve_cross_chunk_edges_thread(args) -> None: - start, end = args - - for i_child_key, child_key in\ - enumerate(atomic_partner_id_dict_keys[start: end]): - this_atomic_partner_ids = atomic_partner_id_dict[child_key] - - partners = {atomic_child_id_dict[atomic_cross_id] - for atomic_cross_id in this_atomic_partner_ids - if atomic_child_id_dict[atomic_cross_id] != 0} - - if len(partners) > 0: - partners = np.array(list(partners), dtype=np.uint64)[:, None] - - this_ids = np.array([child_key] * len(partners), - dtype=np.uint64)[:, None] - these_edges = np.concatenate([this_ids, partners], axis=1) - - edge_ids.extend(these_edges) - - def _write_out_connected_components(args) -> None: - start, end = args - - # Collect cc info - parent_layer_ids = range(layer_id, self.n_layers + 1) - cc_connections = {l: [] for l in parent_layer_ids} - for i_cc, cc in enumerate(ccs[start: end]): - node_ids = unique_graph_ids[cc] - - parent_cross_edges = collections.defaultdict(list) - - # Collect row info for nodes that are in this chunk - for node_id in node_ids: - if node_id in cross_edge_dict: - # Extract edges relevant to this node - for l in range(layer_id, self.n_layers): - if l in cross_edge_dict[node_id] and \ - len(cross_edge_dict[node_id][l]) > 0: - parent_cross_edges[l].append(cross_edge_dict[node_id][l]) - - if self.use_skip_connections and len(node_ids) == 1: - for l in parent_layer_ids: - if l == self.n_layers or len(parent_cross_edges[l]) > 0: - cc_connections[l].append([node_ids, - parent_cross_edges]) - break - else: - cc_connections[layer_id].append([node_ids, - parent_cross_edges]) - - # Write out cc info - rows = [] - - # Iterate through layers - for parent_layer_id in parent_layer_ids: - if len(cc_connections[parent_layer_id]) == 0: - continue - - parent_chunk_id = parent_chunk_id_dict[parent_layer_id] - reserved_parent_ids = self.get_unique_node_id_range( - parent_chunk_id, step=len(cc_connections[parent_layer_id])) - - for i_cc, cc_info in 
enumerate(cc_connections[parent_layer_id]): - node_ids, parent_cross_edges = cc_info - - parent_id = reserved_parent_ids[i_cc] - val_dict = {column_keys.Hierarchy.Parent: parent_id} - - for node_id in node_ids: - rows.append(self.mutate_row( - serializers.serialize_uint64(node_id), - val_dict, time_stamp=time_stamp)) - - val_dict = {column_keys.Hierarchy.Child: node_ids} - for l in range(parent_layer_id, self.n_layers): - if l in parent_cross_edges and len(parent_cross_edges[l]) > 0: - val_dict[column_keys.Connectivity.CrossChunkEdge[l]] = \ - np.concatenate(parent_cross_edges[l]) - - rows.append( - self.mutate_row(serializers.serialize_uint64(parent_id), - val_dict, time_stamp=time_stamp)) - - if len(rows) > 100000: - self.bulk_write(rows) - rows = [] - - if len(rows) > 0: - self.bulk_write(rows) - - time_stamp = get_valid_timestamp(time_stamp) - # 1 -------------------------------------------------------------------- - # The first part is concerned with reading data from the child nodes - # of this layer and pre-processing it for the second part - - time_start = time.time() - - atomic_partner_id_dict = {} - cross_edge_dict = {} - atomic_child_id_dict_pairs = [] - ll_node_ids = [] - - multi_args = child_chunk_coords - n_jobs = np.min([n_threads, len(multi_args)]) - - if n_jobs > 0: - mu.multithread_func(_read_subchunks_thread, multi_args, - n_threads=n_jobs) - - d = dict(atomic_child_id_dict_pairs) - atomic_child_id_dict = collections.defaultdict(np.uint64, d) - ll_node_ids = np.array(ll_node_ids, dtype=np.uint64) - - if verbose: - self.logger.debug("Time iterating through subchunks: %.3fs" % - (time.time() - time_start)) - time_start = time.time() - - # Extract edges from remaining cross chunk edges - # and maintain unused cross chunk edges - edge_ids = [] - # u_atomic_child_ids = np.unique(atomic_child_ids) - atomic_partner_id_dict_keys = \ - np.array(list(atomic_partner_id_dict.keys()), dtype=np.uint64) - - if n_threads > 1: - n_jobs = n_threads * 3 # Heuristic 
- else: - n_jobs = 1 - - n_jobs = np.min([n_jobs, len(atomic_partner_id_dict_keys)]) - - if n_jobs > 0: - spacing = np.linspace(0, len(atomic_partner_id_dict_keys), - n_jobs+1).astype(np.int) - starts = spacing[:-1] - ends = spacing[1:] - - multi_args = list(zip(starts, ends)) - - mu.multithread_func(_resolve_cross_chunk_edges_thread, multi_args, - n_threads=n_threads) - - if verbose: - self.logger.debug("Time resolving cross chunk edges: %.3fs" % - (time.time() - time_start)) - time_start = time.time() - - # 2 -------------------------------------------------------------------- - # The second part finds connected components, writes the parents to - # BigTable and updates the childs - - # Make parent id creation easier - x, y, z = np.min(child_chunk_coords, axis=0) // self.fan_out - chunk_id = self.get_chunk_id(layer=layer_id, x=x, y=y, z=z) - - parent_chunk_id_dict = self.get_parent_chunk_id_dict(chunk_id) - - # Extract connected components - isolated_node_mask = ~np.in1d(ll_node_ids, np.unique(edge_ids)) - add_node_ids = ll_node_ids[isolated_node_mask].squeeze() - add_edge_ids = np.vstack([add_node_ids, add_node_ids]).T - edge_ids.extend(add_edge_ids) - - graph, _, _, unique_graph_ids = flatgraph_utils.build_gt_graph( - edge_ids, make_directed=True) - - ccs = flatgraph_utils.connected_components(graph) - - if verbose: - self.logger.debug("Time connected components: %.3fs" % - (time.time() - time_start)) - time_start = time.time() - - # Add rows for nodes that are in this chunk - # a connected component at a time - if n_threads > 1: - n_jobs = n_threads * 3 # Heuristic - else: - n_jobs = 1 - - n_jobs = np.min([n_jobs, len(ccs)]) - - spacing = np.linspace(0, len(ccs), n_jobs+1).astype(np.int) - starts = spacing[:-1] - ends = spacing[1:] - - multi_args = list(zip(starts, ends)) - - mu.multithread_func(_write_out_connected_components, multi_args, - n_threads=n_threads) - - if verbose: - self.logger.debug("Time writing %d connected components in layer %d: %.3fs" % - 
(len(ccs), layer_id, time.time() - time_start)) + raise NotImplementedError() - # to track worker completion - return str(layer_id) def get_atomic_cross_edge_dict(self, node_id: np.uint64, layer_ids: Sequence[int] = None): diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index d5d1da984..c27d74aac 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -7,7 +7,6 @@ from typing import Optional, Sequence import numpy as np -from multiwrapper import multiprocessing_utils as mu from pychunkedgraph.backend import flatgraph_utils from pychunkedgraph.backend.chunkedgraph_utils import get_valid_timestamp diff --git a/pychunkedgraph/ingest/ran_ingestion.py b/pychunkedgraph/ingest/ran_ingestion.py deleted file mode 100644 index 7a742d1bf..000000000 --- a/pychunkedgraph/ingest/ran_ingestion.py +++ /dev/null @@ -1,556 +0,0 @@ -import collections -import time -import random - -import pandas as pd -import cloudvolume -import networkx as nx -import numpy as np -import numpy.lib.recfunctions as rfn -import zstandard as zstd -from multiwrapper import multiprocessing_utils as mu - -from pychunkedgraph.ingest import ingestionmanager, ingestion_utils as iu - - -def ingest_into_chunkedgraph(storage_path, ws_cv_path, cg_table_id, - chunk_size=[256, 256, 512], - use_skip_connections=True, - s_bits_atomic_layer=None, - fan_out=2, aff_dtype=np.float32, - size=None, - instance_id=None, project_id=None, - start_layer=1, n_threads=[64, 64]): - """ Creates a chunkedgraph from a Ran Agglomerattion - - :param storage_path: str - Google cloud bucket path (agglomeration) - example: gs://ranl-scratch/minnie_test_2 - :param ws_cv_path: str - Google cloud bucket path (watershed segmentation) - example: gs://microns-seunglab/minnie_v0/minnie10/ws_minnie_test_2/agg - :param cg_table_id: str - chunkedgraph table name - :param fan_out: int - fan out 
of chunked graph (2 == Octree) - :param aff_dtype: np.dtype - affinity datatype (np.float32 or np.float64) - :param instance_id: str - Google instance id - :param project_id: str - Google project id - :param start_layer: int - :param n_threads: list of ints - number of threads to use - :return: - """ - storage_path = storage_path.strip("/") - ws_cv_path = ws_cv_path.strip("/") - - cg_mesh_dir = f"{cg_table_id}_meshes" - chunk_size = np.array(chunk_size, dtype=np.uint64) - - _, n_layers_agg = iu.initialize_chunkedgraph(cg_table_id=cg_table_id, - ws_cv_path=ws_cv_path, - chunk_size=chunk_size, - size=size, - use_skip_connections=use_skip_connections, - s_bits_atomic_layer=s_bits_atomic_layer, - cg_mesh_dir=cg_mesh_dir, - fan_out=fan_out, - instance_id=instance_id, - project_id=project_id) - - im = ingestionmanager.IngestionManager(storage_path=storage_path, - cg_table_id=cg_table_id, - n_layers=n_layers_agg, - instance_id=instance_id, - project_id=project_id) - - # #TODO: Remove later: - # logging.basicConfig(level=logging.DEBUG) - # im.cg.logger = logging.getLogger(__name__) - # ------------------------------------------ - if start_layer < 3: - create_atomic_chunks(im, aff_dtype=aff_dtype, n_threads=n_threads[0]) - - create_abstract_layers(im, n_threads=n_threads[1], start_layer=start_layer) - - return im - - -def create_abstract_layers(im, start_layer=3, n_threads=1): - """ Creates abstract of chunkedgraph (> 2) - - :param im: IngestionManager - :param n_threads: int - number of threads to use - :return: - """ - if start_layer < 3: - start_layer = 3 - - assert start_layer < int(im.cg.n_layers + 1) - - for layer_id in range(start_layer, int(im.cg.n_layers + 1)): - create_layer(im, layer_id, n_threads=n_threads) - - -def create_layer(im, layer_id, block_size=100, n_threads=1): - """ Creates abstract layer of chunkedgraph - - Abstract layers have to be build in sequence. Abstract layers are all layers - above the first layer (1). 
`create_atomic_chunks` creates layer 2 as well. - Hence, this function is responsible for every creating layers > 2. - - :param im: IngestionManager - :param layer_id: int - > 2 - :param n_threads: int - number of threads to use - :return: - """ - assert layer_id > 2 - - child_chunk_coords = im.chunk_coords // im.cg.fan_out ** (layer_id - 3) - child_chunk_coords = child_chunk_coords.astype(np.int) - child_chunk_coords = np.unique(child_chunk_coords, axis=0) - - parent_chunk_coords = child_chunk_coords // im.cg.fan_out - parent_chunk_coords = parent_chunk_coords.astype(np.int) - parent_chunk_coords, inds = np.unique(parent_chunk_coords, axis=0, - return_inverse=True) - - im_info = im.get_serialized_info() - multi_args = [] - - # Randomize chunks - order = np.arange(len(parent_chunk_coords), dtype=np.int) - np.random.shuffle(order) - - # Block chunks - block_size = min(block_size, int(np.ceil(len(order) / n_threads / 3))) - n_blocks = int(len(order) / block_size) - blocks = np.array_split(order, n_blocks) - - for i_block, block in enumerate(blocks): - chunks = [] - for idx in block: - chunks.append(child_chunk_coords[inds == idx]) - - multi_args.append([im_info, layer_id, len(order), n_blocks, i_block, - chunks]) - - if n_threads == 1: - mu.multiprocess_func( - _create_layers, multi_args, n_threads=n_threads, - verbose=True, debug=n_threads == 1) - else: - mu.multisubprocess_func(_create_layers, multi_args, n_threads=n_threads, - suffix=f"{layer_id}") - - -def _create_layers(args): - """ Multiprocessing helper for create_layer """ - im_info, layer_id, n_chunks, n_blocks, i_block, chunks = args - im = ingestionmanager.IngestionManager(**im_info) - - for i_chunk, child_chunk_coords in enumerate(chunks): - time_start = time.time() - - im.cg.add_layer(layer_id, child_chunk_coords, n_threads=8, verbose=True) - - print(f"Layer {layer_id} - Job {i_block + 1} / {n_blocks} - " - f"{i_chunk + 1} / {len(chunks)} -- %.3fs" % - (time.time() - time_start)) - - -def 
create_atomic_chunks(im, aff_dtype=np.float32, n_threads=1, block_size=100): - """ Creates all atomic chunks - - :param im: IngestionManager - :param aff_dtype: np.dtype - affinity datatype (np.float32 or np.float64) - :param n_threads: int - number of threads to use - :return: - """ - - im_info = im.get_serialized_info() - - multi_args = [] - - # Randomize chunk order - chunk_coords = list(im.chunk_coord_gen) - order = np.arange(len(chunk_coords), dtype=np.int) - np.random.shuffle(order) - - # Block chunks - block_size = min(block_size, int(np.ceil(len(chunk_coords) / n_threads / 3))) - n_blocks = int(len(chunk_coords) / block_size) - blocks = np.array_split(order, n_blocks) - - for i_block, block in enumerate(blocks): - chunks = [] - for b_idx in block: - chunks.append(chunk_coords[b_idx]) - - multi_args.append([im_info, aff_dtype, n_blocks, i_block, chunks]) - - if n_threads == 1: - mu.multiprocess_func( - _create_atomic_chunk, multi_args, n_threads=n_threads, - verbose=True, debug=n_threads == 1) - else: - mu.multisubprocess_func( - _create_atomic_chunk, multi_args, n_threads=n_threads) - - -def _create_atomic_chunk(args): - """ Multiprocessing helper for create_atomic_chunks """ - im_info, aff_dtype, n_blocks, i_block, chunks = args - im = ingestionmanager.IngestionManager(**im_info) - - for i_chunk, chunk_coord in enumerate(chunks): - time_start = time.time() - - create_atomic_chunk(im, chunk_coord, aff_dtype=aff_dtype, verbose=True) - - print(f"Layer 1 - {chunk_coord} - Job {i_block + 1} / {n_blocks} - " - f"{i_chunk + 1} / {len(chunks)} -- %.3fs" % - (time.time() - time_start)) - - -def create_atomic_chunk(im, chunk_coord, aff_dtype=np.float32, verbose=True): - """ Creates single atomic chunk - - :param im: IngestionManager - :param chunk_coord: np.ndarray - array of three ints - :param aff_dtype: np.dtype - np.float64 or np.float32 - :param verbose: bool - :return: - """ - chunk_coord = np.array(list(chunk_coord), dtype=np.int) - - edge_dict = 
collect_edge_data(im, chunk_coord, aff_dtype=aff_dtype) - mapping = collect_agglomeration_data(im, chunk_coord) - active_edge_dict, isolated_ids = define_active_edges(edge_dict, mapping) - - edge_ids = {} - edge_affs = {} - edge_areas = {} - - for k in edge_dict.keys(): - if k == "cross": - edge_ids[k] = np.concatenate([edge_dict[k]["sv1"][:, None], - edge_dict[k]["sv2"][:, None]], - axis=1) - continue - - sv1_conn = edge_dict[k]["sv1"][active_edge_dict[k]] - sv2_conn = edge_dict[k]["sv2"][active_edge_dict[k]] - aff_conn = edge_dict[k]["aff"][active_edge_dict[k]] - area_conn = edge_dict[k]["area"][active_edge_dict[k]] - edge_ids[f"{k}_connected"] = np.concatenate([sv1_conn[:, None], - sv2_conn[:, None]], - axis=1) - edge_affs[f"{k}_connected"] = aff_conn.astype(np.float32) - edge_areas[f"{k}_connected"] = area_conn - - sv1_disconn = edge_dict[k]["sv1"][~active_edge_dict[k]] - sv2_disconn = edge_dict[k]["sv2"][~active_edge_dict[k]] - aff_disconn = edge_dict[k]["aff"][~active_edge_dict[k]] - area_disconn = edge_dict[k]["area"][~active_edge_dict[k]] - edge_ids[f"{k}_disconnected"] = np.concatenate([sv1_disconn[:, None], - sv2_disconn[:, None]], - axis=1) - edge_affs[f"{k}_disconnected"] = aff_disconn.astype(np.float32) - edge_areas[f"{k}_disconnected"] = area_disconn - - im.cg.add_atomic_edges_in_chunks(edge_ids, edge_affs, edge_areas, - isolated_node_ids=isolated_ids) - - return edge_ids, edge_affs, edge_areas - - -def _get_cont_chunk_coords(im, chunk_coord_a, chunk_coord_b): - """ Computes chunk coordinates that compute data between the named chunks - - :param im: IngestionManagaer - :param chunk_coord_a: np.ndarray - array of three ints - :param chunk_coord_b: np.ndarray - array of three ints - :return: np.ndarray - """ - - diff = chunk_coord_a - chunk_coord_b - - dir_dim = np.where(diff != 0)[0] - assert len(dir_dim) == 1 - dir_dim = dir_dim[0] - - if diff[dir_dim] > 0: - chunk_coord_l = chunk_coord_a - else: - chunk_coord_l = chunk_coord_b - - c_chunk_coords = [] 
- for dx in [-1, 0]: - for dy in [-1, 0]: - for dz in [-1, 0]: - if dz == dy == dx == 0: - continue - - c_chunk_coord = chunk_coord_l + np.array([dx, dy, dz]) - - if [dx, dy, dz][dir_dim] == 0: - continue - - if im.is_out_of_bounce(c_chunk_coord): - continue - - c_chunk_coords.append(c_chunk_coord) - - return c_chunk_coords - - -def collect_edge_data(im, chunk_coord, aff_dtype=np.float32): - """ Loads edge for single chunk - - :param im: IngestionManager - :param chunk_coord: np.ndarray - array of three ints - :param aff_dtype: np.dtype - :return: dict of np.ndarrays - """ - subfolder = "chunked_rg" - - base_path = f"{im.storage_path}/{subfolder}/" - - chunk_coord = np.array(chunk_coord) - - chunk_id = im.cg.get_chunk_id(layer=1, x=chunk_coord[0], y=chunk_coord[1], - z=chunk_coord[2]) - - filenames = collections.defaultdict(list) - swap = collections.defaultdict(list) - for x in [chunk_coord[0] - 1, chunk_coord[0]]: - for y in [chunk_coord[1] - 1, chunk_coord[1]]: - for z in [chunk_coord[2] - 1, chunk_coord[2]]: - - if im.is_out_of_bounce(np.array([x, y, z])): - continue - - # EDGES WITHIN CHUNKS - filename = f"in_chunk_0_{x}_{y}_{z}_{chunk_id}.data" - filenames["in"].append(filename) - - for d in [-1, 1]: - for dim in range(3): - diff = np.zeros([3], dtype=np.int) - diff[dim] = d - - adjacent_chunk_coord = chunk_coord + diff - adjacent_chunk_id = im.cg.get_chunk_id(layer=1, - x=adjacent_chunk_coord[0], - y=adjacent_chunk_coord[1], - z=adjacent_chunk_coord[2]) - - if im.is_out_of_bounce(adjacent_chunk_coord): - continue - - c_chunk_coords = _get_cont_chunk_coords(im, chunk_coord, - adjacent_chunk_coord) - - larger_id = np.max([chunk_id, adjacent_chunk_id]) - smaller_id = np.min([chunk_id, adjacent_chunk_id]) - chunk_id_string = f"{smaller_id}_{larger_id}" - - for c_chunk_coord in c_chunk_coords: - x, y, z = c_chunk_coord - - # EDGES BETWEEN CHUNKS - filename = f"between_chunks_0_{x}_{y}_{z}_{chunk_id_string}.data" - filenames["between"].append(filename) - - 
swap[filename] = larger_id == chunk_id - - # EDGES FROM CUTS OF SVS - filename = f"fake_0_{x}_{y}_{z}_{chunk_id_string}.data" - filenames["cross"].append(filename) - - swap[filename] = larger_id == chunk_id - - edge_data = {} - read_counter = collections.Counter() - - dtype = [("sv1", np.uint64), ("sv2", np.uint64), - ("aff", aff_dtype), ("area", np.uint64)] - for k in filenames: - # print(k, len(filenames[k])) - - with cloudvolume.Storage(base_path, n_threads=10) as stor: - files = stor.get_files(filenames[k]) - - data = [] - for file in files: - if file["content"] is None: - # print(f"{file['filename']} not created or empty") - continue - - if file["error"] is not None: - # print(f"error reading {file['filename']}") - continue - - if swap[file["filename"]]: - this_dtype = [dtype[1], dtype[0], dtype[2], dtype[3]] - content = np.frombuffer(file["content"], dtype=this_dtype) - else: - content = np.frombuffer(file["content"], dtype=dtype) - - data.append(content) - - read_counter[k] += 1 - - try: - edge_data[k] = rfn.stack_arrays(data, usemask=False) - except: - raise() - - edge_data_df = pd.DataFrame(edge_data[k]) - edge_data_dfg = edge_data_df.groupby(["sv1", "sv2"]).aggregate(np.sum).reset_index() - edge_data[k] = edge_data_dfg.to_records() - - # # TEST - # with cloudvolume.Storage(base_path, n_threads=10) as stor: - # files = list(stor.list_files()) - # - # true_counter = collections.Counter() - # for file in files: - # if str(chunk_id) in file: - # true_counter[file.split("_")[0]] += 1 - # - # print("Truth", true_counter) - # print("Reality", read_counter) - - return edge_data - - -def _read_agg_files(filenames, base_path): - with cloudvolume.Storage(base_path, n_threads=10) as stor: - files = stor.get_files(filenames) - - edge_list = [] - for file in files: - if file["content"] is None: - continue - - if file["error"] is not None: - continue - - content = zstd.ZstdDecompressor().decompressobj().decompress(file["content"]) - 
edge_list.append(np.frombuffer(content, dtype=np.uint64).reshape(-1, 2)) - - return edge_list - - -def collect_agglomeration_data(im, chunk_coord): - """ Collects agglomeration information & builds connected component mapping - - :param im: IngestionManager - :param chunk_coord: np.ndarray - array of three ints - :return: dictionary - """ - subfolder = "remap" - base_path = f"{im.storage_path}/{subfolder}/" - - chunk_coord = np.array(chunk_coord) - - chunk_id = im.cg.get_chunk_id(layer=1, x=chunk_coord[0], y=chunk_coord[1], - z=chunk_coord[2]) - - filenames = [] - for mip_level in range(0, int(im.n_layers - 1)): - x, y, z = np.array(chunk_coord / 2 ** mip_level, dtype=np.int) - filenames.append(f"done_{mip_level}_{x}_{y}_{z}_{chunk_id}.data.zst") - - for d in [-1, 1]: - for dim in range(3): - diff = np.zeros([3], dtype=np.int) - diff[dim] = d - - adjacent_chunk_coord = chunk_coord + diff - - adjacent_chunk_id = im.cg.get_chunk_id(layer=1, - x=adjacent_chunk_coord[0], - y=adjacent_chunk_coord[1], - z=adjacent_chunk_coord[2]) - - for mip_level in range(0, int(im.n_layers - 1)): - x, y, z = np.array(adjacent_chunk_coord / 2 ** mip_level, dtype=np.int) - filenames.append(f"done_{mip_level}_{x}_{y}_{z}_{adjacent_chunk_id}.data.zst") - - # print(filenames) - edge_list = _read_agg_files(filenames, base_path) - - edges = np.concatenate(edge_list) - - G = nx.Graph() - G.add_edges_from(edges) - ccs = nx.connected_components(G) - - mapping = {} - for i_cc, cc in enumerate(ccs): - cc = list(cc) - mapping.update(dict(zip(cc, [i_cc] * len(cc)))) - - return mapping - - -def define_active_edges(edge_dict, mapping): - """ Labels edges as within or across segments and extracts isolated ids - - :param edge_dict: dict of np.ndarrays - :param mapping: dict - :return: dict of np.ndarrays, np.ndarray - bool arrays; True: connected (within same segment) - isolated node ids - """ - def _mapping_default(key): - if key in mapping: - return mapping[key] - else: - return -1 - - mapping_vec = 
np.vectorize(_mapping_default) - - active = {} - isolated = [[]] - for k in edge_dict: - if len(edge_dict[k]["sv1"]) > 0: - agg_id_1 = mapping_vec(edge_dict[k]["sv1"]) - else: - assert len(edge_dict[k]["sv2"]) == 0 - active[k] = np.array([], dtype=np.bool) - continue - - agg_id_2 = mapping_vec(edge_dict[k]["sv2"]) - - active[k] = agg_id_1 == agg_id_2 - - # Set those with two -1 to False - agg_1_m = agg_id_1 == -1 - agg_2_m = agg_id_2 == -1 - active[k][agg_1_m] = False - - isolated.append(edge_dict[k]["sv1"][agg_1_m]) - - if k == "in": - isolated.append(edge_dict[k]["sv2"][agg_2_m]) - - return active, np.unique(np.concatenate(isolated).astype(np.uint64)) - diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 601d453fd..1e20948cc 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -2,11 +2,8 @@ Module for ingesting in chunkedgraph format with edges stored outside bigtable """ -import os import collections -import time -import click import pandas as pd import cloudvolume import networkx as nx @@ -14,8 +11,6 @@ import numpy.lib.recfunctions as rfn import zstandard as zstd from flask import current_app -from rq import Queue -from redis import Redis from ..utils.redis import redis_job, REDIS_URL from . 
import ingestionmanager, ingestion_utils as iu @@ -138,15 +133,12 @@ def create_atomic_chunk(imanager, coord): coord = np.array(list(coord), dtype=np.int) edge_dict = collect_edge_data(imanager, coord) edge_dict = iu.postprocess_edge_data(imanager, edge_dict) - mapping = collect_agglomeration_data(imanager, coord) - active_edge_d, isolated_ids = define_active_edges(edge_dict, mapping) # flag to check if chunk has edges # avoid writing to cloud storage if there are no edges # unnecessary write operation no_edges = True chunk_edges_all = {} - chunk_edges_active = {} for edge_type in EDGE_TYPES: sv_ids1 = edge_dict[edge_type]["sv1"] sv_ids2 = edge_dict[edge_type]["sv2"] @@ -159,24 +151,39 @@ def create_atomic_chunk(imanager, coord): chunk_edges_all[edge_type] = Edges( sv_ids1, sv_ids2, affinities=affinities, areas=areas ) - active = active_edge_d[edge_type] - sv_ids1 = sv_ids1[active] - sv_ids2 = sv_ids2[active] - affinities = affinities[active] - areas = areas[active] - chunk_edges_active[edge_type] = Edges( - sv_ids1, sv_ids2, affinities=affinities, areas=areas - ) no_edges = no_edges and not sv_ids1.size # if not no_edges: # put_chunk_edges(cg.edge_dir, coord, chunk_edges_all, ZSTD_COMPRESSION_LEVEL) + chunk_edges_active, isolated_ids = _get_active_edges( + imanager, coord, edge_dict, chunk_edges_all + ) add_atomic_edges(imanager.cg, coord, chunk_edges_active, isolated=isolated_ids) # to track workers completion, layer = 2 return str(2) +def _get_active_edges(imanager, coord, raw_edges_d, processed_edges_d): + + mapping = collect_agglomeration_data(imanager, coord) + active_edges_flag_d, isolated_ids = define_active_edges(raw_edges_d, mapping) + + chunk_edges_active = {} + for edge_type in EDGE_TYPES: + edges = processed_edges_d[edge_type] + active = active_edges_flag_d[edge_type] + + sv_ids1 = edges.sv_ids1[active] + sv_ids2 = edges.sv_ids2[active] + affinities = edges.affinities[active] + areas = edges.areas[active] + chunk_edges_active[edge_type] = Edges( + 
sv_ids1, sv_ids2, affinities=affinities, areas=areas + ) + return chunk_edges_active, isolated_ids + + def _get_cont_chunk_coords(imanager, chunk_coord_a, chunk_coord_b): """ Computes chunk coordinates that compute data between the named chunks @@ -326,7 +333,7 @@ def collect_edge_data(imanager, chunk_coord): try: edge_data[k] = rfn.stack_arrays(data, usemask=False) except: - raise () + raise ValueError() edge_data_df = pd.DataFrame(edge_data[k]) edge_data_dfg = ( From 0d30d41dc86c30d66d824b4df739e30be2bb2577 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 9 Sep 2019 15:19:59 +0000 Subject: [PATCH 0198/1097] more refactor --- pychunkedgraph/ingest/ran_ingestion_v2.py | 32 +++++++++-------------- 1 file changed, 12 insertions(+), 20 deletions(-) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 1e20948cc..6121be6f6 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -3,6 +3,7 @@ """ import collections +import itertools import pandas as pd import cloudvolume @@ -201,28 +202,19 @@ def _get_cont_chunk_coords(imanager, chunk_coord_a, chunk_coord_b): assert len(dir_dim) == 1 dir_dim = dir_dim[0] - if diff[dir_dim] > 0: - chunk_coord_l = chunk_coord_a - else: - chunk_coord_l = chunk_coord_b - + chunk_coord_l = chunk_coord_a if diff[dir_dim] > 0 else chunk_coord_b c_chunk_coords = [] - for dx in [-1, 0]: - for dy in [-1, 0]: - for dz in [-1, 0]: - if dz == dy == dx == 0: - continue - - c_chunk_coord = chunk_coord_l + np.array([dx, dy, dz]) - - if [dx, dy, dz][dir_dim] == 0: - continue - - if imanager.is_out_of_bounce(c_chunk_coord): - continue - - c_chunk_coords.append(c_chunk_coord) + for dx, dy, dz in itertools.product([0, -1], [0, -1], [0, -1]): + if dz == dy == dx == 0: + continue + if [dx, dy, dz][dir_dim] == 0: + continue + c_chunk_coord = chunk_coord_l + np.array([dx, dy, dz]) + if imanager.is_out_of_bounce(c_chunk_coord): + continue + 
c_chunk_coords.append(c_chunk_coord) + return c_chunk_coords From c67e4a5a3ecca10fe8a775d1d0cfa4eceff37bb5 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 9 Sep 2019 15:28:53 +0000 Subject: [PATCH 0199/1097] more refactor --- pychunkedgraph/ingest/ingestionmanager.py | 81 +++++++++++++++-------- pychunkedgraph/ingest/ran_ingestion_v2.py | 15 +++-- 2 files changed, 64 insertions(+), 32 deletions(-) diff --git a/pychunkedgraph/ingest/ingestionmanager.py b/pychunkedgraph/ingest/ingestionmanager.py index 8b5eb2e63..80814e883 100644 --- a/pychunkedgraph/ingest/ingestionmanager.py +++ b/pychunkedgraph/ingest/ingestionmanager.py @@ -5,8 +5,16 @@ class IngestionManager(object): - def __init__(self, storage_path, cg_table_id=None, n_layers=None, - instance_id=None, project_id=None, data_version=2): + def __init__( + self, + storage_path, + cg_table_id=None, + n_layers=None, + instance_id=None, + project_id=None, + data_version=2, + create_edges=True, + ): self._storage_path = storage_path self._cg_table_id = cg_table_id self._instance_id = instance_id @@ -14,6 +22,7 @@ def __init__(self, storage_path, cg_table_id=None, n_layers=None, self._cg = None self._n_layers = n_layers self._data_version = data_version + self._create_edges = create_edges @property def storage_path(self): @@ -27,18 +36,34 @@ def data_version(self): @property def edge_dtype(self): if self.data_version == 4: - dtype = [("sv1", np.uint64), ("sv2", np.uint64), - ("aff_x", np.float32), ("area_x", np.uint64), - ("aff_y", np.float32), ("area_y", np.uint64), - ("aff_z", np.float32), ("area_z", np.uint64)] + dtype = [ + ("sv1", np.uint64), + ("sv2", np.uint64), + ("aff_x", np.float32), + ("area_x", np.uint64), + ("aff_y", np.float32), + ("area_y", np.uint64), + ("aff_z", np.float32), + ("area_z", np.uint64), + ] elif self.data_version == 3: - dtype = [("sv1", np.uint64), ("sv2", np.uint64), - ("aff_x", np.float64), ("area_x", np.uint64), - ("aff_y", np.float64), ("area_y", np.uint64), - ("aff_z", 
np.float64), ("area_z", np.uint64)] + dtype = [ + ("sv1", np.uint64), + ("sv2", np.uint64), + ("aff_x", np.float64), + ("area_x", np.uint64), + ("aff_y", np.float64), + ("area_y", np.uint64), + ("aff_z", np.float64), + ("area_z", np.uint64), + ] elif self.data_version == 2: - dtype = [("sv1", np.uint64), ("sv2", np.uint64), - ("aff", np.float32), ("area", np.uint64)] + dtype = [ + ("sv1", np.uint64), + ("sv2", np.uint64), + ("aff", np.float32), + ("area", np.uint64), + ] else: raise Exception() @@ -55,8 +80,7 @@ def cg(self): if self._project_id is not None: kwargs["project_id"] = self._project_id - self._cg = chunkedgraph.ChunkedGraph(table_id=self._cg_table_id, - **kwargs) + self._cg = chunkedgraph.ChunkedGraph(table_id=self._cg_table_id, **kwargs) return self._cg @@ -64,7 +88,6 @@ def cg(self): def bounds(self): bounds = self.cg.vx_vol_bounds.copy() bounds -= self.cg.vx_vol_bounds[:, 0:1] - return bounds @property @@ -85,22 +108,24 @@ def n_layers(self): self._n_layers = self.cg.n_layers return self._n_layers + @property + def create_edges(self): + return self._create_edges + def get_serialized_info(self): - info = {"storage_path": self.storage_path, - "cg_table_id": self._cg_table_id, - "n_layers": self.n_layers, - "instance_id": self._instance_id, - "project_id": self._project_id, - "data_version": self.data_version} + info = { + "storage_path": self.storage_path, + "cg_table_id": self._cg_table_id, + "n_layers": self.n_layers, + "instance_id": self._instance_id, + "project_id": self._project_id, + "data_version": self.data_version, + } return info def is_out_of_bounce(self, chunk_coordinate): - if np.any(chunk_coordinate < 0): - return True - - if np.any(chunk_coordinate > 2**self.cg.bitmasks[1]): - return True - - return False + return np.any(chunk_coordinate < 0) or np.any( + chunk_coordinate > 2 ** self.cg.bitmasks[1] + ) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 6121be6f6..e26a1dc5f 100644 --- 
a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -21,7 +21,7 @@ from ..io.edges import put_chunk_edges -ZSTD_COMPRESSION_LEVEL = 17 +ZSTD_LEVEL = 17 INGEST_CHANNEL = "ingest" @@ -39,7 +39,13 @@ def ingest_into_chunkedgraph( edge_dir=None, n_chunks=None, is_new=True, + create_edges=True, ): + """ + :param create_edges: + Set this to false if the edges have already been processed. + This is needed because processing edges and bulding chunkedgraph is completely de-coupled. + """ storage_path = storage_path.strip("/") ws_cv_path = ws_cv_path.strip("/") @@ -68,6 +74,7 @@ def ingest_into_chunkedgraph( instance_id=instance_id, project_id=project_id, data_version=4, + create_edges=create_edges ) if layer < 3: @@ -154,8 +161,8 @@ def create_atomic_chunk(imanager, coord): ) no_edges = no_edges and not sv_ids1.size - # if not no_edges: - # put_chunk_edges(cg.edge_dir, coord, chunk_edges_all, ZSTD_COMPRESSION_LEVEL) + if imanager.create_edges and not no_edges: + put_chunk_edges(imanager.cg.edge_dir, coord, chunk_edges_all, ZSTD_LEVEL) chunk_edges_active, isolated_ids = _get_active_edges( imanager, coord, edge_dict, chunk_edges_all ) @@ -214,7 +221,7 @@ def _get_cont_chunk_coords(imanager, chunk_coord_a, chunk_coord_b): if imanager.is_out_of_bounce(c_chunk_coord): continue c_chunk_coords.append(c_chunk_coord) - + return c_chunk_coords From 32315c5377ab4f37c7ac85e1ce946041df5f0481 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 9 Sep 2019 17:52:38 +0000 Subject: [PATCH 0200/1097] make dev setup easy --- .devcontainer/Dockerfile | 59 +++++++++++++++++++++++++++++++++ .devcontainer/devcontainer.json | 7 ++-- .gitignore | 3 +- 3 files changed, 65 insertions(+), 4 deletions(-) create mode 100644 .devcontainer/Dockerfile diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 000000000..7095bb908 --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,59 @@ +FROM 
tiangolo/uwsgi-nginx-flask:python3.6 + + +COPY override/timeout.conf /etc/nginx/conf.d/timeout.conf +COPY override/supervisord.conf /etc/supervisor/conf.d/supervisord.conf +COPY requirements.txt /app +RUN apt-get update \ + && apt-get install -y \ + # Boost and g++ for compiling DracoPy and graph_tool + build-essential \ + libboost-dev \ + # Required for adding graph-tools and cloud-sdk to the apt source list + lsb-release \ + curl \ + apt-transport-https \ + # GOOGLE-CLOUD-SDK + && pip install --no-cache-dir --upgrade crcmod \ + && echo "deb https://packages.cloud.google.com/apt cloud-sdk-$(lsb_release -c -s) main" > /etc/apt/sources.list.d/google-cloud-sdk.list \ + && curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - \ + && apt-get update \ + && apt-get install -y google-cloud-sdk google-cloud-sdk-bigtable-emulator \ + # PYTHON-GRAPH-TOOL + # WARNING: This is ugly, graph-tools will use Debian's Python version and install as dist-util, + # but all our packages use the site-util Python version - we just create a sym_link, + # because it _seems_ to work and saves 80 minutes compilation time ... 
+ && echo "deb http://downloads.skewed.de/apt/$(lsb_release -s -c) $(lsb_release -s -c) main" > /etc/apt/sources.list.d/graph-tool.list \ + && echo "deb-src http://downloads.skewed.de/apt/$(lsb_release -s -c) $(lsb_release -s -c) main" >> /etc/apt/sources.list.d/graph-tool.list \ + && apt-key adv --no-tty --keyserver hkp://keyserver.ubuntu.com --recv-key 612DEFB798507F25 \ + && apt-get update \ + && apt-get install -y python3-graph-tool \ + && ln -s /usr/lib/python3/dist-packages/graph_tool /usr/local/lib/python3.6/site-packages/graph_tool \ + && pip install --no-cache-dir --upgrade scipy \ + # PYCHUNKEDGRAPH + # Need pip 18.1 for process-dependency-links flag support + && pip install --no-cache-dir pip==18.1 \ + # Need numpy to prevent install issue with cloud-volume / fpzip + && pip install --no-cache-dir --upgrade numpy \ + && pip install --no-cache-dir --upgrade --process-dependency-links -r requirements.txt \ + # Tests + && pip install tox codecov \ + # CLEANUP + # libboost-dev and build-essentials will be required by tox to build python dependencies + && apt-get remove --purge -y lsb-release curl \ + && apt-get autoremove --purge -y \ + && rm -rf /var/lib/apt/lists/* \ + && find /usr/local/lib/python3* -depth \ + \( \ + \( -type d -a \( -name __pycache__ \) \) \ + -o \ + \( -type f -a \( -name '*.pyc' -o -name '*.pyo' \) \) \ + \) -exec rm -rf '{}' + \ + && find /usr/lib/python3* -depth \ + \( \ + \( -type d -a \( -name __pycache__ \) \) \ + -o \ + \( -type f -a \( -name '*.pyc' -o -name '*.pyo' \) \) \ + \) -exec rm -rf '{}' + + +COPY . /app \ No newline at end of file diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 4f33f176d..628c4ba15 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -8,7 +8,7 @@ "context": "..", // Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename. 
- "dockerFile": "../Dockerfile", + "dockerFile": "Dockerfile", // The optional 'runArgs' property can be used to specify additional runtime arguments. "runArgs": [ @@ -20,16 +20,17 @@ // Uncomment the next line to use a non-root user. See https://aka.ms/vscode-remote/containers/non-root-user. // "-u", "1000" + "-v", "/usr/people/halageri/secrets:/root/.cloudvolume/secrets" ], // Uncomment the next line if you want to publish any ports. - // "appPort": [], + "appPort": [8888], // Uncomment the next line if you want to add in default container specific settings.json values // "settings": { "workbench.colorTheme": "Quiet Light" }, // Uncomment the next line to run commands after the container is created - for example installing git. - // "postCreateCommand": "apt-get update && apt-get install -y git", + "postCreateCommand": "pip install jupyter && git clone https://github.com/akhileshh/notebooks.git && jupyter notebook --config `pwd`/override/jupyter_notebook_config.py --allow-root", // Add the IDs of any extensions you want installed in the array below. 
"extensions": [] diff --git a/.gitignore b/.gitignore index ef8107b32..573288276 100644 --- a/.gitignore +++ b/.gitignore @@ -111,4 +111,5 @@ venv.bak/ # local dev stuff output.txt -src/ \ No newline at end of file +src/ +notebooks/ \ No newline at end of file From de34c0db71db196c4f9d419ba1d138be03befe9c Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 9 Sep 2019 18:48:04 +0000 Subject: [PATCH 0201/1097] update dev stuff --- .devcontainer/Dockerfile | 4 ++-- .devcontainer/devcontainer.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 7095bb908..3507c2f84 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -37,7 +37,7 @@ RUN apt-get update \ && pip install --no-cache-dir --upgrade numpy \ && pip install --no-cache-dir --upgrade --process-dependency-links -r requirements.txt \ # Tests - && pip install tox codecov \ + && pip install tox codecov jupyter\ # CLEANUP # libboost-dev and build-essentials will be required by tox to build python dependencies && apt-get remove --purge -y lsb-release curl \ @@ -56,4 +56,4 @@ RUN apt-get update \ \( -type f -a \( -name '*.pyc' -o -name '*.pyo' \) \) \ \) -exec rm -rf '{}' + -COPY . /app \ No newline at end of file +COPY . /app diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 628c4ba15..46e7dbac3 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -30,7 +30,7 @@ // "settings": { "workbench.colorTheme": "Quiet Light" }, // Uncomment the next line to run commands after the container is created - for example installing git. 
- "postCreateCommand": "pip install jupyter && git clone https://github.com/akhileshh/notebooks.git && jupyter notebook --config `pwd`/override/jupyter_notebook_config.py --allow-root", + "postCreateCommand": "rm -rf notebooks && git clone https://github.com/akhileshh/notebooks.git", // Add the IDs of any extensions you want installed in the array below. "extensions": [] From d7dda1086c2dd770b997815e2d5ba416eb800b67 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 9 Sep 2019 18:50:00 +0000 Subject: [PATCH 0202/1097] remove k8s yamls --- kube-dev/1.redis.yml | 35 -------------------- kube-dev/2.pcg.yml | 67 --------------------------------------- kube-dev/3.pcg-worker.yml | 57 --------------------------------- kube-dev/4.hpa.yml | 15 --------- 4 files changed, 174 deletions(-) delete mode 100644 kube-dev/1.redis.yml delete mode 100644 kube-dev/2.pcg.yml delete mode 100644 kube-dev/3.pcg-worker.yml delete mode 100644 kube-dev/4.hpa.yml diff --git a/kube-dev/1.redis.yml b/kube-dev/1.redis.yml deleted file mode 100644 index 4e9db473a..000000000 --- a/kube-dev/1.redis.yml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: redis - labels: - app: pcg-redis -spec: - type: NodePort - ports: - - port: 6379 - targetPort: 6379 - selector: - app: pcg-redis ---- -apiVersion: v1 -kind: Pod -metadata: - name: redis - labels: - app: pcg-redis -spec: - restartPolicy: Never - containers: - - name: redis - image: redis:5.0.4-alpine - imagePullPolicy: Always - resources: - requests: - memory: 10Gi - command: ["redis-server"] - args: ["--requirepass", "dev", "--save", "", "--appendonly", "no"] - ports: - - containerPort: 6379 - nodeSelector: - nodetype: redis-server \ No newline at end of file diff --git a/kube-dev/2.pcg.yml b/kube-dev/2.pcg.yml deleted file mode 100644 index 78a0549b0..000000000 --- a/kube-dev/2.pcg.yml +++ /dev/null @@ -1,67 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: pcg-svc - labels: - app: pcg-master -spec: - 
type: LoadBalancer - ports: - - port: 4000 - targetPort: 4000 - name: pcg - - port: 9181 - targetPort: 9181 - name: rq-dashboard - selector: - app: pcg-master ---- -apiVersion: v1 -kind: Pod -metadata: - name: pcg-master - labels: - app: pcg-master -spec: - restartPolicy: Never # for development - volumes: - - name: google-secret - secret: - secretName: google-secret - - name: microns-seunglab-google-secret - secret: - secretName: microns-seunglab-google-secret - - name: seunglab2-google-secret - secret: - secretName: seunglab2-google-secret - containers: - - name: pcg - image: gcr.io/neuromancer-seung-import/pychunkedgraph:akhilesh-fafb-mesh-worker - imagePullPolicy: Always - resources: - requests: - memory: 2Gi - env: - - name: APP_SETTINGS - value: pychunkedgraph.app.config.DeploymentWithRedisConfig - - name: FLASK_APP - value: run_dev_cli.py - - name: REDIS_PASSWORD - value: dev # for development - ports: - - containerPort: 4000 - volumeMounts: - - name: google-secret - mountPath: /root/.cloudvolume/secrets/google-secret.json - subPath: google-secret.json - readOnly: true - - name: microns-seunglab-google-secret - mountPath: /root/.cloudvolume/secrets/microns-seunglab-google-secret.json - subPath: microns-seunglab-google-secret.json - readOnly: true - - name: seunglab2-google-secret - mountPath: /root/.cloudvolume/secrets/seunglab2-google-secret.json - subPath: seunglab2-google-secret.json - readOnly: true - nodeSelector: - nodetype: pcg-master \ No newline at end of file diff --git a/kube-dev/3.pcg-worker.yml b/kube-dev/3.pcg-worker.yml deleted file mode 100644 index c69df0481..000000000 --- a/kube-dev/3.pcg-worker.yml +++ /dev/null @@ -1,57 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: pcg-worker - labels: - app: pcg-worker -spec: - selector: - matchLabels: - app: pcg-worker - replicas: 1 - template: - metadata: - labels: - app: pcg-worker - spec: - dnsPolicy: Default - volumes: - - name: google-secret - secret: - secretName: google-secret 
- - name: microns-seunglab-google-secret - secret: - secretName: microns-seunglab-google-secret - - name: seunglab2-google-secret - secret: - secretName: seunglab2-google-secret - containers: - - name: pcg-worker - image: gcr.io/neuromancer-seung-import/pychunkedgraph:akhilesh-fafb-mesh-worker - imagePullPolicy: Always - resources: - requests: - memory: 3Gi - env: - - name: APP_SETTINGS - value: pychunkedgraph.app.config.DeploymentWithRedisConfig - - name: FLASK_APP - value: run_dev_cli.py - - name: REDIS_PASSWORD - value: dev # for development - volumeMounts: - - name: google-secret - mountPath: /root/.cloudvolume/secrets/google-secret.json - subPath: google-secret.json - readOnly: true - - name: microns-seunglab-google-secret - mountPath: /root/.cloudvolume/secrets/microns-seunglab-google-secret.json - subPath: microns-seunglab-google-secret.json - readOnly: true - - name: seunglab2-google-secret - mountPath: /root/.cloudvolume/secrets/seunglab2-google-secret.json - subPath: seunglab2-google-secret.json - readOnly: true - command: ["rq"] - args: ["worker", "-c", "rq_workers.test_worker"] - \ No newline at end of file diff --git a/kube-dev/4.hpa.yml b/kube-dev/4.hpa.yml deleted file mode 100644 index 3d19210ac..000000000 --- a/kube-dev/4.hpa.yml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: autoscaling/v1 -kind: HorizontalPodAutoscaler -metadata: - name: pcg-hpa - namespace: default - labels: - app: pcg-worker -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: pcg-worker - minReplicas: 1 - maxReplicas: 8 - targetCPUUtilizationPercentage: 80 \ No newline at end of file From 74c38bfe9b427686b43fc155071a1ac501146b5b Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 9 Sep 2019 19:21:40 +0000 Subject: [PATCH 0203/1097] pytz import --- .devcontainer/devcontainer.json | 3 ++- pychunkedgraph/backend/chunkedgraph.py | 2 -- pychunkedgraph/backend/chunkedgraph_utils.py | 1 + 3 files changed, 3 insertions(+), 3 deletions(-) diff --git 
a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 46e7dbac3..593a0d017 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -20,7 +20,8 @@ // Uncomment the next line to use a non-root user. See https://aka.ms/vscode-remote/containers/non-root-user. // "-u", "1000" - "-v", "/usr/people/halageri/secrets:/root/.cloudvolume/secrets" + "-w", "${env:HOME}/projects/sl-pychunkedgraph", + "-v", "${env:HOME}/secrets:/root/.cloudvolume/secrets" ], // Uncomment the next line if you want to publish any ports. diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index e7bd04fc7..0729fd20a 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -5,7 +5,6 @@ import os import sys import networkx as nx -import pytz import cloudvolume import re import itertools @@ -52,7 +51,6 @@ N_DIGITS_UINT64 = len(str(np.iinfo(np.uint64).max)) N_BITS_PER_ROOT_COUNTER = np.uint64(8) LOCK_EXPIRED_TIME_DELTA = datetime.timedelta(minutes=3, seconds=0) -UTC = pytz.UTC # Setting environment wide credential path os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = \ diff --git a/pychunkedgraph/backend/chunkedgraph_utils.py b/pychunkedgraph/backend/chunkedgraph_utils.py index 44af98599..3449e3668 100644 --- a/pychunkedgraph/backend/chunkedgraph_utils.py +++ b/pychunkedgraph/backend/chunkedgraph_utils.py @@ -3,6 +3,7 @@ import numpy as np import pandas as pd +import pytz from google.cloud import bigtable from google.cloud.bigtable.row_filters import TimestampRange, \ From 8136e0b1eb59dc82920623598f66b1bed0f8847b Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 9 Sep 2019 19:52:35 +0000 Subject: [PATCH 0204/1097] remove more code --- pychunkedgraph/backend/chunkedgraph.py | 79 +++----------------------- 1 file changed, 9 insertions(+), 70 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 
0729fd20a..d3ed06442 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -2401,76 +2401,15 @@ def get_subgraph_edges(self, agglomeration_id: np.uint64, connected_edges=True, verbose: bool = True ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - """ Return all atomic edges between supervoxels belonging to the - specified agglomeration ID within the defined bounding box - - :param agglomeration_id: int - :param bounding_box: [[x_l, y_l, z_l], [x_h, y_h, z_h]] - :param bb_is_coordinate: bool - :param verbose: bool - :return: edge list - """ - - if self._edge_dir: - return self.get_subgraph_edges_v2( - np.array([agglomeration_id]), - bbox=bounding_box, - bbox_is_coordinate=bb_is_coordinate - ) - - def _get_subgraph_layer2_edges(node_ids) -> \ - Tuple[List[np.ndarray], List[np.float32], List[np.uint64]]: - return self.get_subgraph_chunk(node_ids, - connected_edges=connected_edges, - time_stamp=time_stamp) - - time_stamp = self.read_node_id_row(agglomeration_id, - columns=column_keys.Hierarchy.Child)[0].timestamp - - bounding_box = self.normalize_bounding_box(bounding_box, bb_is_coordinate) - - # Layer 3+ - child_ids = self._get_subgraph_higher_layer_nodes( - node_id=agglomeration_id, bounding_box=bounding_box, - return_layers=[2], verbose=verbose)[2] - - # Layer 2 - if verbose: - time_start = time.time() - - - child_chunk_ids = self.get_chunk_ids_from_node_ids(child_ids) - u_ccids = np.unique(child_chunk_ids) - - child_blocks = [] - # Make blocks of child ids that are in the same chunk - for u_ccid in u_ccids: - child_blocks.append(child_ids[child_chunk_ids == u_ccid]) - - n_child_ids = len(child_ids) - this_n_threads = np.min([int(n_child_ids // 50000) + 1, mu.n_cpus]) - - edge_infos = mu.multithread_func( - _get_subgraph_layer2_edges, - np.array_split(child_ids, this_n_threads), - n_threads=this_n_threads, debug=this_n_threads == 1) - - affinities = np.array([], dtype=np.float32) - areas = np.array([], dtype=np.uint64) - 
edges = np.array([], dtype=np.uint64).reshape(0, 2) - - for edge_info in edge_infos: - _edges, _affinities, _areas = edge_info - areas = np.concatenate([areas, _areas]) - affinities = np.concatenate([affinities, _affinities]) - edges = np.concatenate([edges, _edges]) - - if verbose: - self.logger.debug("Layer %d: %.3fms for %d childs with %d threads" % - (2, (time.time() - time_start) * 1000, - n_child_ids, this_n_threads)) - - return edges, affinities, areas + """ + Return all atomic edges between supervoxels belonging to the + specified agglomeration ID within the defined bounding box + """ + return self.get_subgraph_edges_v2( + np.array([agglomeration_id]), + bbox=bounding_box, + bbox_is_coordinate=bb_is_coordinate + ) def get_subgraph_edges_v2( self, From 1fad15c6299a5bd28904f045e2dcc1b392bd1dbf Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 9 Sep 2019 20:19:12 +0000 Subject: [PATCH 0205/1097] fix formatting and unused imports --- pychunkedgraph/backend/chunkedgraph_edits.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_edits.py b/pychunkedgraph/backend/chunkedgraph_edits.py index d3738343e..e2f1af5ca 100644 --- a/pychunkedgraph/backend/chunkedgraph_edits.py +++ b/pychunkedgraph/backend/chunkedgraph_edits.py @@ -2,19 +2,19 @@ import numpy as np import collections -from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union,\ - NamedTuple +from typing import Any, Dict, Iterable, List, Union +from typing import Optional, Sequence, Tuple, NamedTuple from multiwrapper import multiprocessing_utils as mu -from pychunkedgraph.backend.chunkedgraph_utils \ - import get_google_compatible_time_stamp, combine_cross_chunk_edge_dicts -from pychunkedgraph.backend.utils import column_keys, serializers -from pychunkedgraph.backend import flatgraph_utils +from . 
import flatgraph_utils +from .chunkedgraph_utils import combine_cross_chunk_edge_dicts +from .utils import column_keys, serializers from .utils.helpers import get_bounding_box from .utils.edge_utils import filter_fake_edges from .utils.edge_utils import map_edges_to_chunks + def _write_atomic_merge_edges(cg, atomic_edges, affinities, areas, time_stamp): rows = [] @@ -153,10 +153,6 @@ def _read_cc_edges_thread(node_ids): atomic_edges = np.array(atomic_edges, dtype=column_keys.Connectivity.Partner.basetype) - # # Comply to resolution of BigTables TimeRange - # time_stamp = get_google_compatible_time_stamp(time_stamp, - # round_up=False) - if affinities is None: affinities = np.ones(len(atomic_edges), dtype=column_keys.Connectivity.Affinity.basetype) From a048a5e53d900e7dcaa81862b9c06cae7a4d2495 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 10 Sep 2019 14:08:07 +0000 Subject: [PATCH 0206/1097] helper to validate edges --- pychunkedgraph/backend/chunkedgraph_edits.py | 58 ++++++++++---------- 1 file changed, 30 insertions(+), 28 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_edits.py b/pychunkedgraph/backend/chunkedgraph_edits.py index e2f1af5ca..b45460e49 100644 --- a/pychunkedgraph/backend/chunkedgraph_edits.py +++ b/pychunkedgraph/backend/chunkedgraph_edits.py @@ -132,13 +132,11 @@ def add_edges(cg, affinities: Optional[Sequence[np.float32]] = None ): """ Add edges to chunkedgraph - - Computes all new rows to be written to the chunkedgraph + Computes all new rows to be written to the chunkedgraph :param cg: ChunkedGraph instance :param operation_id: np.uint64 - :param atomic_edges: list of list of np.uint64 - edges between supervoxels + :param atomic_edges: list of list of np.uint64 edges between supervoxels :param time_stamp: datetime.datetime :param areas: list of np.uint64 :param affinities: list of np.float32 @@ -150,24 +148,7 @@ def _read_cc_edges_thread(node_ids): cc_dict = {} - atomic_edges = np.array(atomic_edges, - 
dtype=column_keys.Connectivity.Partner.basetype) - - if affinities is None: - affinities = np.ones(len(atomic_edges), - dtype=column_keys.Connectivity.Affinity.basetype) - else: - affinities = np.array(affinities, - dtype=column_keys.Connectivity.Affinity.basetype) - - if areas is None: - areas = np.ones(len(atomic_edges), - dtype=column_keys.Connectivity.Area.basetype) * np.inf - else: - areas = np.array(areas, - dtype=column_keys.Connectivity.Area.basetype) - - assert len(affinities) == len(atomic_edges) + atomic_edges, affinities, areas = _validate_edges(atomic_edges, affinities, areas) rows = [] # list of rows to be written to BigTable lvl2_dict = {} @@ -239,9 +220,29 @@ def _read_cc_edges_thread(node_ids): rows.extend(new_rows) else: new_root_ids = np.array(list(lvl2_dict.keys())) + return new_root_ids, list(lvl2_dict.keys()), rows - return new_root_ids, list(lvl2_dict.keys()), rows +def _validate_edges(atomic_edges, affinities=None, areas=None): + atomic_edges = np.array(atomic_edges, + dtype=column_keys.Connectivity.Partner.basetype) + + if affinities is None: + affinities = np.ones(len(atomic_edges), + dtype=column_keys.Connectivity.Affinity.basetype) + else: + affinities = np.array(affinities, + dtype=column_keys.Connectivity.Affinity.basetype) + + if areas is None: + areas = np.ones(len(atomic_edges), + dtype=column_keys.Connectivity.Area.basetype) * np.inf + else: + areas = np.array(areas, + dtype=column_keys.Connectivity.Area.basetype) + + assert len(affinities) == len(atomic_edges) + return atomic_edges, affinities, areas def add_fake_edges( @@ -303,7 +304,7 @@ def remove_edges(cg, operation_id: np.uint64, # Analyze atomic_edges --> translate them to lvl2 edges and extract cross # chunk edges to be removed - lvl2_edges, old_cross_edge_dict = analyze_atomic_edges(cg, atomic_edges) + lvl2_edges, _ = analyze_atomic_edges(cg, atomic_edges) lvl2_node_ids = np.unique(lvl2_edges) for lvl2_node_id in lvl2_node_ids: @@ -482,7 +483,7 @@ def 
compute_cross_chunk_connected_components(eh, node_ids, layer): # nodes. In practice, we (1) gather all relevant parents in the next # layer and then (2) acquire their children - old_this_layer_node_ids, old_next_layer_node_ids, \ + _, _, \ old_this_layer_partner_ids = \ old_parent_childrens(eh, node_ids, layer) @@ -543,6 +544,7 @@ def update_root_id_lineage(cg, new_root_ids, former_root_ids, operation_id, return rows + def create_parent_children_rows(cg, parent_id, children_ids, parent_cross_chunk_edge_dict, time_stamp): """ Generates BigTable rows @@ -578,6 +580,7 @@ def create_parent_children_rows(cg, parent_id, children_ids, return rows + def propagate_edits_to_root(cg, lvl2_dict: Dict, lvl2_cross_chunk_edge_dict: Dict, @@ -586,8 +589,7 @@ def propagate_edits_to_root(cg, """ Propagates changes through layers :param cg: ChunkedGraph instance - :param lvl2_dict: dict - maps new ids to old ids + :param lvl2_dict: dict maps new ids to old ids :param lvl2_cross_chunk_edge_dict: dict :param operation_id: np.uint64 :param time_stamp: datetime.datetime @@ -819,7 +821,7 @@ def get_layer_parent(self, node_id, layer, layer_only=False, parent_id = self.get_parent(next_parent_id) if parent_id is None: - raise() + raise ValueError() if self.cg.get_chunk_layer(parent_id) < layer: next_parent_ids.append(parent_id) From 45b1414ce6ef2f32126f254e369f595208a6920a Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 10 Sep 2019 14:12:39 +0000 Subject: [PATCH 0207/1097] helper to validate edges --- pychunkedgraph/backend/chunkedgraph_edits.py | 35 ++++++++++---------- pychunkedgraph/backend/temp.py | 1 + 2 files changed, 19 insertions(+), 17 deletions(-) create mode 100644 pychunkedgraph/backend/temp.py diff --git a/pychunkedgraph/backend/chunkedgraph_edits.py b/pychunkedgraph/backend/chunkedgraph_edits.py index b45460e49..de0770745 100644 --- a/pychunkedgraph/backend/chunkedgraph_edits.py +++ b/pychunkedgraph/backend/chunkedgraph_edits.py @@ -147,7 +147,6 @@ def 
_read_cc_edges_thread(node_ids): cc_dict[node_id] = cg.read_cross_chunk_edges(node_id) cc_dict = {} - atomic_edges, affinities, areas = _validate_edges(atomic_edges, affinities, areas) rows = [] # list of rows to be written to BigTable @@ -223,23 +222,25 @@ def _read_cc_edges_thread(node_ids): return new_root_ids, list(lvl2_dict.keys()), rows -def _validate_edges(atomic_edges, affinities=None, areas=None): - atomic_edges = np.array(atomic_edges, - dtype=column_keys.Connectivity.Partner.basetype) - - if affinities is None: - affinities = np.ones(len(atomic_edges), - dtype=column_keys.Connectivity.Affinity.basetype) - else: - affinities = np.array(affinities, - dtype=column_keys.Connectivity.Affinity.basetype) +def _validate_edges(atomic_edges, _affinities=None, _areas=None): + atomic_edges = np.array( + atomic_edges, dtype=column_keys.Connectivity.Partner.basetype + ) - if areas is None: - areas = np.ones(len(atomic_edges), - dtype=column_keys.Connectivity.Area.basetype) * np.inf - else: - areas = np.array(areas, - dtype=column_keys.Connectivity.Area.basetype) + affinities = np.ones( + len(atomic_edges), dtype=column_keys.Connectivity.Affinity.basetype + ) + if _affinities: + affinities = np.array( + _affinities, dtype=column_keys.Connectivity.Affinity.basetype + ) + + areas = ( + np.ones(len(atomic_edges), dtype=column_keys.Connectivity.Area.basetype) + * np.inf + ) + if _areas: + areas = np.array(areas, dtype=column_keys.Connectivity.Area.basetype) assert len(affinities) == len(atomic_edges) return atomic_edges, affinities, areas diff --git a/pychunkedgraph/backend/temp.py b/pychunkedgraph/backend/temp.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/pychunkedgraph/backend/temp.py @@ -0,0 +1 @@ + From 9032acb99d5ae556f4cb9f0484585abd9005eb7a Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 10 Sep 2019 18:19:57 +0000 Subject: [PATCH 0208/1097] get_subgraph_edges returns dict for easier edge processing --- 
pychunkedgraph/backend/chunkedgraph.py | 18 +++++++++++------- pychunkedgraph/backend/chunkedgraph_edits.py | 8 ++++---- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index d3ed06442..8883583fd 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -2419,7 +2419,7 @@ def get_subgraph_edges_v2( cv_threads: int = 1, active_edges: bool = True, timestamp: datetime.datetime = None - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + ) -> Dict: """ 1. get level 2 children ids belonging to the agglomerations 2. get relevant chunk ids from level 2 ids @@ -2428,6 +2428,7 @@ def get_subgraph_edges_v2( 5. filter the edges with supervoxel ids 6. optionally for each edge (v1,v2) active if parent(v1) == parent(v2) inactive otherwise + 7. returns dict {"level_2_id": [Edges]} """ def _read_edges(chunk_ids) -> dict: @@ -2467,12 +2468,15 @@ def _read_edges(chunk_ids) -> dict: fake_edges = Edges(fake_edges[:,0], fake_edges[:,1]) edges += fake_edges - children_d = self.get_children(level2_ids) - sv_ids = np.concatenate(list(children_d.values())) - edges = filter_edges(sv_ids, edges) - if active_edges: - edges = get_active_edges(edges, children_d) - return edges.get_pairs(), edges.affinities, edges.areas + level2id_children_d = self.get_children(level2_ids) + level2id_edges_d = {} + for level2_id in level2id_children_d: + sv_ids = level2id_children_d[level2_id] + filtered_edges = filter_edges(sv_ids, edges) + if active_edges: + filtered_edges = get_active_edges(filtered_edges, level2id_children_d) + level2id_edges_d[level2_id] = filtered_edges + return level2id_edges_d def get_subgraph_nodes(self, agglomeration_id: np.uint64, bounding_box: Optional[Sequence[Sequence[int]]] = None, diff --git a/pychunkedgraph/backend/chunkedgraph_edits.py b/pychunkedgraph/backend/chunkedgraph_edits.py index de0770745..2cda0dba7 100644 --- 
a/pychunkedgraph/backend/chunkedgraph_edits.py +++ b/pychunkedgraph/backend/chunkedgraph_edits.py @@ -258,17 +258,17 @@ def add_fake_edges( if there is no path between sv1 and sv2 (from added_edges) in the subgraph, add "fake" edges, these are stored in a row per chunk """ - if cg_instance._edge_dir: + if not cg_instance._edge_dir: return [] - root_ids = np.unique(cg_instance.get_roots(added_edges.ravel())) - subgraph_edges, _, _ = cg_instance.get_subgraph_edges_v2( - agglomeration_ids = root_ids, + level2id_edges_d = cg_instance.get_subgraph_edges_v2( + agglomeration_ids = np.unique(cg_instance.get_roots(added_edges.ravel())), bbox = get_bounding_box(source_coords, sink_coords), bbox_is_coordinate = True, cv_threads = 4, active_edges = False, timestamp=timestamp ) + subgraph_edges = reduce(lambda x, y: x+y, level2id_edges_d.values()) fake_edges = filter_fake_edges(added_edges, subgraph_edges) node_ids, r_indices = np.unique(fake_edges, return_inverse=True) r_indices = r_indices.reshape(-1, 2) From b65b023be870615ae5bc5ce1f7d2e4c7dc17cba3 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 10 Sep 2019 18:36:26 +0000 Subject: [PATCH 0209/1097] import reduce --- pychunkedgraph/backend/chunkedgraph_edits.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pychunkedgraph/backend/chunkedgraph_edits.py b/pychunkedgraph/backend/chunkedgraph_edits.py index 2cda0dba7..70eb9941d 100644 --- a/pychunkedgraph/backend/chunkedgraph_edits.py +++ b/pychunkedgraph/backend/chunkedgraph_edits.py @@ -2,6 +2,7 @@ import numpy as np import collections +from functools import reduce from typing import Any, Dict, Iterable, List, Union from typing import Optional, Sequence, Tuple, NamedTuple From 63feba3afded5a7f59155af854ac1d9bc5906e56 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 10 Sep 2019 18:40:55 +0000 Subject: [PATCH 0210/1097] shorter variable names --- pychunkedgraph/backend/chunkedgraph.py | 17 +++++++++-------- pychunkedgraph/backend/chunkedgraph_edits.py | 
4 ++-- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 8883583fd..64d8ed742 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -2468,15 +2468,16 @@ def _read_edges(chunk_ids) -> dict: fake_edges = Edges(fake_edges[:,0], fake_edges[:,1]) edges += fake_edges - level2id_children_d = self.get_children(level2_ids) - level2id_edges_d = {} - for level2_id in level2id_children_d: - sv_ids = level2id_children_d[level2_id] - filtered_edges = filter_edges(sv_ids, edges) + l2id_children_d = self.get_children(level2_ids) + l2id_edges_d = {} + for l2id in l2id_children_d: + l2id_edges_d[l2id] = filter_edges(l2id_children_d[l2id], edges) if active_edges: - filtered_edges = get_active_edges(filtered_edges, level2id_children_d) - level2id_edges_d[level2_id] = filtered_edges - return level2id_edges_d + l2id_edges_d[l2id] = get_active_edges( + l2id_edges_d[l2id], + l2id_children_d + ) + return l2id_edges_d def get_subgraph_nodes(self, agglomeration_id: np.uint64, bounding_box: Optional[Sequence[Sequence[int]]] = None, diff --git a/pychunkedgraph/backend/chunkedgraph_edits.py b/pychunkedgraph/backend/chunkedgraph_edits.py index 70eb9941d..df1bf9490 100644 --- a/pychunkedgraph/backend/chunkedgraph_edits.py +++ b/pychunkedgraph/backend/chunkedgraph_edits.py @@ -261,7 +261,7 @@ def add_fake_edges( """ if not cg_instance._edge_dir: return [] - level2id_edges_d = cg_instance.get_subgraph_edges_v2( + l2id_edges_d = cg_instance.get_subgraph_edges_v2( agglomeration_ids = np.unique(cg_instance.get_roots(added_edges.ravel())), bbox = get_bounding_box(source_coords, sink_coords), bbox_is_coordinate = True, @@ -269,7 +269,7 @@ def add_fake_edges( active_edges = False, timestamp=timestamp ) - subgraph_edges = reduce(lambda x, y: x+y, level2id_edges_d.values()) + subgraph_edges = reduce(lambda x, y: x+y, l2id_edges_d.values()) fake_edges = 
filter_fake_edges(added_edges, subgraph_edges) node_ids, r_indices = np.unique(fake_edges, return_inverse=True) r_indices = r_indices.reshape(-1, 2) From c08deb072fdb8a8c3bb2bbe58ad5070d1462decb Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 10 Sep 2019 19:04:05 +0000 Subject: [PATCH 0211/1097] fix: check both vertices for filtering edges --- pychunkedgraph/backend/chunkedgraph_edits.py | 31 ++++++++++---------- pychunkedgraph/backend/utils/edge_utils.py | 16 ++++++---- 2 files changed, 26 insertions(+), 21 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_edits.py b/pychunkedgraph/backend/chunkedgraph_edits.py index df1bf9490..6825be602 100644 --- a/pychunkedgraph/backend/chunkedgraph_edits.py +++ b/pychunkedgraph/backend/chunkedgraph_edits.py @@ -270,21 +270,22 @@ def add_fake_edges( timestamp=timestamp ) subgraph_edges = reduce(lambda x, y: x+y, l2id_edges_d.values()) - fake_edges = filter_fake_edges(added_edges, subgraph_edges) - node_ids, r_indices = np.unique(fake_edges, return_inverse=True) - r_indices = r_indices.reshape(-1, 2) - chunk_ids = cg_instance.get_chunk_ids_from_node_ids(node_ids) - chunk_edges_d = map_edges_to_chunks(fake_edges, chunk_ids, r_indices) - rows = [] - for chunk_id in chunk_edges_d: - row_key = serializers.serialize_uint64(chunk_id) - fake_edges = chunk_edges_d[chunk_id] - val_d = { - column_keys.Connectivity.FakeEdges: fake_edges, - column_keys.OperationLogs.OperationID: operation_id} - rows.append(cg_instance.mutate_row( - row_key, val_d, time_stamp=timestamp)) - return rows + + # fake_edges = filter_fake_edges(added_edges, subgraph_edges) + # node_ids, r_indices = np.unique(fake_edges, return_inverse=True) + # r_indices = r_indices.reshape(-1, 2) + # chunk_ids = cg_instance.get_chunk_ids_from_node_ids(node_ids) + # chunk_edges_d = map_edges_to_chunks(fake_edges, chunk_ids, r_indices) + # rows = [] + # for chunk_id in chunk_edges_d: + # row_key = serializers.serialize_uint64(chunk_id) + # fake_edges = 
chunk_edges_d[chunk_id] + # val_d = { + # column_keys.Connectivity.FakeEdges: fake_edges, + # column_keys.OperationLogs.OperationID: operation_id + # } + # rows.append(cg_instance.mutate_row(row_key, val_d, time_stamp=timestamp)) + # return rows def remove_edges(cg, operation_id: np.uint64, diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index 733d781ea..d1a4f891c 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -41,13 +41,17 @@ def concatenate_chunk_edges(chunk_edge_dicts: List) -> Dict: def filter_edges(node_ids: np.ndarray, edges: Edges) -> Edges: """find edges for the given node_ids""" xsorted = np.argsort(edges.node_ids1) - indices = np.searchsorted(edges.node_ids1[xsorted], node_ids) - indices = indices[indices < xsorted.size] + indices1 = np.searchsorted(edges.node_ids1[xsorted], node_ids) + indices1 = indices1[indices1 < xsorted.size] - ids1 = edges.node_ids1[indices] - ids2 = edges.node_ids2[indices] - affinities = edges.affinities[indices] - areas = edges.areas[indices] + xsorted = np.argsort(edges.node_ids2) + indices2 = np.searchsorted(edges.node_ids2[xsorted], node_ids) + indices2 = indices2[indices2 < xsorted.size] + + ids1 = edges.node_ids1[indices1 + indices2] + ids2 = edges.node_ids2[indices1 + indices2] + affinities = edges.affinities[indices1 + indices2] + areas = edges.areas[indices1 + indices2] return Edges(ids1, ids2, affinities=affinities, areas=areas) From a1e81ff48e12861e6602e3e01147d9e52e5a6ae3 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 10 Sep 2019 19:42:24 +0000 Subject: [PATCH 0212/1097] wip: edges for merge --- pychunkedgraph/backend/chunkedgraph_edits.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_edits.py b/pychunkedgraph/backend/chunkedgraph_edits.py index 6825be602..f850fd87c 100644 --- a/pychunkedgraph/backend/chunkedgraph_edits.py +++ 
b/pychunkedgraph/backend/chunkedgraph_edits.py @@ -251,7 +251,7 @@ def add_fake_edges( cg_instance, *, operation_id: np.uint64, - added_edges: Sequence[Sequence[np.uint64]], + added_edges: np.ndarray, source_coords: Sequence[np.uint64], sink_coords: Sequence[np.uint64], timestamp: datetime.datetime) -> List["bigtable.row.Row"]: @@ -269,8 +269,6 @@ def add_fake_edges( active_edges = False, timestamp=timestamp ) - subgraph_edges = reduce(lambda x, y: x+y, l2id_edges_d.values()) - # fake_edges = filter_fake_edges(added_edges, subgraph_edges) # node_ids, r_indices = np.unique(fake_edges, return_inverse=True) # r_indices = r_indices.reshape(-1, 2) @@ -288,6 +286,10 @@ def add_fake_edges( # return rows +def _determine_new_edges(added_edges: np.ndarray, l2id_edges_d: Dict): + subgraph_edges = reduce(lambda x, y: x+y, l2id_edges_d.values()) + + def remove_edges(cg, operation_id: np.uint64, atomic_edges: Sequence[Sequence[np.uint64]], time_stamp: datetime.datetime): From 93ede46d2ce08cfeaceeb8926c5db60ea14e46d3 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 10 Sep 2019 20:12:16 +0000 Subject: [PATCH 0213/1097] wip: edges for merge --- pychunkedgraph/backend/chunkedgraph_edits.py | 5 +++++ pychunkedgraph/backend/utils/edge_utils.py | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/pychunkedgraph/backend/chunkedgraph_edits.py b/pychunkedgraph/backend/chunkedgraph_edits.py index f850fd87c..788bb7f7d 100644 --- a/pychunkedgraph/backend/chunkedgraph_edits.py +++ b/pychunkedgraph/backend/chunkedgraph_edits.py @@ -14,6 +14,7 @@ from .utils.helpers import get_bounding_box from .utils.edge_utils import filter_fake_edges from .utils.edge_utils import map_edges_to_chunks +from .utils.edge_utils import get_linking_edges def _write_atomic_merge_edges(cg, atomic_edges, affinities, areas, time_stamp): @@ -269,6 +270,10 @@ def add_fake_edges( active_edges = False, timestamp=timestamp ) + # edges = reduce(lambda x, y: x+y, edges_dict.values()) + l2id_children_d = 
cg_instance.get_children(list(l2id_edges_d.keys())) + edges = reduce(lambda x, y: x+y, l2id_edges_d.values()) + linking_edges = get_linking_edges(edges, l2id_children_d, ) # fake_edges = filter_fake_edges(added_edges, subgraph_edges) # node_ids, r_indices = np.unique(fake_edges, return_inverse=True) # r_indices = r_indices.reshape(-1, 2) diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index d1a4f891c..5612604a5 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -104,3 +104,8 @@ def map_edges_to_chunks( continue chunk_ids_d[chunk_ids[sv2_index]].append(edges[i][::-1]) return {chunk_id: np.array(chunk_ids_d[chunk_id]) for chunk_id in chunk_ids_d} + + +def get_linking_edges(edges: Edges, parent_children_d: Dict, parent_id1: np.uint64, parent_id2: np.uint64): + child_parent_d = reverse_dictionary(parent_children_d) + From 22f291fdcbcd2da6a25ed1997302a86d25da5253 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 10 Sep 2019 20:18:35 +0000 Subject: [PATCH 0214/1097] wip: edges for merge --- pychunkedgraph/backend/utils/edge_utils.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index 5612604a5..f1ed57560 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -106,6 +106,12 @@ def map_edges_to_chunks( return {chunk_id: np.array(chunk_ids_d[chunk_id]) for chunk_id in chunk_ids_d} -def get_linking_edges(edges: Edges, parent_children_d: Dict, parent_id1: np.uint64, parent_id2: np.uint64): - child_parent_d = reverse_dictionary(parent_children_d) +def get_linking_edges( + edges: Edges, parent_children_d: Dict, parent_id1: np.uint64, parent_id2: np.uint64 +): + """ + Find edges that link two level 2 ids + (sv1, sv2) or (sv2, sv1) -> parent(sv1) == parent_id1 and parent(sv2) == parent_id2 + 
""" + child_parent_d = reverse_dictionary(parent_children_d) From 0a88a41470dad8db9d8c5ad7cf4cb5f59bfe5c64 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 11 Sep 2019 14:57:26 +0000 Subject: [PATCH 0215/1097] helper funtion to get edges linking 2 components --- pychunkedgraph/backend/chunkedgraph.py | 19 ++++++------ pychunkedgraph/backend/utils/edge_utils.py | 35 +++++++++++++++++----- 2 files changed, 37 insertions(+), 17 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 64d8ed742..9cb2a6f94 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -2459,15 +2459,16 @@ def _read_edges(chunk_ids) -> dict: ) edges_dict = concatenate_chunk_edges(chunk_edge_dicts) edges = reduce(lambda x, y: x+y, edges_dict.values()) - # include fake edges - chunk_fake_edges_d = self.read_node_id_rows( - node_ids=chunk_ids, - columns=column_keys.Connectivity.FakeEdges) - fake_edges = np.concatenate([list(chunk_fake_edges_d.values())]) - if fake_edges.size: - fake_edges = Edges(fake_edges[:,0], fake_edges[:,1]) - edges += fake_edges - + # # include fake edges + # chunk_fake_edges_d = self.read_node_id_rows( + # node_ids=chunk_ids, + # columns=column_keys.Connectivity.FakeEdges) + # fake_edges = np.concatenate([list(chunk_fake_edges_d.values())]) + # if fake_edges.size: + # fake_edges = Edges(fake_edges[:,0], fake_edges[:,1]) + # edges += fake_edges + + # group edges based on level 2 ids l2id_children_d = self.get_children(level2_ids) l2id_edges_d = {} for l2id in l2id_children_d: diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index f1ed57560..75acd4ce0 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -39,7 +39,10 @@ def concatenate_chunk_edges(chunk_edge_dicts: List) -> Dict: def filter_edges(node_ids: np.ndarray, edges: Edges) -> Edges: - """find edges for 
the given node_ids""" + """ + find edges for the given node_ids + given an edge (sv1, sv2), include if node_id == sv1 or node_id == sv2 + """ xsorted = np.argsort(edges.node_ids1) indices1 = np.searchsorted(edges.node_ids1[xsorted], node_ids) indices1 = indices1[indices1 < xsorted.size] @@ -64,15 +67,14 @@ def get_active_edges(edges: Edges, parent_children_d: Dict) -> Edges: sv_ids1 = edges.node_ids1 sv_ids2 = edges.node_ids2 - affinities = edges.affinities - areas = edges.areas parent_ids1 = np.array([child_parent_d.get(sv_id, sv_id) for sv_id in sv_ids1]) parent_ids2 = np.array([child_parent_d.get(sv_id, sv_id) for sv_id in sv_ids2]) - sv_ids1 = sv_ids1[parent_ids1 == parent_ids2] - sv_ids2 = sv_ids2[parent_ids1 == parent_ids2] - affinities = affinities[parent_ids1 == parent_ids2] - areas = areas[parent_ids1 == parent_ids2] + mask = parent_ids1 == parent_ids2 + sv_ids1 = sv_ids1[mask] + sv_ids2 = sv_ids2[mask] + affinities = edges.affinities[mask] + areas = edges.areas[mask] return Edges(sv_ids1, sv_ids2, affinities=affinities, areas=areas) @@ -111,7 +113,24 @@ def get_linking_edges( ): """ Find edges that link two level 2 ids - (sv1, sv2) or (sv2, sv1) -> parent(sv1) == parent_id1 and parent(sv2) == parent_id2 + include (sv1, sv2) if parent(sv1) == parent_id1 and parent(sv2) == parent_id2 + or include (sv1, sv2) if parent(sv1) == parent_id2 and parent(sv2) == parent_id1 """ child_parent_d = reverse_dictionary(parent_children_d) + sv_ids1 = edges.node_ids1 + sv_ids2 = edges.node_ids2 + + parent_ids1 = np.array([child_parent_d.get(sv_id, sv_id) for sv_id in sv_ids1]) + parent_ids2 = np.array([child_parent_d.get(sv_id, sv_id) for sv_id in sv_ids2]) + + mask = (parent_ids1 == parent_id1) & (parent_ids2 == parent_id2) + mask |= (parent_ids1 == parent_id2) & (parent_ids2 == parent_id1) + + sv_ids1 = sv_ids1[mask] + sv_ids2 = sv_ids2[mask] + affinities = edges.affinities[mask] + areas = edges.areas[mask] + + return Edges(sv_ids1, sv_ids2, affinities=affinities, 
areas=areas) + From 626fbc917f7236d5246322de525ad96df976dbd3 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 11 Sep 2019 15:53:25 +0000 Subject: [PATCH 0216/1097] wip: edges for merge: agglomeration class --- pychunkedgraph/backend/chunkedgraph.py | 19 +++++----- pychunkedgraph/backend/chunkedgraph_edits.py | 35 ++++++++++++------- .../backend/definitions/agglomeration.py | 21 +++++++++++ pychunkedgraph/backend/temp.py | 1 - pychunkedgraph/backend/utils/edge_utils.py | 3 +- 5 files changed, 56 insertions(+), 23 deletions(-) create mode 100644 pychunkedgraph/backend/definitions/agglomeration.py diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 9cb2a6f94..db49ecf7c 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -42,6 +42,7 @@ from google.cloud.bigtable.column_family import MaxVersionsGCRule from .definitions.edges import Edges +from .definitions.agglomeration import Agglomeration from .utils.edge_utils import ( concatenate_chunk_edges, filter_edges, get_active_edges) from ..io.edges import get_chunk_edges @@ -2419,7 +2420,7 @@ def get_subgraph_edges_v2( cv_threads: int = 1, active_edges: bool = True, timestamp: datetime.datetime = None - ) -> Dict: + ) -> Tuple[Dict, Dict]: """ 1. get level 2 children ids belonging to the agglomerations 2. get relevant chunk ids from level 2 ids @@ -2428,7 +2429,7 @@ def get_subgraph_edges_v2( 5. filter the edges with supervoxel ids 6. optionally for each edge (v1,v2) active if parent(v1) == parent(v2) inactive otherwise - 7. returns dict {"level_2_id": [Edges]} + 7. 
returns tuple of dicts {"level_2_id": [Edges]}, {"level_2_id"} """ def _read_edges(chunk_ids) -> dict: @@ -2468,17 +2469,19 @@ def _read_edges(chunk_ids) -> dict: # fake_edges = Edges(fake_edges[:,0], fake_edges[:,1]) # edges += fake_edges - # group edges based on level 2 ids + # group nodes and edges based on level 2 ids + l2id_agglomeration_d = {} l2id_children_d = self.get_children(level2_ids) - l2id_edges_d = {} for l2id in l2id_children_d: - l2id_edges_d[l2id] = filter_edges(l2id_children_d[l2id], edges) + supervoxels = l2id_children_d[l2id] + filtered_edges = filter_edges(l2id_children_d[l2id], edges) if active_edges: - l2id_edges_d[l2id] = get_active_edges( - l2id_edges_d[l2id], + filtered_edges = get_active_edges( + filtered_edges, l2id_children_d ) - return l2id_edges_d + l2id_agglomeration_d[l2id] = Agglomeration(supervoxels, filtered_edges) + return l2id_agglomeration_d def get_subgraph_nodes(self, agglomeration_id: np.uint64, bounding_box: Optional[Sequence[Sequence[int]]] = None, diff --git a/pychunkedgraph/backend/chunkedgraph_edits.py b/pychunkedgraph/backend/chunkedgraph_edits.py index 788bb7f7d..4d39bc08c 100644 --- a/pychunkedgraph/backend/chunkedgraph_edits.py +++ b/pychunkedgraph/backend/chunkedgraph_edits.py @@ -255,25 +255,36 @@ def add_fake_edges( added_edges: np.ndarray, source_coords: Sequence[np.uint64], sink_coords: Sequence[np.uint64], - timestamp: datetime.datetime) -> List["bigtable.row.Row"]: + timestamp: datetime.datetime +) -> List["bigtable.row.Row"]: """ if there is no path between sv1 and sv2 (from added_edges) in the subgraph, add "fake" edges, these are stored in a row per chunk """ if not cg_instance._edge_dir: return [] - l2id_edges_d = cg_instance.get_subgraph_edges_v2( - agglomeration_ids = np.unique(cg_instance.get_roots(added_edges.ravel())), - bbox = get_bounding_box(source_coords, sink_coords), - bbox_is_coordinate = True, - cv_threads = 4, - active_edges = False, - timestamp=timestamp + l2id_agglomeration_d = 
cg_instance.get_subgraph_edges_v2( + agglomeration_ids=np.unique(cg_instance.get_roots(added_edges.ravel())), + bbox=get_bounding_box(source_coords, sink_coords), + bbox_is_coordinate=True, + cv_threads=4, + active_edges=False, + timestamp=timestamp, ) - # edges = reduce(lambda x, y: x+y, edges_dict.values()) - l2id_children_d = cg_instance.get_children(list(l2id_edges_d.keys())) - edges = reduce(lambda x, y: x+y, l2id_edges_d.values()) - linking_edges = get_linking_edges(edges, l2id_children_d, ) + # added_edges is assumed to have just one edge + parent_id1, parent_id2 = cg_instance.get_parents(added_edges.ravel()) + l2id_children_d = { + l2id: l2id_agglomeration_d[l2id].supervoxels for l2id in l2id_agglomeration_d + } + subgraph_edges = reduce( + lambda x, y: x + y, [agg.edges for agg in l2id_agglomeration_d.values()] + ) + + fake_edges = filter_fake_edges(added_edges, subgraph_edges) + linking_edges = get_linking_edges( + subgraph_edges, l2id_children_d, parent_id1, parent_id2 + ) + # fake_edges = filter_fake_edges(added_edges, subgraph_edges) # node_ids, r_indices = np.unique(fake_edges, return_inverse=True) # r_indices = r_indices.reshape(-1, 2) diff --git a/pychunkedgraph/backend/definitions/agglomeration.py b/pychunkedgraph/backend/definitions/agglomeration.py new file mode 100644 index 000000000..59e146400 --- /dev/null +++ b/pychunkedgraph/backend/definitions/agglomeration.py @@ -0,0 +1,21 @@ +""" +Agglomeration +""" + +from typing import Optional + +import numpy as np + +from .edges import Edges + + +class Agglomeration: + """ + An agglomeration is a connected component at a given level. + Composed of supervoxel ids and the edges between them. 
+ """ + + def __init__(self, supervoxels: np.ndarray, edges: Edges, level: Optional[int] = 2): + self.supervoxels = supervoxels + self.edges = edges + self.level = level diff --git a/pychunkedgraph/backend/temp.py b/pychunkedgraph/backend/temp.py index 8b1378917..e69de29bb 100644 --- a/pychunkedgraph/backend/temp.py +++ b/pychunkedgraph/backend/temp.py @@ -1 +0,0 @@ - diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index 75acd4ce0..f92ea8b6d 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -114,10 +114,9 @@ def get_linking_edges( """ Find edges that link two level 2 ids include (sv1, sv2) if parent(sv1) == parent_id1 and parent(sv2) == parent_id2 - or include (sv1, sv2) if parent(sv1) == parent_id2 and parent(sv2) == parent_id1 + or if parent(sv1) == parent_id2 and parent(sv2) == parent_id1 """ child_parent_d = reverse_dictionary(parent_children_d) - sv_ids1 = edges.node_ids1 sv_ids2 = edges.node_ids2 From 9abc7c9db975d67d0972044c0f4127b93c0f9b3c Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 11 Sep 2019 15:56:26 +0000 Subject: [PATCH 0217/1097] wip: edges for merge: fix docs --- pychunkedgraph/backend/chunkedgraph.py | 7 +++---- pychunkedgraph/backend/chunkedgraph_edits.py | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index db49ecf7c..688dc8d77 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -2406,13 +2406,13 @@ def get_subgraph_edges(self, agglomeration_id: np.uint64, Return all atomic edges between supervoxels belonging to the specified agglomeration ID within the defined bounding box """ - return self.get_subgraph_edges_v2( + return self.get_subgraph( np.array([agglomeration_id]), bbox=bounding_box, bbox_is_coordinate=bb_is_coordinate ) - def get_subgraph_edges_v2( + def get_subgraph( 
self, agglomeration_ids: np.ndarray, bbox: Optional[Sequence[Sequence[int]]] = None, @@ -2429,9 +2429,8 @@ def get_subgraph_edges_v2( 5. filter the edges with supervoxel ids 6. optionally for each edge (v1,v2) active if parent(v1) == parent(v2) inactive otherwise - 7. returns tuple of dicts {"level_2_id": [Edges]}, {"level_2_id"} + 7. returns dict of Agglomerations """ - def _read_edges(chunk_ids) -> dict: return get_chunk_edges( self._edge_dir, diff --git a/pychunkedgraph/backend/chunkedgraph_edits.py b/pychunkedgraph/backend/chunkedgraph_edits.py index 4d39bc08c..50bc6fc51 100644 --- a/pychunkedgraph/backend/chunkedgraph_edits.py +++ b/pychunkedgraph/backend/chunkedgraph_edits.py @@ -263,7 +263,7 @@ def add_fake_edges( """ if not cg_instance._edge_dir: return [] - l2id_agglomeration_d = cg_instance.get_subgraph_edges_v2( + l2id_agglomeration_d = cg_instance.get_subgraph( agglomeration_ids=np.unique(cg_instance.get_roots(added_edges.ravel())), bbox=get_bounding_box(source_coords, sink_coords), bbox_is_coordinate=True, From ce8930491ac4064ef66a98d3a325d40967e0c05f Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 11 Sep 2019 17:18:56 +0000 Subject: [PATCH 0218/1097] wip: edges for merge checkpoint --- pychunkedgraph/backend/chunkedgraph_edits.py | 4 ++-- pychunkedgraph/backend/graphoperation.py | 18 +++++++++--------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_edits.py b/pychunkedgraph/backend/chunkedgraph_edits.py index 50bc6fc51..6af56198f 100644 --- a/pychunkedgraph/backend/chunkedgraph_edits.py +++ b/pychunkedgraph/backend/chunkedgraph_edits.py @@ -248,7 +248,7 @@ def _validate_edges(atomic_edges, _affinities=None, _areas=None): return atomic_edges, affinities, areas -def add_fake_edges( +def add_edges_v2( cg_instance, *, operation_id: np.uint64, @@ -280,7 +280,7 @@ def add_fake_edges( lambda x, y: x + y, [agg.edges for agg in l2id_agglomeration_d.values()] ) - fake_edges = 
filter_fake_edges(added_edges, subgraph_edges) + # fake_edges = filter_fake_edges(added_edges, subgraph_edges) linking_edges = get_linking_edges( subgraph_edges, l2id_children_d, parent_id1, parent_id2 ) diff --git a/pychunkedgraph/backend/graphoperation.py b/pychunkedgraph/backend/graphoperation.py index f9fb00618..11578eab5 100644 --- a/pychunkedgraph/backend/graphoperation.py +++ b/pychunkedgraph/backend/graphoperation.py @@ -421,14 +421,14 @@ def _update_root_ids(self) -> np.ndarray: def _apply( self, *, operation_id, timestamp ) -> Tuple[np.ndarray, np.ndarray, List["bigtable.row.Row"]]: - fake_edge_rows = cg_edits.add_fake_edges( - self.cg, - operation_id=operation_id, - added_edges=self.added_edges, - source_coords=self.source_coords, - sink_coords=self.sink_coords, - timestamp=timestamp - ) + # fake_edge_rows = cg_edits.add_fake_edges( + # self.cg, + # operation_id=operation_id, + # added_edges=self.added_edges, + # source_coords=self.source_coords, + # sink_coords=self.sink_coords, + # timestamp=timestamp + # ) new_root_ids, new_lvl2_ids, rows = cg_edits.add_edges( self.cg, operation_id, @@ -436,7 +436,7 @@ def _apply( time_stamp=timestamp, affinities=self.affinities, ) - rows.extend(fake_edge_rows) + # rows.extend(fake_edge_rows) return new_root_ids, new_lvl2_ids, rows def _create_log_record(self, *, operation_id, timestamp, new_root_ids) -> "bigtable.row.Row": From 73f987f559a124afbd99572af719337f1e04d576 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 11 Sep 2019 18:24:34 +0000 Subject: [PATCH 0219/1097] wip: improve ingestion --- pychunkedgraph/backend/chunkedgraph.py | 8 ++--- pychunkedgraph/backend/chunkedgraph_edits.py | 10 ++---- pychunkedgraph/ingest/ingestionmanager.py | 8 ++--- pychunkedgraph/ingest/ran_ingestion_v2.py | 37 +++++++++++--------- 4 files changed, 31 insertions(+), 32 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 688dc8d77..a81272aa6 100644 --- 
a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -106,6 +106,9 @@ def __init__(self, self._cv_path = self._dataset_info["data_dir"] # required self._mesh_dir = self._dataset_info.get("mesh", None) # optional + self._edge_dir = self.check_and_write_table_parameters( + column_keys.GraphSettings.EdgeDir, edge_dir, + required=False, is_new=is_new) self._n_layers = self.check_and_write_table_parameters( column_keys.GraphSettings.LayerCount, n_layers, @@ -127,9 +130,6 @@ def __init__(self, self._chunk_size = self.check_and_write_table_parameters( column_keys.GraphSettings.ChunkSize, chunk_size, required=True, is_new=is_new) - self._edge_dir = self.check_and_write_table_parameters( - column_keys.GraphSettings.EdgeDir, edge_dir, - required=False, is_new=is_new) self._dataset_info["graph"] = {"chunk_size": self.chunk_size} @@ -2433,7 +2433,7 @@ def get_subgraph( """ def _read_edges(chunk_ids) -> dict: return get_chunk_edges( - self._edge_dir, + self.cv_edges_path, [self.get_chunk_coordinates(chunk_id) for chunk_id in chunk_ids], cv_threads, ) diff --git a/pychunkedgraph/backend/chunkedgraph_edits.py b/pychunkedgraph/backend/chunkedgraph_edits.py index 6af56198f..8c177dc44 100644 --- a/pychunkedgraph/backend/chunkedgraph_edits.py +++ b/pychunkedgraph/backend/chunkedgraph_edits.py @@ -208,7 +208,7 @@ def _read_cc_edges_thread(node_ids): cross_chunk_edge_dict, time_stamp)) - if not cg._edge_dir: + if not cg.cv_edges_path: # Write atomic nodes rows.extend(_write_atomic_merge_edges(cg, atomic_edges, affinities, areas, time_stamp=time_stamp)) @@ -261,7 +261,7 @@ def add_edges_v2( if there is no path between sv1 and sv2 (from added_edges) in the subgraph, add "fake" edges, these are stored in a row per chunk """ - if not cg_instance._edge_dir: + if not cg_instance.cv_edges_path: return [] l2id_agglomeration_d = cg_instance.get_subgraph( agglomeration_ids=np.unique(cg_instance.get_roots(added_edges.ravel())), @@ -302,10 +302,6 @@ def 
add_edges_v2( # return rows -def _determine_new_edges(added_edges: np.ndarray, l2id_edges_d: Dict): - subgraph_edges = reduce(lambda x, y: x+y, l2id_edges_d.values()) - - def remove_edges(cg, operation_id: np.uint64, atomic_edges: Sequence[Sequence[np.uint64]], time_stamp: datetime.datetime): @@ -421,7 +417,7 @@ def remove_edges(cg, operation_id: np.uint64, operation_id=operation_id, time_stamp=time_stamp)) - if not cg._edge_dir: + if not cg.cv_edges_path: # Write atomic nodes rows.extend(_write_atomic_split_edges(cg, atomic_edges, time_stamp=time_stamp)) diff --git a/pychunkedgraph/ingest/ingestionmanager.py b/pychunkedgraph/ingest/ingestionmanager.py index 80814e883..a0ff6108d 100644 --- a/pychunkedgraph/ingest/ingestionmanager.py +++ b/pychunkedgraph/ingest/ingestionmanager.py @@ -13,7 +13,7 @@ def __init__( instance_id=None, project_id=None, data_version=2, - create_edges=True, + use_raw_data=True, ): self._storage_path = storage_path self._cg_table_id = cg_table_id @@ -22,7 +22,7 @@ def __init__( self._cg = None self._n_layers = n_layers self._data_version = data_version - self._create_edges = create_edges + self._use_raw_data = use_raw_data @property def storage_path(self): @@ -109,8 +109,8 @@ def n_layers(self): return self._n_layers @property - def create_edges(self): - return self._create_edges + def use_raw_data(self): + return self._use_raw_data def get_serialized_info(self): info = { diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index e26a1dc5f..0eb7c2bcd 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -39,10 +39,10 @@ def ingest_into_chunkedgraph( edge_dir=None, n_chunks=None, is_new=True, - create_edges=True, + use_raw_data=True, ): """ - :param create_edges: + :param use_raw_data: Set this to false if the edges have already been processed. This is needed because processing edges and bulding chunkedgraph is completely de-coupled. 
""" @@ -74,7 +74,7 @@ def ingest_into_chunkedgraph( instance_id=instance_id, project_id=project_id, data_version=4, - create_edges=create_edges + use_raw_data=use_raw_data ) if layer < 3: @@ -161,10 +161,11 @@ def create_atomic_chunk(imanager, coord): ) no_edges = no_edges and not sv_ids1.size - if imanager.create_edges and not no_edges: + if imanager.use_raw_data and not no_edges: put_chunk_edges(imanager.cg.edge_dir, coord, chunk_edges_all, ZSTD_LEVEL) + chunk_edges_active, isolated_ids = _get_active_edges( - imanager, coord, edge_dict, chunk_edges_all + imanager, coord, chunk_edges_all ) add_atomic_edges(imanager.cg, coord, chunk_edges_active, isolated=isolated_ids) @@ -172,14 +173,19 @@ def create_atomic_chunk(imanager, coord): return str(2) -def _get_active_edges(imanager, coord, raw_edges_d, processed_edges_d): +def _get_chunk_data(imanager, coord): + """ + Based on `use_raw_data` read either raw data or processed data + """ + +def _get_active_edges(imanager, coord, edges_d): mapping = collect_agglomeration_data(imanager, coord) - active_edges_flag_d, isolated_ids = define_active_edges(raw_edges_d, mapping) + active_edges_flag_d, isolated_ids = define_active_edges(edges_d, mapping) chunk_edges_active = {} for edge_type in EDGE_TYPES: - edges = processed_edges_d[edge_type] + edges = edges_d[edge_type] active = active_edges_flag_d[edge_type] sv_ids1 = edges.sv_ids1[active] @@ -422,9 +428,6 @@ def collect_agglomeration_data(imanager, chunk_coord): def define_active_edges(edge_dict, mapping): """ Labels edges as within or across segments and extracts isolated ids - - :param edge_dict: dict of np.ndarrays - :param mapping: dict :return: dict of np.ndarrays, np.ndarray bool arrays; True: connected (within same segment) isolated node ids @@ -441,14 +444,14 @@ def _mapping_default(key): active = {} isolated = [[]] for k in edge_dict: - if len(edge_dict[k]["sv1"]) > 0: - agg_id_1 = mapping_vec(edge_dict[k]["sv1"]) + if len(edge_dict[k].node_ids1) > 0: + agg_id_1 = 
mapping_vec(edge_dict[k].node_ids1) else: - assert len(edge_dict[k]["sv2"]) == 0 + assert len(edge_dict[k].node_ids2) == 0 active[k] = np.array([], dtype=np.bool) continue - agg_id_2 = mapping_vec(edge_dict[k]["sv2"]) + agg_id_2 = mapping_vec(edge_dict[k].node_ids2) active[k] = agg_id_1 == agg_id_2 @@ -457,9 +460,9 @@ def _mapping_default(key): agg_2_m = agg_id_2 == -1 active[k][agg_1_m] = False - isolated.append(edge_dict[k]["sv1"][agg_1_m]) + isolated.append(edge_dict[k].node_ids1[agg_1_m]) if k == "in": - isolated.append(edge_dict[k]["sv2"][agg_2_m]) + isolated.append(edge_dict[k].node_ids2[agg_2_m]) return active, np.unique(np.concatenate(isolated).astype(basetypes.NODE_ID)) From 525c4c68186a84e0523db6aa2b807c2552d9a26e Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 11 Sep 2019 18:43:11 +0000 Subject: [PATCH 0220/1097] wip: improve ingestion --- pychunkedgraph/ingest/ran_ingestion_v2.py | 46 ++++++++++++++--------- 1 file changed, 29 insertions(+), 17 deletions(-) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 0eb7c2bcd..05dea70ce 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -139,6 +139,28 @@ def _create_atomic_chunk(im_info, chunk_coord): def create_atomic_chunk(imanager, coord): """ Creates single atomic chunk""" coord = np.array(list(coord), dtype=np.int) + chunk_edges_active, isolated_ids = _get_active_edges( + imanager, coord, chunk_edges_all + ) + add_atomic_edges(imanager.cg, coord, chunk_edges_active, isolated=isolated_ids) + + # to track workers completion, layer = 2 + return str(2) + + +def _get_chunk_data(imanager, coord): + """ + Helper to read either raw data or processed data + """ + chunk_edges = ( + _read_raw_edge_data(imanager, coord) + if imanager.use_raw_data + else _read_processed_edge_data(imanager, coord) + ) + + + +def _read_raw_edge_data(imanager, coord): edge_dict = collect_edge_data(imanager, coord) 
edge_dict = iu.postprocess_edge_data(imanager, edge_dict) @@ -146,7 +168,7 @@ def create_atomic_chunk(imanager, coord): # avoid writing to cloud storage if there are no edges # unnecessary write operation no_edges = True - chunk_edges_all = {} + chunk_edges = {} for edge_type in EDGE_TYPES: sv_ids1 = edge_dict[edge_type]["sv1"] sv_ids2 = edge_dict[edge_type]["sv2"] @@ -156,27 +178,17 @@ def create_atomic_chunk(imanager, coord): affinities = edge_dict[edge_type]["aff"] areas = edge_dict[edge_type]["area"] - chunk_edges_all[edge_type] = Edges( + chunk_edges[edge_type] = Edges( sv_ids1, sv_ids2, affinities=affinities, areas=areas ) no_edges = no_edges and not sv_ids1.size - - if imanager.use_raw_data and not no_edges: - put_chunk_edges(imanager.cg.edge_dir, coord, chunk_edges_all, ZSTD_LEVEL) - - chunk_edges_active, isolated_ids = _get_active_edges( - imanager, coord, chunk_edges_all - ) - add_atomic_edges(imanager.cg, coord, chunk_edges_active, isolated=isolated_ids) - - # to track workers completion, layer = 2 - return str(2) + if no_edges: + return None + return chunk_edges -def _get_chunk_data(imanager, coord): - """ - Based on `use_raw_data` read either raw data or processed data - """ +def _read_processed_edge_data(imanager, coord): + pass def _get_active_edges(imanager, coord, edges_d): From e8eb34fbdd00e52a00516bcd43c4f800d0c3a843 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 11 Sep 2019 18:49:50 +0000 Subject: [PATCH 0221/1097] wip: improve ingestion --- pychunkedgraph/ingest/ran_ingestion_v2.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 05dea70ce..3568365c5 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -74,7 +74,7 @@ def ingest_into_chunkedgraph( instance_id=instance_id, project_id=project_id, data_version=4, - use_raw_data=use_raw_data + use_raw_data=use_raw_data, 
) if layer < 3: @@ -139,6 +139,8 @@ def _create_atomic_chunk(im_info, chunk_coord): def create_atomic_chunk(imanager, coord): """ Creates single atomic chunk""" coord = np.array(list(coord), dtype=np.int) + + chunk_edges_all = _get_chunk_data(imanager, coord) chunk_edges_active, isolated_ids = _get_active_edges( imanager, coord, chunk_edges_all ) @@ -157,7 +159,9 @@ def _get_chunk_data(imanager, coord): if imanager.use_raw_data else _read_processed_edge_data(imanager, coord) ) - + if imanager.use_raw_data: + put_chunk_edges(imanager.cg.edge_dir, coord, chunk_edges, ZSTD_LEVEL) + return chunk_edges def _read_raw_edge_data(imanager, coord): From 487dcf7683779fd4307982de9a7183a5b9e3027a Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 11 Sep 2019 18:57:56 +0000 Subject: [PATCH 0222/1097] wip: improve ingestion --- pychunkedgraph/ingest/ran_ingestion_v2.py | 25 ++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 3568365c5..95b765ce5 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -140,12 +140,11 @@ def create_atomic_chunk(imanager, coord): """ Creates single atomic chunk""" coord = np.array(list(coord), dtype=np.int) - chunk_edges_all = _get_chunk_data(imanager, coord) + chunk_edges_all, mapping = _get_chunk_data(imanager, coord) chunk_edges_active, isolated_ids = _get_active_edges( - imanager, coord, chunk_edges_all + imanager, coord, chunk_edges_all, mapping ) add_atomic_edges(imanager.cg, coord, chunk_edges_active, isolated=isolated_ids) - # to track workers completion, layer = 2 return str(2) @@ -153,15 +152,21 @@ def create_atomic_chunk(imanager, coord): def _get_chunk_data(imanager, coord): """ Helper to read either raw data or processed data + If reading from raw data, save it as processed data """ chunk_edges = ( _read_raw_edge_data(imanager, coord) if 
imanager.use_raw_data else _read_processed_edge_data(imanager, coord) ) + mapping = ( + _read_raw_mapping(imanager, coord) + if imanager.use_raw_data + else _read_processed_mapping(imanager, coord) + ) if imanager.use_raw_data: put_chunk_edges(imanager.cg.edge_dir, coord, chunk_edges, ZSTD_LEVEL) - return chunk_edges + return chunk_edges, mapping def _read_raw_edge_data(imanager, coord): @@ -195,10 +200,16 @@ def _read_processed_edge_data(imanager, coord): pass -def _get_active_edges(imanager, coord, edges_d): - mapping = collect_agglomeration_data(imanager, coord) - active_edges_flag_d, isolated_ids = define_active_edges(edges_d, mapping) +def _read_raw_mapping(imanager, coord): + pass + +def _read_processed_mapping(imanager, coord): + pass + + +def _get_active_edges(imanager, coord, edges_d, mapping): + active_edges_flag_d, isolated_ids = define_active_edges(edges_d, mapping) chunk_edges_active = {} for edge_type in EDGE_TYPES: edges = edges_d[edge_type] From 03c29af511b031d5c1770020a96885a71c188e41 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 11 Sep 2019 22:32:59 +0000 Subject: [PATCH 0223/1097] wip: improve ingestion use different flags for edges and agglomeration --- pychunkedgraph/ingest/ingestionmanager.py | 14 ++++++++++---- pychunkedgraph/ingest/ran_ingestion_v2.py | 16 ++++++++++------ 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/pychunkedgraph/ingest/ingestionmanager.py b/pychunkedgraph/ingest/ingestionmanager.py index a0ff6108d..185cb164b 100644 --- a/pychunkedgraph/ingest/ingestionmanager.py +++ b/pychunkedgraph/ingest/ingestionmanager.py @@ -13,7 +13,8 @@ def __init__( instance_id=None, project_id=None, data_version=2, - use_raw_data=True, + use_raw_edge_data=True, + use_raw_agglomeration_data=True ): self._storage_path = storage_path self._cg_table_id = cg_table_id @@ -22,7 +23,8 @@ def __init__( self._cg = None self._n_layers = n_layers self._data_version = data_version - self._use_raw_data = use_raw_data + 
self._use_raw_edge_data = use_raw_edge_data + self._use_raw_agglomeration_data = use_raw_agglomeration_data @property def storage_path(self): @@ -109,8 +111,12 @@ def n_layers(self): return self._n_layers @property - def use_raw_data(self): - return self._use_raw_data + def use_raw_edge_data(self): + return self._use_raw_edge_data + + @property + def use_raw_agglomeration_data(self): + return self._use_raw_agglomeration_data def get_serialized_info(self): info = { diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 95b765ce5..2794b68c9 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -39,12 +39,15 @@ def ingest_into_chunkedgraph( edge_dir=None, n_chunks=None, is_new=True, - use_raw_data=True, + use_raw_edge_data=True, + use_raw_agglomeration_data=True ): """ - :param use_raw_data: + :param use_raw_edge_data: Set this to false if the edges have already been processed. - This is needed because processing edges and bulding chunkedgraph is completely de-coupled. + :param use_raw_agglomeration_data: + Set this to false if the agglomeration data has already been processed. + These are required because processing edges and bulding chunkedgraph is de-coupled. 
""" storage_path = storage_path.strip("/") ws_cv_path = ws_cv_path.strip("/") @@ -74,7 +77,8 @@ def ingest_into_chunkedgraph( instance_id=instance_id, project_id=project_id, data_version=4, - use_raw_data=use_raw_data, + use_raw_edge_data=use_raw_edge_data, + use_raw_agglomeration_data=use_raw_agglomeration_data ) if layer < 3: @@ -156,12 +160,12 @@ def _get_chunk_data(imanager, coord): """ chunk_edges = ( _read_raw_edge_data(imanager, coord) - if imanager.use_raw_data + if imanager.use_raw_edge_data else _read_processed_edge_data(imanager, coord) ) mapping = ( _read_raw_mapping(imanager, coord) - if imanager.use_raw_data + if imanager.use_raw_agglomeration_data else _read_processed_mapping(imanager, coord) ) if imanager.use_raw_data: From 323c51df76c4c8513fd25fb9f6962e3eee544908 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 11 Sep 2019 23:22:48 +0000 Subject: [PATCH 0224/1097] wip: improve ingestion --- .devcontainer/Dockerfile | 1 - pychunkedgraph/ingest/ran_ingestion_v2.py | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 3507c2f84..4855f1782 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,6 +1,5 @@ FROM tiangolo/uwsgi-nginx-flask:python3.6 - COPY override/timeout.conf /etc/nginx/conf.d/timeout.conf COPY override/supervisord.conf /etc/supervisor/conf.d/supervisord.conf COPY requirements.txt /app diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 2794b68c9..076ab3f85 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -168,8 +168,6 @@ def _get_chunk_data(imanager, coord): if imanager.use_raw_agglomeration_data else _read_processed_mapping(imanager, coord) ) - if imanager.use_raw_data: - put_chunk_edges(imanager.cg.edge_dir, coord, chunk_edges, ZSTD_LEVEL) return chunk_edges, mapping @@ -197,6 +195,7 @@ def _read_raw_edge_data(imanager, 
coord): no_edges = no_edges and not sv_ids1.size if no_edges: return None + put_chunk_edges(imanager.cg.edge_dir, coord, chunk_edges, ZSTD_LEVEL) return chunk_edges From 0a7a14885d17c04462c82b5a08a8a889d9001de7 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 12 Sep 2019 14:56:48 +0000 Subject: [PATCH 0225/1097] feat: read/write agglomeration mapping --- pychunkedgraph/ingest/ingestionmanager.py | 10 ++++- pychunkedgraph/ingest/ran_ingestion_v2.py | 50 ++++++++--------------- pychunkedgraph/io/agglomeration.py | 22 ++++++++++ 3 files changed, 48 insertions(+), 34 deletions(-) create mode 100644 pychunkedgraph/io/agglomeration.py diff --git a/pychunkedgraph/ingest/ingestionmanager.py b/pychunkedgraph/ingest/ingestionmanager.py index 185cb164b..30b44e25a 100644 --- a/pychunkedgraph/ingest/ingestionmanager.py +++ b/pychunkedgraph/ingest/ingestionmanager.py @@ -14,7 +14,8 @@ def __init__( project_id=None, data_version=2, use_raw_edge_data=True, - use_raw_agglomeration_data=True + use_raw_agglomeration_data=True, + agglomeration_dir=None ): self._storage_path = storage_path self._cg_table_id = cg_table_id @@ -25,6 +26,7 @@ def __init__( self._data_version = data_version self._use_raw_edge_data = use_raw_edge_data self._use_raw_agglomeration_data = use_raw_agglomeration_data + self._agglomeration_dir = agglomeration_dir @property def storage_path(self): @@ -116,7 +118,11 @@ def use_raw_edge_data(self): @property def use_raw_agglomeration_data(self): - return self._use_raw_agglomeration_data + return self._use_raw_agglomeration_data + + @property + def agglomeration_dir(self): + return self._agglomeration_dir def get_serialized_info(self): info = { diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 076ab3f85..8b1bbdf5f 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -4,6 +4,8 @@ import collections import itertools +import json +from typing import 
Dict, Tuple import pandas as pd import cloudvolume @@ -18,7 +20,8 @@ from .initialization.atomic_layer import add_atomic_edges from ..backend.definitions.edges import Edges, CX_CHUNK, TYPES as EDGE_TYPES from ..backend.utils import basetypes -from ..io.edges import put_chunk_edges +from ..io.edges import get_chunk_edges, put_chunk_edges +from ..io.agglomeration import get_chunk_agglomeration, put_chunk_agglomeration ZSTD_LEVEL = 17 @@ -36,11 +39,12 @@ def ingest_into_chunkedgraph( instance_id=None, project_id=None, layer=1, - edge_dir=None, n_chunks=None, is_new=True, + edge_dir=None, + agglomeration_dir=None, use_raw_edge_data=True, - use_raw_agglomeration_data=True + use_raw_agglomeration_data=True, ): """ :param use_raw_edge_data: @@ -78,7 +82,8 @@ def ingest_into_chunkedgraph( project_id=project_id, data_version=4, use_raw_edge_data=use_raw_edge_data, - use_raw_agglomeration_data=use_raw_agglomeration_data + use_raw_agglomeration_data=use_raw_agglomeration_data, + agglomeration_dir=agglomeration_dir ) if layer < 3: @@ -153,7 +158,7 @@ def create_atomic_chunk(imanager, coord): return str(2) -def _get_chunk_data(imanager, coord): +def _get_chunk_data(imanager, coord) -> Tuple[Dict, Dict]: """ Helper to read either raw data or processed data If reading from raw data, save it as processed data @@ -161,17 +166,17 @@ def _get_chunk_data(imanager, coord): chunk_edges = ( _read_raw_edge_data(imanager, coord) if imanager.use_raw_edge_data - else _read_processed_edge_data(imanager, coord) + else get_chunk_edges(imanager.cg.cv_edges_path, coord) ) mapping = ( - _read_raw_mapping(imanager, coord) + _read_raw_agglomeration_data(imanager, coord) if imanager.use_raw_agglomeration_data - else _read_processed_mapping(imanager, coord) + else get_chunk_agglomeration(imanager.agglomeration_dir, coord) ) return chunk_edges, mapping -def _read_raw_edge_data(imanager, coord): +def _read_raw_edge_data(imanager, coord) -> Dict: edge_dict = collect_edge_data(imanager, coord) edge_dict = 
iu.postprocess_edge_data(imanager, edge_dict) @@ -199,20 +204,8 @@ def _read_raw_edge_data(imanager, coord): return chunk_edges -def _read_processed_edge_data(imanager, coord): - pass - - -def _read_raw_mapping(imanager, coord): - pass - - -def _read_processed_mapping(imanager, coord): - pass - - def _get_active_edges(imanager, coord, edges_d, mapping): - active_edges_flag_d, isolated_ids = define_active_edges(edges_d, mapping) + active_edges_flag_d, isolated_ids = _define_active_edges(edges_d, mapping) chunk_edges_active = {} for edge_type in EDGE_TYPES: edges = edges_d[edge_type] @@ -397,7 +390,7 @@ def _read_agg_files(filenames, base_path): return edge_list -def collect_agglomeration_data(imanager, chunk_coord): +def _read_raw_agglomeration_data(imanager, chunk_coord): """ Collects agglomeration information & builds connected component mapping :param imanager: IngestionManager @@ -407,9 +400,7 @@ def collect_agglomeration_data(imanager, chunk_coord): """ subfolder = "remap" base_path = f"{imanager.storage_path}/{subfolder}/" - chunk_coord = np.array(chunk_coord) - chunk_id = imanager.cg.get_chunk_id( layer=1, x=chunk_coord[0], y=chunk_coord[1], z=chunk_coord[2] ) @@ -423,9 +414,7 @@ def collect_agglomeration_data(imanager, chunk_coord): for dim in range(3): diff = np.zeros([3], dtype=np.int) diff[dim] = d - adjacent_chunk_coord = chunk_coord + diff - adjacent_chunk_id = imanager.cg.get_chunk_id( layer=1, x=adjacent_chunk_coord[0], @@ -439,24 +428,21 @@ def collect_agglomeration_data(imanager, chunk_coord): f"done_{mip_level}_{x}_{y}_{z}_{adjacent_chunk_id}.data.zst" ) - # print(filenames) edge_list = _read_agg_files(filenames, base_path) - edges = np.concatenate(edge_list) - G = nx.Graph() G.add_edges_from(edges) ccs = nx.connected_components(G) - mapping = {} for i_cc, cc in enumerate(ccs): cc = list(cc) mapping.update(dict(zip(cc, [i_cc] * len(cc)))) + put_chunk_agglomeration(imanager.agglomeration_dir, mapping, chunk_coord) return mapping -def 
define_active_edges(edge_dict, mapping): +def _define_active_edges(edge_dict, mapping): """ Labels edges as within or across segments and extracts isolated ids :return: dict of np.ndarrays, np.ndarray bool arrays; True: connected (within same segment) diff --git a/pychunkedgraph/io/agglomeration.py b/pychunkedgraph/io/agglomeration.py new file mode 100644 index 000000000..ae8f143ac --- /dev/null +++ b/pychunkedgraph/io/agglomeration.py @@ -0,0 +1,22 @@ +import json + +from cloudvolume.storage import SimpleStorage + + +def put_chunk_agglomeration(agglomeration_dir, mapping, chunk_coord): + # filename format - chunk_x_y_z.serliazation + file_name = f"chunk_{'_'.join(str(coord) for coord in chunk_coord)}.json" + with SimpleStorage(agglomeration_dir) as storage: + storage.put_file( + file_path=file_name, + content=json.dumps(mapping).encode("utf-8"), + compress="gzip", + cache_control="no-cache", + ) + +def get_chunk_agglomeration(agglomeration_dir, chunk_coord): + file_name = f"chunk_{'_'.join(str(coord) for coord in chunk_coord)}.json" + with SimpleStorage(agglomeration_dir) as storage: + content = storage.get_file(file_name) + return json.loads(content.decode('utf-8')) + From 6acffc47007888ccdca0260e9a77be905ab2502b Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 12 Sep 2019 15:21:54 +0000 Subject: [PATCH 0226/1097] fix: convert key type to int --- pychunkedgraph/io/agglomeration.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pychunkedgraph/io/agglomeration.py b/pychunkedgraph/io/agglomeration.py index ae8f143ac..9c2a8750d 100644 --- a/pychunkedgraph/io/agglomeration.py +++ b/pychunkedgraph/io/agglomeration.py @@ -18,5 +18,6 @@ def get_chunk_agglomeration(agglomeration_dir, chunk_coord): file_name = f"chunk_{'_'.join(str(coord) for coord in chunk_coord)}.json" with SimpleStorage(agglomeration_dir) as storage: content = storage.get_file(file_name) - return json.loads(content.decode('utf-8')) + mapping = 
json.loads(content.decode('utf-8')) + return {int(key): mapping[key] for key in mapping} From 874659d2f5a167f863243ffb24ed356b4d5f97c7 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 12 Sep 2019 15:38:16 +0000 Subject: [PATCH 0227/1097] fix: too many parameters, use dict --- pychunkedgraph/ingest/ran_ingestion_v2.py | 29 ++++++++++++----------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 8b1bbdf5f..29fa8a001 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -40,18 +40,19 @@ def ingest_into_chunkedgraph( project_id=None, layer=1, n_chunks=None, - is_new=True, - edge_dir=None, - agglomeration_dir=None, - use_raw_edge_data=True, - use_raw_agglomeration_data=True, + is_new: bool = True, + data_source: Dict = None, ): """ - :param use_raw_edge_data: - Set this to false if the edges have already been processed. - :param use_raw_agglomeration_data: - Set this to false if the agglomeration data has already been processed. - These are required because processing edges and bulding chunkedgraph is de-coupled. + :param data_source: + :type Dict: + `data_source` can have the following keys. 
+ Use these options to use either raw data or + processed data when building the chunkedgraph + edge_dir=None + agglomeration_dir=None + use_raw_edge_data=True + use_raw_agglomeration_data=True """ storage_path = storage_path.strip("/") ws_cv_path = ws_cv_path.strip("/") @@ -70,7 +71,7 @@ def ingest_into_chunkedgraph( fan_out=fan_out, instance_id=instance_id, project_id=project_id, - edge_dir=edge_dir, + edge_dir=data_source["edge_dir"], is_new=is_new, ) @@ -81,9 +82,9 @@ def ingest_into_chunkedgraph( instance_id=instance_id, project_id=project_id, data_version=4, - use_raw_edge_data=use_raw_edge_data, - use_raw_agglomeration_data=use_raw_agglomeration_data, - agglomeration_dir=agglomeration_dir + use_raw_edge_data=data_source["use_raw_edge_data"], + use_raw_agglomeration_data=data_source["use_raw_agglomeration_data"], + agglomeration_dir=data_source["agglomeration_dir"], ) if layer < 3: From 0559595cd54b7af66ab5895087ad14ed54d54a09 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 12 Sep 2019 15:55:48 +0000 Subject: [PATCH 0228/1097] fix: use better param name --- pychunkedgraph/ingest/ran_ingestion_v2.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 29fa8a001..a1dbb502c 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -41,12 +41,12 @@ def ingest_into_chunkedgraph( layer=1, n_chunks=None, is_new: bool = True, - data_source: Dict = None, + data_config: Dict = None, ): """ - :param data_source: + :param data_config: :type Dict: - `data_source` can have the following keys. + `data_config` can have the following keys. 
Use these options to use either raw data or processed data when building the chunkedgraph edge_dir=None @@ -71,7 +71,7 @@ def ingest_into_chunkedgraph( fan_out=fan_out, instance_id=instance_id, project_id=project_id, - edge_dir=data_source["edge_dir"], + edge_dir=data_config["edge_dir"], is_new=is_new, ) @@ -82,9 +82,9 @@ def ingest_into_chunkedgraph( instance_id=instance_id, project_id=project_id, data_version=4, - use_raw_edge_data=data_source["use_raw_edge_data"], - use_raw_agglomeration_data=data_source["use_raw_agglomeration_data"], - agglomeration_dir=data_source["agglomeration_dir"], + use_raw_edge_data=data_config["use_raw_edge_data"], + use_raw_agglomeration_data=data_config["use_raw_agglomeration_data"], + agglomeration_dir=data_config["agglomeration_dir"], ) if layer < 3: From c609ef5d3af048d9101fc02d3273304f115290c8 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 12 Sep 2019 17:41:21 +0000 Subject: [PATCH 0229/1097] save mapping data, ingest test run --- pychunkedgraph/app/cg_app_blueprint.py | 10 +++++----- pychunkedgraph/ingest/cli.py | 21 ++++++++++++++------- pychunkedgraph/ingest/ran_ingestion_v2.py | 10 +++++----- 3 files changed, 24 insertions(+), 17 deletions(-) diff --git a/pychunkedgraph/app/cg_app_blueprint.py b/pychunkedgraph/app/cg_app_blueprint.py index ae47bbfa4..b2d11bd00 100644 --- a/pychunkedgraph/app/cg_app_blueprint.py +++ b/pychunkedgraph/app/cg_app_blueprint.py @@ -14,7 +14,7 @@ from pychunkedgraph.app import app_utils, meshing_app_blueprint from pychunkedgraph.backend import chunkedgraph_exceptions as cg_exceptions, \ chunkedgraph_comp as cg_comp -from middle_auth_client import auth_required, auth_requires_roles +# from middle_auth_client import auth_required, auth_requires_roles __version__ = 'fafb.1.16' bp = Blueprint('pychunkedgraph', __name__, url_prefix="/segmentation") @@ -196,7 +196,7 @@ def handle_root_main(table_id, atomic_id, timestamp): ### MERGE 
---------------------------------------------------------------------- @bp.route('/1.0//graph/merge', methods=['POST', 'GET']) -@auth_requires_roles('edit_all') +# @auth_requires_roles('edit_all') def handle_merge(table_id): current_app.request_type = "merge" @@ -277,7 +277,7 @@ def handle_merge(table_id): ### SPLIT ---------------------------------------------------------------------- @bp.route('/1.0//graph/split', methods=['POST', 'GET']) -@auth_requires_roles('edit_all') +# @auth_requires_roles('edit_all') def handle_split(table_id): current_app.request_type = "split" @@ -356,7 +356,7 @@ def handle_split(table_id): @bp.route("/1.0//graph/undo", methods=["POST"]) -@auth_requires_roles("edit_all") +# @auth_requires_roles("edit_all") def handle_undo(table_id): current_app.request_type = "undo" @@ -400,7 +400,7 @@ def handle_undo(table_id): @bp.route("/1.0//graph/redo", methods=["POST"]) -@auth_requires_roles("edit_all") +# @auth_requires_roles("edit_all") def handle_redo(table_id): current_app.request_type = "redo" diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index 17cd35ac7..0dcc9b621 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -18,7 +18,7 @@ def handle_job_result(*args, **kwargs): """handle worker return""" global task_count - result = np.frombuffer(args[0]['data'], dtype=np.int32) + result = np.frombuffer(args[0]["data"], dtype=np.int32) layer = result[0] task_count += 1 @@ -29,29 +29,36 @@ def handle_job_result(*args, **kwargs): @ingest_cli.command("table") @click.argument("storage_path", type=str) @click.argument("ws_cv_path", type=str) -@click.argument("edge_dir", type=str) +@click.argument("cv_path", type=str) @click.argument("cg_table_id", type=str) -@click.argument("layer", type=int, default=None) -def run_ingest(storage_path, ws_cv_path, cg_table_id, edge_dir, layer): +@click.argument("layer", type=int) +def run_ingest(storage_path, ws_cv_path, cv_path, cg_table_id, layer): """ run ingestion 
job eg: flask ingest table \ gs://ranl/scratch/pinky100_ca_com/agg \ gs://neuroglancer/pinky100_v0/ws/pinky100_ca_com \ gs://akhilesh-test/edges/pinky100-ingest \ - akhilesh-pinky100-1 \ + akhilesh-pinky100-2 \ 2 """ chunk_pubsub = current_app.redis.pubsub() chunk_pubsub.subscribe(**{INGEST_CHANNEL: handle_job_result}) chunk_pubsub.run_in_thread(sleep_time=0.1) + data_config = { + "edge_dir": f"{cv_path}/{cg_table_id}/edges", + "agglomeration_dir": f"{cv_path}/{cg_table_id}/agglomeration", + "use_raw_edge_data": True, + "use_raw_agglomeration_data": True, + } + ingest_into_chunkedgraph( storage_path=storage_path, ws_cv_path=ws_cv_path, cg_table_id=cg_table_id, - edge_dir=edge_dir, - layer=layer + layer=layer, + data_config=data_config, ) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index a1dbb502c..13e8bcf92 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -38,7 +38,7 @@ def ingest_into_chunkedgraph( size=None, instance_id=None, project_id=None, - layer=1, + layer=2, n_chunks=None, is_new: bool = True, data_config: Dict = None, @@ -49,9 +49,9 @@ def ingest_into_chunkedgraph( `data_config` can have the following keys. 
Use these options to use either raw data or processed data when building the chunkedgraph - edge_dir=None - agglomeration_dir=None - use_raw_edge_data=True + edge_dir=None, + agglomeration_dir=None, + use_raw_edge_data=True, use_raw_agglomeration_data=True """ storage_path = storage_path.strip("/") @@ -154,7 +154,7 @@ def create_atomic_chunk(imanager, coord): chunk_edges_active, isolated_ids = _get_active_edges( imanager, coord, chunk_edges_all, mapping ) - add_atomic_edges(imanager.cg, coord, chunk_edges_active, isolated=isolated_ids) + # add_atomic_edges(imanager.cg, coord, chunk_edges_active, isolated=isolated_ids) # to track workers completion, layer = 2 return str(2) From 93a190700900110230344391e623567595cbe49e Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 12 Sep 2019 18:35:17 +0000 Subject: [PATCH 0230/1097] fix: include new properties in ingestion manager serialized info --- pychunkedgraph/ingest/cli.py | 2 +- pychunkedgraph/ingest/ingestionmanager.py | 7 +++++-- pychunkedgraph/ingest/ran_ingestion_v2.py | 2 +- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index 0dcc9b621..8311a0e9e 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -38,7 +38,7 @@ def run_ingest(storage_path, ws_cv_path, cv_path, cg_table_id, layer): eg: flask ingest table \ gs://ranl/scratch/pinky100_ca_com/agg \ gs://neuroglancer/pinky100_v0/ws/pinky100_ca_com \ - gs://akhilesh-test/edges/pinky100-ingest \ + gs://akhilesh-pcg \ akhilesh-pinky100-2 \ 2 """ diff --git a/pychunkedgraph/ingest/ingestionmanager.py b/pychunkedgraph/ingest/ingestionmanager.py index 30b44e25a..abcae28a2 100644 --- a/pychunkedgraph/ingest/ingestionmanager.py +++ b/pychunkedgraph/ingest/ingestionmanager.py @@ -15,7 +15,7 @@ def __init__( data_version=2, use_raw_edge_data=True, use_raw_agglomeration_data=True, - agglomeration_dir=None + agglomeration_dir=None, ): self._storage_path = storage_path 
self._cg_table_id = cg_table_id @@ -122,7 +122,7 @@ def use_raw_agglomeration_data(self): @property def agglomeration_dir(self): - return self._agglomeration_dir + return self._agglomeration_dir def get_serialized_info(self): info = { @@ -132,6 +132,9 @@ def get_serialized_info(self): "instance_id": self._instance_id, "project_id": self._project_id, "data_version": self.data_version, + "use_raw_edge_data": self._use_raw_edge_data, + "use_raw_agglomeration_data": self._use_raw_agglomeration_data, + "agglomeration_dir": self._agglomeration_dir, } return info diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 13e8bcf92..75ec156a1 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -201,7 +201,7 @@ def _read_raw_edge_data(imanager, coord) -> Dict: no_edges = no_edges and not sv_ids1.size if no_edges: return None - put_chunk_edges(imanager.cg.edge_dir, coord, chunk_edges, ZSTD_LEVEL) + put_chunk_edges(imanager.cg.cv_edges_path, coord, chunk_edges, ZSTD_LEVEL) return chunk_edges From 5b1a88a65a973c499932c6ac25398e3be081a474 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 12 Sep 2019 19:07:03 +0000 Subject: [PATCH 0231/1097] feat: use protobuf for mapping --- pychunkedgraph/io/agglomeration.py | 32 ++++++-- pychunkedgraph/io/edges.py | 2 +- pychunkedgraph/io/protobuf/chunkMapping.proto | 8 ++ .../io/protobuf/chunkMapping_pb2.py | 77 +++++++++++++++++++ 4 files changed, 113 insertions(+), 6 deletions(-) create mode 100644 pychunkedgraph/io/protobuf/chunkMapping.proto create mode 100644 pychunkedgraph/io/protobuf/chunkMapping_pb2.py diff --git a/pychunkedgraph/io/agglomeration.py b/pychunkedgraph/io/agglomeration.py index 9c2a8750d..fc3b047dc 100644 --- a/pychunkedgraph/io/agglomeration.py +++ b/pychunkedgraph/io/agglomeration.py @@ -1,23 +1,45 @@ import json +from typing import Dict +import numpy as np from cloudvolume.storage import SimpleStorage +from 
.protobuf.chunkMapping_pb2 import ChunkMappingMsg +from ..backend.utils import basetypes -def put_chunk_agglomeration(agglomeration_dir, mapping, chunk_coord): + +def serialize(mapping: Dict) -> ChunkMappingMsg: + supervoxels = np.array(mapping.keys(), dtype=basetypes.NODE_ID) + components = np.array(mapping.values(), dtype=int) + mapping_message = ChunkMappingMsg() + mapping_message.supervoxels = supervoxels.tobytes() + mapping_message.components = components.tobytes() + return mapping_message + + +def deserialize(mapping_message: ChunkMappingMsg) -> Dict: + supervoxels = np.frombuffer(mapping_message.supervoxels, basetypes.NODE_ID) + components = np.frombuffer(mapping_message.components, basetypes.NODE_ID) + return dict(zip(supervoxels, components)) + + +def put_chunk_agglomeration(agglomeration_dir, mapping, chunk_coord) -> None: # filename format - chunk_x_y_z.serliazation + mapping_message = serialize(mapping) file_name = f"chunk_{'_'.join(str(coord) for coord in chunk_coord)}.json" with SimpleStorage(agglomeration_dir) as storage: storage.put_file( file_path=file_name, - content=json.dumps(mapping).encode("utf-8"), + content=mapping_message.SerializeToString(), compress="gzip", cache_control="no-cache", ) + def get_chunk_agglomeration(agglomeration_dir, chunk_coord): file_name = f"chunk_{'_'.join(str(coord) for coord in chunk_coord)}.json" with SimpleStorage(agglomeration_dir) as storage: - content = storage.get_file(file_name) - mapping = json.loads(content.decode('utf-8')) - return {int(key): mapping[key] for key in mapping} + mapping_message = ChunkMappingMsg() + mapping_message.ParseFromString(storage.get_file(file_name)) + return deserialize(mapping_message) diff --git a/pychunkedgraph/io/edges.py b/pychunkedgraph/io/edges.py index ad959884a..7e1af9d42 100644 --- a/pychunkedgraph/io/edges.py +++ b/pychunkedgraph/io/edges.py @@ -11,10 +11,10 @@ from cloudvolume import Storage from cloudvolume.storage import SimpleStorage +from .protobuf.chunkEdges_pb2 
import EdgesMsg, ChunkEdgesMsg from ..backend.utils.edge_utils import concatenate_chunk_edges from ..backend.definitions.edges import Edges, IN_CHUNK, BT_CHUNK, CX_CHUNK from ..backend.utils import basetypes -from .protobuf.chunkEdges_pb2 import EdgesMsg, ChunkEdgesMsg def serialize(edges: Edges) -> EdgesMsg: diff --git a/pychunkedgraph/io/protobuf/chunkMapping.proto b/pychunkedgraph/io/protobuf/chunkMapping.proto new file mode 100644 index 000000000..38c0e3f15 --- /dev/null +++ b/pychunkedgraph/io/protobuf/chunkMapping.proto @@ -0,0 +1,8 @@ +syntax = "proto3"; + +package mapping; + +message ChunkMappingMsg { + bytes supervoxels = 1; + bytes components = 2; +} \ No newline at end of file diff --git a/pychunkedgraph/io/protobuf/chunkMapping_pb2.py b/pychunkedgraph/io/protobuf/chunkMapping_pb2.py new file mode 100644 index 000000000..8801bfabb --- /dev/null +++ b/pychunkedgraph/io/protobuf/chunkMapping_pb2.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: chunkMapping.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='chunkMapping.proto', + package='mapping', + syntax='proto3', + serialized_options=None, + serialized_pb=_b('\n\x12\x63hunkMapping.proto\x12\x07mapping\":\n\x0f\x43hunkMappingMsg\x12\x13\n\x0bsupervoxels\x18\x01 \x01(\x0c\x12\x12\n\ncomponents\x18\x02 \x01(\x0c\x62\x06proto3') +) + + + + +_CHUNKMAPPINGMSG = _descriptor.Descriptor( + name='ChunkMappingMsg', + full_name='mapping.ChunkMappingMsg', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='supervoxels', full_name='mapping.ChunkMappingMsg.supervoxels', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='components', full_name='mapping.ChunkMappingMsg.components', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=31, + serialized_end=89, +) + +DESCRIPTOR.message_types_by_name['ChunkMappingMsg'] = _CHUNKMAPPINGMSG +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + 
+ChunkMappingMsg = _reflection.GeneratedProtocolMessageType('ChunkMappingMsg', (_message.Message,), { + 'DESCRIPTOR' : _CHUNKMAPPINGMSG, + '__module__' : 'chunkMapping_pb2' + # @@protoc_insertion_point(class_scope:mapping.ChunkMappingMsg) + }) +_sym_db.RegisterMessage(ChunkMappingMsg) + + +# @@protoc_insertion_point(module_scope) From 9d99bdfc38f45e39cf4136c3a0c8545683b7d3a3 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 12 Sep 2019 19:08:59 +0000 Subject: [PATCH 0232/1097] feat: use protobuf for mapping update file name --- pychunkedgraph/io/agglomeration.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pychunkedgraph/io/agglomeration.py b/pychunkedgraph/io/agglomeration.py index fc3b047dc..5da643c39 100644 --- a/pychunkedgraph/io/agglomeration.py +++ b/pychunkedgraph/io/agglomeration.py @@ -24,9 +24,9 @@ def deserialize(mapping_message: ChunkMappingMsg) -> Dict: def put_chunk_agglomeration(agglomeration_dir, mapping, chunk_coord) -> None: - # filename format - chunk_x_y_z.serliazation + # filename format - mapping_x_y_z.serliazation mapping_message = serialize(mapping) - file_name = f"chunk_{'_'.join(str(coord) for coord in chunk_coord)}.json" + file_name = f"mapping_{'_'.join(str(coord) for coord in chunk_coord)}.proto" with SimpleStorage(agglomeration_dir) as storage: storage.put_file( file_path=file_name, @@ -37,7 +37,8 @@ def put_chunk_agglomeration(agglomeration_dir, mapping, chunk_coord) -> None: def get_chunk_agglomeration(agglomeration_dir, chunk_coord): - file_name = f"chunk_{'_'.join(str(coord) for coord in chunk_coord)}.json" + # filename format - mapping_x_y_z.serliazation + file_name = f"mapping_{'_'.join(str(coord) for coord in chunk_coord)}.proto" with SimpleStorage(agglomeration_dir) as storage: mapping_message = ChunkMappingMsg() mapping_message.ParseFromString(storage.get_file(file_name)) From b8f9ab48f3566b565406e38643bc95a1c773be3e Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 13 Sep 2019 
13:38:02 +0000 Subject: [PATCH 0233/1097] return type annotation --- pychunkedgraph/ingest/cli.py | 6 +++--- pychunkedgraph/ingest/ran_ingestion_v2.py | 2 +- pychunkedgraph/io/agglomeration.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index 8311a0e9e..f5f72a055 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -39,7 +39,7 @@ def run_ingest(storage_path, ws_cv_path, cv_path, cg_table_id, layer): gs://ranl/scratch/pinky100_ca_com/agg \ gs://neuroglancer/pinky100_v0/ws/pinky100_ca_com \ gs://akhilesh-pcg \ - akhilesh-pinky100-2 \ + akhilesh-pinky100-0 \ 2 """ chunk_pubsub = current_app.redis.pubsub() @@ -47,9 +47,9 @@ def run_ingest(storage_path, ws_cv_path, cv_path, cg_table_id, layer): chunk_pubsub.run_in_thread(sleep_time=0.1) data_config = { - "edge_dir": f"{cv_path}/{cg_table_id}/edges", + "edge_dir": f"{cv_path}/akhilesh-pinky100-1/edges", "agglomeration_dir": f"{cv_path}/{cg_table_id}/agglomeration", - "use_raw_edge_data": True, + "use_raw_edge_data": False, "use_raw_agglomeration_data": True, } diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 75ec156a1..662b0c3da 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -154,7 +154,7 @@ def create_atomic_chunk(imanager, coord): chunk_edges_active, isolated_ids = _get_active_edges( imanager, coord, chunk_edges_all, mapping ) - # add_atomic_edges(imanager.cg, coord, chunk_edges_active, isolated=isolated_ids) + add_atomic_edges(imanager.cg, coord, chunk_edges_active, isolated=isolated_ids) # to track workers completion, layer = 2 return str(2) diff --git a/pychunkedgraph/io/agglomeration.py b/pychunkedgraph/io/agglomeration.py index 5da643c39..34bfd5cf0 100644 --- a/pychunkedgraph/io/agglomeration.py +++ b/pychunkedgraph/io/agglomeration.py @@ -36,7 +36,7 @@ def 
put_chunk_agglomeration(agglomeration_dir, mapping, chunk_coord) -> None: ) -def get_chunk_agglomeration(agglomeration_dir, chunk_coord): +def get_chunk_agglomeration(agglomeration_dir, chunk_coord) -> Dict: # filename format - mapping_x_y_z.serliazation file_name = f"mapping_{'_'.join(str(coord) for coord in chunk_coord)}.proto" with SimpleStorage(agglomeration_dir) as storage: From 6fa6f0394cdb201052035265d28d62eeeb1df197 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 13 Sep 2019 14:50:33 +0000 Subject: [PATCH 0234/1097] fix: do not create mapping file if chunk is empty --- pychunkedgraph/backend/utils/edge_utils.py | 9 +++++---- pychunkedgraph/ingest/cli.py | 2 +- pychunkedgraph/ingest/ran_ingestion_v2.py | 13 +++++++------ pychunkedgraph/io/agglomeration.py | 9 ++++++--- 4 files changed, 19 insertions(+), 14 deletions(-) diff --git a/pychunkedgraph/backend/utils/edge_utils.py b/pychunkedgraph/backend/utils/edge_utils.py index f92ea8b6d..ffd840b67 100644 --- a/pychunkedgraph/backend/utils/edge_utils.py +++ b/pychunkedgraph/backend/utils/edge_utils.py @@ -7,6 +7,7 @@ import numpy as np +from .basetypes import NODE_ID, EDGE_AFFINITY, EDGE_AREA from ...utils.general import reverse_dictionary from ..definitions.edges import Edges, IN_CHUNK, BT_CHUNK, CX_CHUNK from ..connectivity.search import check_reachability @@ -17,10 +18,10 @@ def concatenate_chunk_edges(chunk_edge_dicts: List) -> Dict: """combine edge_dicts of multiple chunks into one edge_dict""" edges_dict = {} for edge_type in [IN_CHUNK, BT_CHUNK, CX_CHUNK]: - sv_ids1 = [] - sv_ids2 = [] - affinities = [] - areas = [] + sv_ids1 = [np.array([], dtype=NODE_ID)] + sv_ids2 = [np.array([], dtype=NODE_ID)] + affinities = [np.array([], dtype=EDGE_AFFINITY)] + areas = [np.array([], dtype=EDGE_AREA)] for edge_d in chunk_edge_dicts: edges = edge_d[edge_type] sv_ids1.append(edges.node_ids1) diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index f5f72a055..a2ddfe53f 100644 --- 
a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -39,7 +39,7 @@ def run_ingest(storage_path, ws_cv_path, cv_path, cg_table_id, layer): gs://ranl/scratch/pinky100_ca_com/agg \ gs://neuroglancer/pinky100_v0/ws/pinky100_ca_com \ gs://akhilesh-pcg \ - akhilesh-pinky100-0 \ + akhilesh-pinky100-2 \ 2 """ chunk_pubsub = current_app.redis.pubsub() diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 662b0c3da..b364ae08e 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -167,7 +167,7 @@ def _get_chunk_data(imanager, coord) -> Tuple[Dict, Dict]: chunk_edges = ( _read_raw_edge_data(imanager, coord) if imanager.use_raw_edge_data - else get_chunk_edges(imanager.cg.cv_edges_path, coord) + else get_chunk_edges(imanager.cg.cv_edges_path, [coord]) ) mapping = ( _read_raw_agglomeration_data(imanager, coord) @@ -178,7 +178,7 @@ def _get_chunk_data(imanager, coord) -> Tuple[Dict, Dict]: def _read_raw_edge_data(imanager, coord) -> Dict: - edge_dict = collect_edge_data(imanager, coord) + edge_dict = _collect_edge_data(imanager, coord) edge_dict = iu.postprocess_edge_data(imanager, edge_dict) # flag to check if chunk has edges @@ -212,8 +212,8 @@ def _get_active_edges(imanager, coord, edges_d, mapping): edges = edges_d[edge_type] active = active_edges_flag_d[edge_type] - sv_ids1 = edges.sv_ids1[active] - sv_ids2 = edges.sv_ids2[active] + sv_ids1 = edges.node_ids1[active] + sv_ids2 = edges.node_ids2[active] affinities = edges.affinities[active] areas = edges.areas[active] chunk_edges_active[edge_type] = Edges( @@ -255,7 +255,7 @@ def _get_cont_chunk_coords(imanager, chunk_coord_a, chunk_coord_b): return c_chunk_coords -def collect_edge_data(imanager, chunk_coord): +def _collect_edge_data(imanager, chunk_coord): """ Loads edge for single chunk :param imanager: IngestionManager @@ -439,7 +439,8 @@ def _read_raw_agglomeration_data(imanager, chunk_coord): cc = 
list(cc)
         mapping.update(dict(zip(cc, [i_cc] * len(cc))))
 
-    put_chunk_agglomeration(imanager.agglomeration_dir, mapping, chunk_coord)
+    if mapping:
+        put_chunk_agglomeration(imanager.agglomeration_dir, mapping, chunk_coord)
     return mapping
 
 
diff --git a/pychunkedgraph/io/agglomeration.py b/pychunkedgraph/io/agglomeration.py
index 34bfd5cf0..fbfdc7c8b 100644
--- a/pychunkedgraph/io/agglomeration.py
+++ b/pychunkedgraph/io/agglomeration.py
@@ -9,8 +9,8 @@
 
 
 def serialize(mapping: Dict) -> ChunkMappingMsg:
-    supervoxels = np.array(mapping.keys(), dtype=basetypes.NODE_ID)
-    components = np.array(mapping.values(), dtype=int)
+    supervoxels = np.array(list(mapping.keys()), dtype=basetypes.NODE_ID)
+    components = np.array(list(mapping.values()), dtype=int)
     mapping_message = ChunkMappingMsg()
     mapping_message.supervoxels = supervoxels.tobytes()
     mapping_message.components = components.tobytes()
@@ -40,7 +40,10 @@ def get_chunk_agglomeration(agglomeration_dir, chunk_coord) -> Dict:
     # filename format - mapping_x_y_z.serliazation
     file_name = f"mapping_{'_'.join(str(coord) for coord in chunk_coord)}.proto"
     with SimpleStorage(agglomeration_dir) as storage:
+        content = storage.get_file(file_name)
+        if not content:
+            return {}
         mapping_message = ChunkMappingMsg()
-        mapping_message.ParseFromString(storage.get_file(file_name))
+        mapping_message.ParseFromString(content)
         return deserialize(mapping_message)
 

From 1b36098ebc14dad43a0c275b966dc3a74fa18fb6 Mon Sep 17 00:00:00 2001
From: Akhilesh Halageri
Date: Fri, 13 Sep 2019 18:27:42 +0000
Subject: [PATCH 0235/1097] feat: display busy worker count in redis status cli

---
 pychunkedgraph/app/redis_cli.py | 9 +++++++--
 pychunkedgraph/ingest/cli.py | 2 +-
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/pychunkedgraph/app/redis_cli.py b/pychunkedgraph/app/redis_cli.py
index 6431d14b9..ca56cf416 100644
--- a/pychunkedgraph/app/redis_cli.py
+++ b/pychunkedgraph/app/redis_cli.py
@@ -5,7 +5,9 @@
 import click
 from redis import Redis
-from rq import
Queue, Worker +from rq import Queue +from rq import Worker +from rq.worker import WorkerStatus from rq.job import Job from flask import current_app from flask.cli import AppGroup @@ -26,7 +28,10 @@ def get_status(queue="test"): workers = Worker.all(queue=q) print(f"Queue name \t: {queue}") print(f"Jobs queued \t: {len(q)}") - print(f"Workers count \t: {len(workers)}") + print(f"Workers total \t: {len(workers)}") + print( + f"Workers busy \t: {sum([worker.get_state() == WorkerStatus.BUSY for worker in workers])}" + ) print(f"Jobs failed \t: {q.failed_job_registry.count}") diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index a2ddfe53f..f5f72a055 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -39,7 +39,7 @@ def run_ingest(storage_path, ws_cv_path, cv_path, cg_table_id, layer): gs://ranl/scratch/pinky100_ca_com/agg \ gs://neuroglancer/pinky100_v0/ws/pinky100_ca_com \ gs://akhilesh-pcg \ - akhilesh-pinky100-2 \ + akhilesh-pinky100-0 \ 2 """ chunk_pubsub = current_app.redis.pubsub() From 45233cfbd9ede0fc21739c138e3301db619bf586 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 13 Sep 2019 18:42:18 +0000 Subject: [PATCH 0236/1097] make busy count optional --- pychunkedgraph/app/redis_cli.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/pychunkedgraph/app/redis_cli.py b/pychunkedgraph/app/redis_cli.py index ca56cf416..7c498dcd0 100644 --- a/pychunkedgraph/app/redis_cli.py +++ b/pychunkedgraph/app/redis_cli.py @@ -23,15 +23,17 @@ @redis_cli.command("status") @click.argument("queue", type=str, default="test") -def get_status(queue="test"): +@click.option("--show-busy", is_flag=True) +def get_status(queue, show_busy): + print("NOTE: Use --show-busy to display count of non idle workers\n") q = Queue(queue, connection=connection) - workers = Worker.all(queue=q) print(f"Queue name \t: {queue}") print(f"Jobs queued \t: {len(q)}") - print(f"Workers total \t: {len(workers)}") - 
print( - f"Workers busy \t: {sum([worker.get_state() == WorkerStatus.BUSY for worker in workers])}" - ) + print(f"Workers total \t: {Worker.count(queue=q)}") + if show_busy: + workers = Worker.all(queue=q) + count = sum([worker.get_state() == WorkerStatus.BUSY for worker in workers]) + print(f"Workers busy \t: {count}") print(f"Jobs failed \t: {q.failed_job_registry.count}") From bc8a5e5feb243fc9893f25f7c65c7ffd319e91a6 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 13 Sep 2019 18:52:23 +0000 Subject: [PATCH 0237/1097] feat: redis cli requeue jobs --- pychunkedgraph/app/redis_cli.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/pychunkedgraph/app/redis_cli.py b/pychunkedgraph/app/redis_cli.py index 7c498dcd0..ac3f7c5c3 100644 --- a/pychunkedgraph/app/redis_cli.py +++ b/pychunkedgraph/app/redis_cli.py @@ -2,6 +2,7 @@ cli for redis jobs """ import os +import sys import click from redis import Redis @@ -9,6 +10,8 @@ from rq import Worker from rq.worker import WorkerStatus from rq.job import Job +from rq.exceptions import InvalidJobOperationError +from rq.registry import FailedJobRegistry from flask import current_app from flask.cli import AppGroup @@ -67,5 +70,34 @@ def empty_queue(queue): print(f"{job_count} jobs removed from {queue}.") +@redis_cli.command("requeue") +@click.argument("queue", type=str) +@click.option("--all", "-a", is_flag=True, help="Requeue all failed jobs") +@click.argument("job_ids", nargs=-1) +def requeue(queue, all, job_ids): + """Requeue failed jobs.""" + failed_job_registry = FailedJobRegistry(queue, connection=connection) + if all: + job_ids = failed_job_registry.get_job_ids() + + if not job_ids: + click.echo("Nothing to do") + sys.exit(0) + + click.echo("Requeueing {0} jobs from failed queue".format(len(job_ids))) + fail_count = 0 + for job_id in job_ids: + try: + failed_job_registry.requeue(job_id) + except InvalidJobOperationError: + fail_count += 1 + + if fail_count > 0: + click.secho( + 
"Unable to requeue {0} jobs from failed job registry".format(fail_count), + fg="red", + ) + + def init_redis_cmds(app): app.cli.add_command(redis_cli) From cb670bbe9e79473a6ea04454b7edbf46cec4f012 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 13 Sep 2019 18:58:48 +0000 Subject: [PATCH 0238/1097] f strings --- pychunkedgraph/app/redis_cli.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pychunkedgraph/app/redis_cli.py b/pychunkedgraph/app/redis_cli.py index ac3f7c5c3..19d02bf48 100644 --- a/pychunkedgraph/app/redis_cli.py +++ b/pychunkedgraph/app/redis_cli.py @@ -84,7 +84,7 @@ def requeue(queue, all, job_ids): click.echo("Nothing to do") sys.exit(0) - click.echo("Requeueing {0} jobs from failed queue".format(len(job_ids))) + click.echo(f"Requeueing {len(job_ids)} jobs from failed queue") fail_count = 0 for job_id in job_ids: try: @@ -94,8 +94,7 @@ def requeue(queue, all, job_ids): if fail_count > 0: click.secho( - "Unable to requeue {0} jobs from failed job registry".format(fail_count), - fg="red", + f"Unable to requeue {fail_count} jobs from failed job registry", fg="red" ) From 434bcd2c40ce787ca6a8405acaa57eef9401ff89 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 13 Sep 2019 19:06:42 +0000 Subject: [PATCH 0239/1097] return layer and chunk coord for tracking workers --- pychunkedgraph/ingest/initialization/abstract_layers.py | 2 +- pychunkedgraph/ingest/ran_ingestion_v2.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index c27d74aac..007e16f1b 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -48,7 +48,7 @@ def add_layer( time_stamp, ) # to track worker completion - return str(layer_id) + return return np.concatenate([[layer_id], chunk_coords]) def _process_chunks(cg_instance, layer_id, 
chunk_coords): diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index b364ae08e..df208d056 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -156,7 +156,7 @@ def create_atomic_chunk(imanager, coord): ) add_atomic_edges(imanager.cg, coord, chunk_edges_active, isolated=isolated_ids) # to track workers completion, layer = 2 - return str(2) + return np.concatenate([[2], coord]) def _get_chunk_data(imanager, coord) -> Tuple[Dict, Dict]: From 91ee95d3f411fdaf7a9a7e455336de17e4fd0d1f Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 13 Sep 2019 19:07:20 +0000 Subject: [PATCH 0240/1097] return layer and chunk coord for tracking workers --- pychunkedgraph/ingest/initialization/abstract_layers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 007e16f1b..582325217 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -48,7 +48,7 @@ def add_layer( time_stamp, ) # to track worker completion - return return np.concatenate([[layer_id], chunk_coords]) + return np.concatenate([[layer_id], chunk_coords]) def _process_chunks(cg_instance, layer_id, chunk_coords): From e103fe576f378e82ae8d42742251d161294c24ce Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 1 Oct 2019 11:57:57 -0400 Subject: [PATCH 0241/1097] akhilesh-jobs-layer-dependency (#167) * fix: cache ingestion manager chunk coords * wip: chunks task dependency tree * wip: task dependency * wip: task dependency * wip: task dependency * wip: task dependency * wip: task dependency use new redis connectin in thread * wip: job dependency * wip: job dependency * fix: defaultdict * add stats * wip * wip: ingest interface * wip: ingest interface config namedtuples * wip: ingest interface rename 
module * wip: ingest interface compute chunk id util function * wip: ingest interface refactors * wip: ingest interface use lambda * wip: ingest interface get chunk id fix and refactors * wip: ingest interface ingest raw cli * redis server in dev container * dev container envs * wip: ingest interface bug fixes * supervoxels count fix * terminlogy changes * components storage * wip: ingest interface change components storage format * result ttl 0 * wip: ingest interface generator bug * wip: remove commented code * fix: wrong start index when reading components * wip: format docs * format documentation updates * update dracopy version * wip: make ingest robust * wip: make ingest robust * wip: make ingest robust * wip: make ingest robust * wip: make ingest robust * wip: make ingest robust * wip: make ingest robust * wip: make ingest robust * wip: make ingest robust * wip: make ingest robust * wip: make ingestion robust helper function to calculate layer chunk bounds * wip: make ingestion robust helper function to calculate layer chunk bounds * queue jobs in batches * reduce job timeout * wip: helper command to enqueue parent chunk tasks periodically * rename cli group, more helper commands * refactors and helper command to periodically check child status * refactors and helper command to periodically check child status * wip: make ingestion manager independent of chunkedgraph class * make ingestion manager independent of chunkedgraph instance * fix: relevant parameters in serilization * redis connection in ingestion manager * fix: wrong variable * fix: incorrect bucket * remove unused class --- .devcontainer/Dockerfile | 5 +- .devcontainer/devcontainer.json | 8 +- Dockerfile | 4 +- pychunkedgraph/app/__init__.py | 4 +- pychunkedgraph/app/config.py | 9 + .../app/{redis_cli.py => rq_cli.py} | 41 +- pychunkedgraph/backend/chunkedgraph.py | 1931 +++++++++-------- pychunkedgraph/backend/chunkedgraph_edits.py | 2 +- pychunkedgraph/backend/chunkedgraph_utils.py | 147 +- 
pychunkedgraph/backend/definitions/config.py | 32 + .../backend/utils/{helpers.py => general.py} | 3 +- pychunkedgraph/ingest/cli.py | 205 +- pychunkedgraph/ingest/ingestion_utils.py | 5 +- pychunkedgraph/ingest/ingestionmanager.py | 108 +- .../ingest/initialization/abstract_layers.py | 11 +- pychunkedgraph/ingest/ran_ingestion_v2.py | 309 ++- pychunkedgraph/io/agglomeration.py | 49 - pychunkedgraph/io/components.py | 60 + pychunkedgraph/io/edges.py | 2 - .../io/protobuf/chunkComponents.proto | 7 + .../io/protobuf/chunkComponents_pb2.py | 70 + pychunkedgraph/io/protobuf/chunkMapping.proto | 8 - pychunkedgraph/io/storage.md | 66 + pychunkedgraph/utils/redis.py | 20 +- requirements.txt | 2 +- rq_workers/ingest_worker.py | 8 +- rq_workers/test_worker.py | 6 +- 27 files changed, 1779 insertions(+), 1343 deletions(-) rename pychunkedgraph/app/{redis_cli.py => rq_cli.py} (71%) create mode 100644 pychunkedgraph/backend/definitions/config.py rename pychunkedgraph/backend/utils/{helpers.py => general.py} (95%) delete mode 100644 pychunkedgraph/io/agglomeration.py create mode 100644 pychunkedgraph/io/components.py create mode 100644 pychunkedgraph/io/protobuf/chunkComponents.proto create mode 100644 pychunkedgraph/io/protobuf/chunkComponents_pb2.py delete mode 100644 pychunkedgraph/io/protobuf/chunkMapping.proto create mode 100644 pychunkedgraph/io/storage.md diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 4855f1782..8192e0ac5 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,4 +1,4 @@ -FROM tiangolo/uwsgi-nginx-flask:python3.6 +FROM tiangolo/uwsgi-nginx-flask:python3.7 COPY override/timeout.conf /etc/nginx/conf.d/timeout.conf COPY override/supervisord.conf /etc/supervisor/conf.d/supervisord.conf @@ -12,6 +12,7 @@ RUN apt-get update \ lsb-release \ curl \ apt-transport-https \ + redis-server \ # GOOGLE-CLOUD-SDK && pip install --no-cache-dir --upgrade crcmod \ && echo "deb https://packages.cloud.google.com/apt 
cloud-sdk-$(lsb_release -c -s) main" > /etc/apt/sources.list.d/google-cloud-sdk.list \ @@ -27,7 +28,7 @@ RUN apt-get update \ && apt-key adv --no-tty --keyserver hkp://keyserver.ubuntu.com --recv-key 612DEFB798507F25 \ && apt-get update \ && apt-get install -y python3-graph-tool \ - && ln -s /usr/lib/python3/dist-packages/graph_tool /usr/local/lib/python3.6/site-packages/graph_tool \ + && ln -s /usr/lib/python3/dist-packages/graph_tool /usr/local/lib/python3.7/site-packages/graph_tool \ && pip install --no-cache-dir --upgrade scipy \ # PYCHUNKEDGRAPH # Need pip 18.1 for process-dependency-links flag support diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 593a0d017..e7bf70fc7 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -21,7 +21,11 @@ // Uncomment the next line to use a non-root user. See https://aka.ms/vscode-remote/containers/non-root-user. // "-u", "1000" "-w", "${env:HOME}/projects/sl-pychunkedgraph", - "-v", "${env:HOME}/secrets:/root/.cloudvolume/secrets" + "-v", "${env:HOME}/secrets:/root/.cloudvolume/secrets", + "-e", "FLASK_APP=run_dev.py", + "-e", "APP_SETTINGS=pychunkedgraph.app.config.DockerDevelopmentConfig", + "-e", "REDIS_PASSWORD=dev", + "-e", "LC_ALL=C.UTF-8" ], // Uncomment the next line if you want to publish any ports. @@ -31,7 +35,7 @@ // "settings": { "workbench.colorTheme": "Quiet Light" }, // Uncomment the next line to run commands after the container is created - for example installing git. - "postCreateCommand": "rm -rf notebooks && git clone https://github.com/akhileshh/notebooks.git", + "postCreateCommand": "redis-server --requirepass dev --appendonly no", // Add the IDs of any extensions you want installed in the array below. 
"extensions": [] diff --git a/Dockerfile b/Dockerfile index 48b20aac0..769f9bd71 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM tiangolo/uwsgi-nginx-flask:python3.6 +FROM tiangolo/uwsgi-nginx-flask:python3.7 COPY override/timeout.conf /etc/nginx/conf.d/timeout.conf @@ -31,7 +31,7 @@ RUN mkdir -p /home/nginx/.cloudvolume/secrets \ && apt-key adv --no-tty --keyserver hkp://keyserver.ubuntu.com --recv-key 612DEFB798507F25 \ && apt-get update \ && apt-get install -y python3-graph-tool \ - && ln -s /usr/lib/python3/dist-packages/graph_tool /usr/local/lib/python3.6/site-packages/graph_tool \ + && ln -s /usr/lib/python3/dist-packages/graph_tool /usr/local/lib/python3.7/site-packages/graph_tool \ && pip install --no-cache-dir --upgrade scipy \ # PYCHUNKEDGRAPH # Need pip 18.1 for process-dependency-links flag support diff --git a/pychunkedgraph/app/__init__.py b/pychunkedgraph/app/__init__.py index a90281987..cf9de2381 100644 --- a/pychunkedgraph/app/__init__.py +++ b/pychunkedgraph/app/__init__.py @@ -17,7 +17,7 @@ from pychunkedgraph.logging import jsonformatter # from pychunkedgraph.app import manifest_app_blueprint -from .redis_cli import init_redis_cmds +from .rq_cli import init_rq_cmds from ..ingest.cli import init_ingest_cmds os.environ['TRAVIS_BRANCH'] = "IDONTKNOWWHYINEEDTHIS" @@ -80,4 +80,4 @@ def configure_app(app): app.test_q = Queue('test', connection=app.redis) with app.app_context(): init_ingest_cmds(app) - init_redis_cmds(app) \ No newline at end of file + init_rq_cmds(app) \ No newline at end of file diff --git a/pychunkedgraph/app/config.py b/pychunkedgraph/app/config.py index c2b629b18..2c5d0699c 100644 --- a/pychunkedgraph/app/config.py +++ b/pychunkedgraph/app/config.py @@ -28,6 +28,15 @@ class DevelopmentConfig(BaseConfig): DEBUG = True +class DockerDevelopmentConfig(BaseConfig): + """Development configuration.""" + USE_REDIS_JOBS = True + REDIS_HOST = os.environ.get('REDIS_SERVICE_HOST', 'localhost') + REDIS_PORT = 
os.environ.get('REDIS_SERVICE_PORT', '6379') + REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD', 'dev') + REDIS_URL = f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0' + + class DeploymentWithRedisConfig(BaseConfig): """Deployment configuration with Redis.""" USE_REDIS_JOBS = True diff --git a/pychunkedgraph/app/redis_cli.py b/pychunkedgraph/app/rq_cli.py similarity index 71% rename from pychunkedgraph/app/redis_cli.py rename to pychunkedgraph/app/rq_cli.py index 19d02bf48..b4fdfe22c 100644 --- a/pychunkedgraph/app/redis_cli.py +++ b/pychunkedgraph/app/rq_cli.py @@ -11,6 +11,7 @@ from rq.worker import WorkerStatus from rq.job import Job from rq.exceptions import InvalidJobOperationError +from rq.registry import StartedJobRegistry from rq.registry import FailedJobRegistry from flask import current_app from flask.cli import AppGroup @@ -20,11 +21,11 @@ from ..utils.redis import REDIS_PASSWORD -redis_cli = AppGroup("redis") +rq_cli = AppGroup("rq") connection = Redis(host=REDIS_HOST, port=REDIS_PORT, db=0, password=REDIS_PASSWORD) -@redis_cli.command("status") +@rq_cli.command("status") @click.argument("queue", type=str, default="test") @click.option("--show-busy", is_flag=True) def get_status(queue, show_busy): @@ -40,7 +41,7 @@ def get_status(queue, show_busy): print(f"Jobs failed \t: {q.failed_job_registry.count}") -@redis_cli.command("failed_ids") +@rq_cli.command("failed_ids") @click.argument("queue", type=str) def failed_jobs(queue): q = Queue(queue, connection=connection) @@ -48,7 +49,7 @@ def failed_jobs(queue): print("\n".join(ids)) -@redis_cli.command("failed_info") +@rq_cli.command("failed_info") @click.argument("queue", type=str) @click.argument("id", type=str) def failed_job_info(queue, id): @@ -61,7 +62,7 @@ def failed_job_info(queue, id): print(j.exc_info) -@redis_cli.command("empty") +@rq_cli.command("empty") @click.argument("queue", type=str) def empty_queue(queue): q = Queue(queue, connection=connection) @@ -70,7 +71,17 @@ def 
empty_queue(queue): print(f"{job_count} jobs removed from {queue}.") -@redis_cli.command("requeue") +@rq_cli.command("reenqueue") +@click.argument("queue", type=str) +@click.argument("job_ids", nargs=-1, required=True) +def enqueue(queue, job_ids): + """Enqueues *existing* jobs that are stuck for whatever reason.""" + q = Queue(queue, connection=connection) + for job_id in job_ids: + q.push_job_id(job_id) + + +@rq_cli.command("requeue") @click.argument("queue", type=str) @click.option("--all", "-a", is_flag=True, help="Requeue all failed jobs") @click.argument("job_ids", nargs=-1) @@ -98,5 +109,19 @@ def requeue(queue, all, job_ids): ) -def init_redis_cmds(app): - app.cli.add_command(redis_cli) +@rq_cli.command("cleanup") +@click.argument("queue", type=str) +def clean_start_registry(queue): + """ + Clean started job registry + Sometimes started jobs are not moved to failed registry (network issues) + This command takes the jobs off the started registry and reueues them + """ + registry = StartedJobRegistry(name=queue, connection=connection) + cleaned_jobs = registry.cleanup() + print(f"Requeued {len(cleaned_jobs)} jobs from the started job registry.") + + +def init_rq_cmds(app): + app.cli.add_command(rq_cli) + diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index a81272aa6..50e1e457e 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -1,50 +1,70 @@ -import collections -import numpy as np -import time -import datetime import os import sys -import networkx as nx -import cloudvolume -import re -import itertools +import time +import datetime import logging -from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, NamedTuple from itertools import chain +from itertools import product from functools import reduce -from multiwrapper import multiprocessing_utils as mu -from pychunkedgraph.backend import cutting, chunkedgraph_comp, flatgraph_utils -from 
pychunkedgraph.backend.chunkedgraph_utils import compute_indices_pandas, \ - compute_bitmasks, get_valid_timestamp, \ - get_time_range_filter, get_time_range_and_column_filter, get_max_time, \ - combine_cross_chunk_edge_dicts, get_min_time, partial_row_data_to_column_dict -from pychunkedgraph.backend.utils import serializers, column_keys, row_keys, basetypes -from pychunkedgraph.backend import chunkedgraph_exceptions as cg_exceptions, \ - chunkedgraph_edits as cg_edits -from pychunkedgraph.backend.graphoperation import ( - GraphEditOperation, - MergeOperation, - MulticutOperation, - SplitOperation, -) +from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union +import numpy as np +import pytz + +from cloudvolume import CloudVolume +from multiwrapper import multiprocessing_utils as mu from google.api_core.retry import Retry, if_exception_type -from google.api_core.exceptions import Aborted, DeadlineExceeded, \ - ServiceUnavailable +from google.api_core.exceptions import Aborted, DeadlineExceeded, ServiceUnavailable from google.auth import credentials from google.cloud import bigtable -from google.cloud.bigtable.row_filters import TimestampRange, \ - TimestampRangeFilter, ColumnRangeFilter, ValueRangeFilter, RowFilterChain, \ - ColumnQualifierRegexFilter, RowFilterUnion, ConditionalRowFilter, \ - PassAllFilter, RowFilter, RowKeyRegexFilter, FamilyNameRegexFilter +from google.cloud.bigtable.row_filters import ( + TimestampRange, + TimestampRangeFilter, + ColumnRangeFilter, + ValueRangeFilter, + RowFilterChain, + ColumnQualifierRegexFilter, + ConditionalRowFilter, + PassAllFilter, + RowFilter, +) from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.column_family import MaxVersionsGCRule +from . 
import ( + chunkedgraph_exceptions as cg_exceptions, + chunkedgraph_edits as cg_edits, + cutting, + chunkedgraph_comp, + flatgraph_utils, +) +from .chunkedgraph_utils import ( + compute_indices_pandas, + compute_bitmasks, + get_valid_timestamp, + get_time_range_filter, + get_time_range_and_column_filter, + get_max_time, + combine_cross_chunk_edge_dicts, + get_min_time, + partial_row_data_to_column_dict, + compute_chunk_id, +) +from .utils import serializers, column_keys, row_keys, basetypes +from .graphoperation import ( + GraphEditOperation, + MergeOperation, + MulticutOperation, + SplitOperation, + RedoOperation, + UndoOperation, +) from .definitions.edges import Edges from .definitions.agglomeration import Agglomeration -from .utils.edge_utils import ( - concatenate_chunk_edges, filter_edges, get_active_edges) +from .utils.edge_utils import concatenate_chunk_edges +from .utils.edge_utils import filter_edges +from .utils.edge_utils import get_active_edges from ..io.edges import get_chunk_edges @@ -54,27 +74,30 @@ LOCK_EXPIRED_TIME_DELTA = datetime.timedelta(minutes=3, seconds=0) # Setting environment wide credential path -os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = \ - HOME + "/.cloudvolume/secrets/google-secret.json" +os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = ( + HOME + "/.cloudvolume/secrets/google-secret.json" +) class ChunkedGraph(object): - def __init__(self, - table_id: str, - instance_id: str = "pychunkedgraph", - project_id: str = "neuromancer-seung-import", - chunk_size: Tuple[np.uint64, np.uint64, np.uint64] = None, - fan_out: Optional[np.uint64] = None, - use_skip_connections: Optional[bool] = True, - edge_dir: Optional[str] = None, - s_bits_atomic_layer: Optional[np.uint64] = 8, - n_bits_root_counter: Optional[np.uint64] = 0, - n_layers: Optional[np.uint64] = None, - credentials: Optional[credentials.Credentials] = None, - client: bigtable.Client = None, - dataset_info: Optional[object] = None, - is_new: bool = False, - logger: 
Optional[logging.Logger] = None) -> None: + def __init__( + self, + table_id: str, + instance_id: str = "pychunkedgraph", + project_id: str = "neuromancer-seung-import", + chunk_size: Tuple[np.uint64, np.uint64, np.uint64] = None, + fan_out: Optional[np.uint64] = None, + use_skip_connections: Optional[bool] = True, + edge_dir: Optional[str] = None, + s_bits_atomic_layer: Optional[np.uint64] = 8, + n_bits_root_counter: Optional[np.uint64] = 0, + n_layers: Optional[np.uint64] = None, + credentials: Optional[credentials.Credentials] = None, + client: bigtable.Client = None, + dataset_info: Optional[object] = None, + is_new: bool = False, + logger: Optional[logging.Logger] = None, + ) -> None: if logger is None: self.logger = logging.getLogger(f"{project_id}/{instance_id}/{table_id}") @@ -89,8 +112,9 @@ def __init__(self, if client is not None: self._client = client else: - self._client = bigtable.Client(project=project_id, admin=True, - credentials=credentials) + self._client = bigtable.Client( + project=project_id, admin=True, credentials=credentials + ) self._instance = self.client.instance(instance_id) self._table_id = table_id @@ -101,40 +125,57 @@ def __init__(self, self._check_and_create_table() self._dataset_info = self.check_and_write_table_parameters( - column_keys.GraphSettings.DatasetInfo, dataset_info, - required=True, is_new=is_new) + column_keys.GraphSettings.DatasetInfo, + dataset_info, + required=True, + is_new=is_new, + ) - self._cv_path = self._dataset_info["data_dir"] # required + self._cv_path = self._dataset_info["data_dir"] # required self._mesh_dir = self._dataset_info.get("mesh", None) # optional self._edge_dir = self.check_and_write_table_parameters( - column_keys.GraphSettings.EdgeDir, edge_dir, - required=False, is_new=is_new) + column_keys.GraphSettings.EdgeDir, edge_dir, required=False, is_new=is_new + ) self._n_layers = self.check_and_write_table_parameters( - column_keys.GraphSettings.LayerCount, n_layers, - required=True, is_new=is_new) 
+ column_keys.GraphSettings.LayerCount, n_layers, required=True, is_new=is_new + ) self._fan_out = self.check_and_write_table_parameters( - column_keys.GraphSettings.FanOut, fan_out, - required=True, is_new=is_new) + column_keys.GraphSettings.FanOut, fan_out, required=True, is_new=is_new + ) s_bits_atomic_layer = self.check_and_write_table_parameters( column_keys.GraphSettings.SpatialBits, np.uint64(s_bits_atomic_layer), - required=False, is_new=is_new) - self._use_skip_connections = self.check_and_write_table_parameters( - column_keys.GraphSettings.SkipConnections, - np.uint64(use_skip_connections), required=False, is_new=is_new) > 0 + required=False, + is_new=is_new, + ) + self._use_skip_connections = ( + self.check_and_write_table_parameters( + column_keys.GraphSettings.SkipConnections, + np.uint64(use_skip_connections), + required=False, + is_new=is_new, + ) + > 0 + ) self._n_bits_root_counter = self.check_and_write_table_parameters( column_keys.GraphSettings.RootCounterBits, np.uint64(n_bits_root_counter), - required=False, is_new=is_new) + required=False, + is_new=is_new, + ) self._chunk_size = self.check_and_write_table_parameters( - column_keys.GraphSettings.ChunkSize, chunk_size, - required=True, is_new=is_new) + column_keys.GraphSettings.ChunkSize, + chunk_size, + required=True, + is_new=is_new, + ) self._dataset_info["graph"] = {"chunk_size": self.chunk_size} - self._bitmasks = compute_bitmasks(self.n_layers, self.fan_out, - s_bits_atomic_layer) + self._bitmasks = compute_bitmasks( + self.n_layers, self.fan_out, s_bits_atomic_layer + ) self._cv = None @@ -188,8 +229,12 @@ def cross_edge_family_id(self) -> str: @property def family_ids(self): - return [self.family_id, self.incrementer_family_id, self.log_family_id, - self.cross_edge_family_id] + return [ + self.family_id, + self.incrementer_family_id, + self.log_family_id, + self.cross_edge_family_id, + ] @property def fan_out(self) -> np.uint64: @@ -244,11 +289,11 @@ def cv_mip(self) -> int: return 
self._cv_mip @property - def cv(self) -> cloudvolume.CloudVolume: + def cv(self) -> CloudVolume: if self._cv is None: - self._cv = cloudvolume.CloudVolume(self._cv_path, mip=self._cv_mip, - info=self.dataset_info) - + self._cv = CloudVolume( + self._cv_path, mip=self._cv_mip, info=self.dataset_info + ) return self._cv @property @@ -268,30 +313,33 @@ def _check_and_create_table(self) -> None: f = self.table.column_family(self.family_id) f.create() - f_inc = self.table.column_family(self.incrementer_family_id, - gc_rule=MaxVersionsGCRule(1)) + f_inc = self.table.column_family( + self.incrementer_family_id, gc_rule=MaxVersionsGCRule(1) + ) f_inc.create() f_log = self.table.column_family(self.log_family_id) f_log.create() - f_ce = self.table.column_family(self.cross_edge_family_id, - gc_rule=MaxVersionsGCRule(1)) + f_ce = self.table.column_family( + self.cross_edge_family_id, gc_rule=MaxVersionsGCRule(1) + ) f_ce.create() self.logger.info(f"Table {self.table_id} created") - def check_and_write_table_parameters(self, column: column_keys._Column, - value: Optional[Union[str, np.uint64]] = None, - required: bool = True, - is_new: bool = False - ) -> Union[str, np.uint64]: + def check_and_write_table_parameters( + self, + column: column_keys._Column, + value: Optional[Union[str, np.uint64]] = None, + required: bool = True, + is_new: bool = False, + ) -> Union[str, np.uint64]: """ Checks if a parameter already exists in the table. If it already exists it returns the stored value, else it stores the given value. Storing the given values can be enforced with `is_new`. The function raises an exception if no value is passed and the parameter does not exist, yet. 
- :param column: column_keys._Column :param value: Union[str, np.uint64] :param required: bool @@ -299,9 +347,7 @@ def check_and_write_table_parameters(self, column: column_keys._Column, :return: Union[str, np.uint64] value """ - setting = self.read_byte_row(row_key=row_keys.GraphSettings, - columns=column) - + setting = self.read_byte_row(row_key=row_keys.GraphSettings, columns=column) if (not setting or is_new) and value is not None: row = self.mutate_row(row_keys.GraphSettings, {column: value}) self.bulk_write([row]) @@ -315,12 +361,10 @@ def check_and_write_table_parameters(self, column: column_keys._Column, def is_in_bounds(self, coordinate: Sequence[int]): """ Checks whether a coordinate is within the segmentation bounds - :param coordinate: [int, int, int] :return bool """ coordinate = np.array(coordinate) - if np.any(coordinate < self.segmentation_bounds[0]): return False elif np.any(coordinate > self.segmentation_bounds[1]): @@ -330,42 +374,39 @@ def is_in_bounds(self, coordinate: Sequence[int]): def get_serialized_info(self): """ Rerturns dictionary that can be used to load this ChunkedGraph - :return: dict """ - info = {"table_id": self.table_id, - "instance_id": self.instance_id, - "project_id": self.project_id} - + info = { + "table_id": self.table_id, + "instance_id": self.instance_id, + "project_id": self.project_id, + } try: info["credentials"] = self.client.credentials except: info["credentials"] = self.client._credentials - return info - - def adjust_vol_coordinates_to_cv(self, x: np.int, y: np.int, z: np.int, - resolution: Sequence[np.int]): + def adjust_vol_coordinates_to_cv( + self, x: np.int, y: np.int, z: np.int, resolution: Sequence[np.int] + ): resolution = np.array(resolution) scaling = np.array(self.cv.resolution / resolution, dtype=np.int) - - x = (x / scaling[0] - self.vx_vol_bounds[0, 0]) - y = (y / scaling[1] - self.vx_vol_bounds[1, 0]) - z = (z / scaling[2] - self.vx_vol_bounds[2, 0]) - + x = x / scaling[0] - self.vx_vol_bounds[0, 0] 
+ y = y / scaling[1] - self.vx_vol_bounds[1, 0] + z = z / scaling[2] - self.vx_vol_bounds[2, 0] return np.array([x, y, z]) - def get_chunk_coordinates_from_vol_coordinates(self, - x: np.int, - y: np.int, - z: np.int, - resolution: Sequence[np.int], - ceil: bool = False, - layer: int = 1 - ) -> np.ndarray: + def get_chunk_coordinates_from_vol_coordinates( + self, + x: np.int, + y: np.int, + z: np.int, + resolution: Sequence[np.int], + ceil: bool = False, + layer: int = 1, + ) -> np.ndarray: """ Translates volume coordinates to chunk_coordinates - :param x: np.int :param y: np.int :param z: np.int @@ -388,33 +429,26 @@ def get_chunk_coordinates_from_vol_coordinates(self, coords = np.array([x, y, z]) if ceil: coords = np.ceil(coords) - return coords.astype(np.int) def get_chunk_layer(self, node_or_chunk_id: np.uint64) -> int: """ Extract Layer from Node ID or Chunk ID - :param node_or_chunk_id: np.uint64 :return: int """ return int(int(node_or_chunk_id) >> 64 - self._n_bits_for_layer_id) - def get_chunk_layers(self, node_or_chunk_ids: Sequence[np.uint64] - ) -> np.ndarray: + def get_chunk_layers(self, node_or_chunk_ids: Sequence[np.uint64]) -> np.ndarray: """ Extract Layers from Node IDs or Chunk IDs - :param node_or_chunk_ids: np.ndarray :return: np.ndarray """ if len(node_or_chunk_ids) == 0: return np.array([], dtype=np.int) - return self._get_chunk_layer_vec(node_or_chunk_ids) - def get_chunk_coordinates(self, node_or_chunk_id: np.uint64 - ) -> np.ndarray: + def get_chunk_coordinates(self, node_or_chunk_id: np.uint64) -> np.ndarray: """ Extract X, Y and Z coordinate from Node ID or Chunk ID - :param node_or_chunk_id: np.uint64 :return: Tuple(int, int, int) """ @@ -430,14 +464,16 @@ def get_chunk_coordinates(self, node_or_chunk_id: np.uint64 z = int(node_or_chunk_id) >> z_offset & 2 ** bits_per_dim - 1 return np.array([x, y, z]) - def get_chunk_id(self, node_id: Optional[np.uint64] = None, - layer: Optional[int] = None, - x: Optional[int] = None, - y: Optional[int] 
= None, - z: Optional[int] = None) -> np.uint64: + def get_chunk_id( + self, + node_id: Optional[np.uint64] = None, + layer: Optional[int] = None, + x: Optional[int] = None, + y: Optional[int] = None, + z: Optional[int] = None, + ) -> np.uint64: """ (1) Extract Chunk ID from Node ID (2) Build Chunk ID from Layer, X, Y and Z components - :param node_id: np.uint64 :param layer: int :param x: int @@ -445,9 +481,7 @@ def get_chunk_id(self, node_id: Optional[np.uint64] = None, :param z: int :return: np.uint64 """ - assert node_id is not None or \ - all(v is not None for v in [layer, x, y, z]) - + assert node_id is not None or all(v is not None for v in [layer, x, y, z]) if node_id is not None: layer = self.get_chunk_layer(node_id) bits_per_dim = self.bitmasks[layer] @@ -455,39 +489,19 @@ def get_chunk_id(self, node_id: Optional[np.uint64] = None, if node_id is not None: chunk_offset = 64 - self._n_bits_for_layer_id - 3 * bits_per_dim return np.uint64((int(node_id) >> chunk_offset) << chunk_offset) - else: + return compute_chunk_id(layer, x, y, z, bits_per_dim, self._n_bits_for_layer_id) - if not(x < 2 ** bits_per_dim and - y < 2 ** bits_per_dim and - z < 2 ** bits_per_dim): - raise Exception("Chunk coordinate is out of range for" - "this graph on layer %d with %d bits/dim." - "[%d, %d, %d]; max = %d." 
- % (layer, bits_per_dim, x, y, z, - 2 ** bits_per_dim)) - - layer_offset = 64 - self._n_bits_for_layer_id - x_offset = layer_offset - bits_per_dim - y_offset = x_offset - bits_per_dim - z_offset = y_offset - bits_per_dim - return np.uint64(layer << layer_offset | x << x_offset | - y << y_offset | z << z_offset) - - def get_chunk_ids_from_node_ids(self, node_ids: Iterable[np.uint64] - ) -> np.ndarray: + def get_chunk_ids_from_node_ids(self, node_ids: Iterable[np.uint64]) -> np.ndarray: """ Extract a list of Chunk IDs from a list of Node IDs - :param node_ids: np.ndarray(dtype=np.uint64) :return: np.ndarray(dtype=np.uint64) """ if len(node_ids) == 0: return np.array([], dtype=np.int) - return self._get_chunk_id_vec(node_ids) def get_child_chunk_ids(self, node_or_chunk_id: np.uint64) -> np.ndarray: """ Calculates the ids of the children chunks in the next lower layer - :param node_or_chunk_id: np.uint64 :return: np.ndarray """ @@ -498,54 +512,53 @@ def get_child_chunk_ids(self, node_or_chunk_id: np.uint64) -> np.ndarray: return np.array([]) elif chunk_layer == 2: x, y, z = chunk_coords - return np.array([self.get_chunk_id(layer=chunk_layer-1, - x=x, y=y, z=z)]) + return np.array([self.get_chunk_id(layer=chunk_layer - 1, x=x, y=y, z=z)]) else: chunk_ids = [] - for dcoord in itertools.product(*[range(self.fan_out)]*3): + for dcoord in product(*[range(self.fan_out)] * 3): x, y, z = chunk_coords * self.fan_out + np.array(dcoord) - child_chunk_id = self.get_chunk_id(layer=chunk_layer-1, - x=x, y=y, z=z) + child_chunk_id = self.get_chunk_id(layer=chunk_layer - 1, x=x, y=y, z=z) chunk_ids.append(child_chunk_id) return np.array(chunk_ids) def get_parent_chunk_ids(self, node_or_chunk_id: np.uint64) -> np.ndarray: """ Creates list of chunk parent ids - :param node_or_chunk_id: np.uint64 :return: np.ndarray """ - parent_chunk_layers = range(self.get_chunk_layer(node_or_chunk_id) + 1, - self.n_layers + 1) + parent_chunk_layers = range( + self.get_chunk_layer(node_or_chunk_id) + 
1, self.n_layers + 1 + ) chunk_coord = self.get_chunk_coordinates(node_or_chunk_id) - parent_chunk_ids = [self.get_chunk_id(node_or_chunk_id)] for layer in parent_chunk_layers: chunk_coord = chunk_coord // self.fan_out - parent_chunk_ids.append(self.get_chunk_id(layer=layer, - x=chunk_coord[0], - y=chunk_coord[1], - z=chunk_coord[2])) + parent_chunk_ids.append( + self.get_chunk_id( + layer=layer, x=chunk_coord[0], y=chunk_coord[1], z=chunk_coord[2] + ) + ) return np.array(parent_chunk_ids, dtype=np.uint64) def get_parent_chunk_id_dict(self, node_or_chunk_id: np.uint64) -> dict: """ Creates dict of chunk parent ids - :param node_or_chunk_id: np.uint64 :return: dict """ chunk_layer = self.get_chunk_layer(node_or_chunk_id) - return dict(zip(range(chunk_layer, self.n_layers + 1), - self.get_parent_chunk_ids(node_or_chunk_id))) + return dict( + zip( + range(chunk_layer, self.n_layers + 1), + self.get_parent_chunk_ids(node_or_chunk_id), + ) + ) def get_segment_id_limit(self, node_or_chunk_id: np.uint64) -> np.uint64: """ Get maximum possible Segment ID for given Node ID or Chunk ID - :param node_or_chunk_id: np.uint64 :return: np.uint64 """ - layer = self.get_chunk_layer(node_or_chunk_id) bits_per_dim = self.bitmasks[layer] chunk_offset = 64 - self._n_bits_for_layer_id - 3 * bits_per_dim @@ -553,22 +566,22 @@ def get_segment_id_limit(self, node_or_chunk_id: np.uint64) -> np.uint64: def get_segment_id(self, node_id: np.uint64) -> np.uint64: """ Extract Segment ID from Node ID - :param node_id: np.uint64 :return: np.uint64 """ - return node_id & self.get_segment_id_limit(node_id) - def get_node_id(self, segment_id: np.uint64, - chunk_id: Optional[np.uint64] = None, - layer: Optional[int] = None, - x: Optional[int] = None, - y: Optional[int] = None, - z: Optional[int] = None) -> np.uint64: + def get_node_id( + self, + segment_id: np.uint64, + chunk_id: Optional[np.uint64] = None, + layer: Optional[int] = None, + x: Optional[int] = None, + y: Optional[int] = None, + z: 
Optional[int] = None, + ) -> np.uint64: """ (1) Build Node ID from Segment ID and Chunk ID (2) Build Node ID from Segment ID, Layer, X, Y and Z components - :param segment_id: np.uint64 :param chunk_id: np.uint64 :param layer: int @@ -577,7 +590,6 @@ def get_node_id(self, segment_id: np.uint64, :param z: int :return: np.uint64 """ - if chunk_id is not None: return chunk_id | segment_id else: @@ -585,123 +597,110 @@ def get_node_id(self, segment_id: np.uint64, def _get_unique_range(self, row_key, step): column = column_keys.Concurrency.CounterID - # Incrementer row keys start with an "i" followed by the chunk id append_row = self.table.row(row_key, append=True) append_row.increment_cell_value(column.family_id, column.key, step) # This increments the row entry and returns the value AFTER incrementing latest_row = append_row.commit() - max_segment_id = column.deserialize(latest_row[column.family_id][column.key][0][0]) - - min_segment_id = max_segment_id + np.uint64(1) - step + max_segment_id = column.deserialize( + latest_row[column.family_id][column.key][0][0] + ) + min_segment_id = max_segment_id + np.uint64(1) - step return min_segment_id, max_segment_id - def get_unique_segment_id_root_row(self, step: int = 1, - counter_id: int = None) -> np.ndarray: + def get_unique_segment_id_root_row( + self, step: int = 1, counter_id: int = None + ) -> np.ndarray: """ Return unique Segment ID for the Root Chunk - atomic counter - :param step: int :param counter_id: np.uint64 :return: np.uint64 """ if self.n_bits_root_counter == 0: - return self.get_unique_segment_id_range(self.root_chunk_id, - step=step) + return self.get_unique_segment_id_range(self.root_chunk_id, step=step) n_counters = np.uint64(2 ** self._n_bits_root_counter) - if counter_id is None: counter_id = np.uint64(np.random.randint(0, n_counters)) else: counter_id = np.uint64(counter_id % n_counters) row_key = serializers.serialize_key( - f"i{serializers.pad_node_id(self.root_chunk_id)}_{counter_id}") - - 
min_segment_id, max_segment_id = self._get_unique_range(row_key=row_key, - step=step) - - segment_id_range = np.arange(min_segment_id * n_counters + counter_id, - max_segment_id * n_counters + - np.uint64(1) + counter_id, n_counters, - dtype=basetypes.SEGMENT_ID) + f"i{serializers.pad_node_id(self.root_chunk_id)}_{counter_id}" + ) + min_segment_id, max_segment_id = self._get_unique_range( + row_key=row_key, step=step + ) + segment_id_range = np.arange( + min_segment_id * n_counters + counter_id, + max_segment_id * n_counters + np.uint64(1) + counter_id, + n_counters, + dtype=basetypes.SEGMENT_ID, + ) return segment_id_range - def get_unique_segment_id_range(self, chunk_id: np.uint64, step: int = 1 - ) -> np.ndarray: + def get_unique_segment_id_range( + self, chunk_id: np.uint64, step: int = 1 + ) -> np.ndarray: """ Return unique Segment ID for given Chunk ID - atomic counter - :param chunk_id: np.uint64 :param step: int :return: np.uint64 """ - if self.n_layers == self.get_chunk_layer(chunk_id) and \ - self.n_bits_root_counter > 0: + if ( + self.n_layers == self.get_chunk_layer(chunk_id) + and self.n_bits_root_counter > 0 + ): return self.get_unique_segment_id_root_row(step=step) - row_key = serializers.serialize_key( - "i%s" % serializers.pad_node_id(chunk_id)) - min_segment_id, max_segment_id = self._get_unique_range(row_key=row_key, - step=step) - segment_id_range = np.arange(min_segment_id, - max_segment_id + np.uint64(1), - dtype=basetypes.SEGMENT_ID) + row_key = serializers.serialize_key("i%s" % serializers.pad_node_id(chunk_id)) + min_segment_id, max_segment_id = self._get_unique_range( + row_key=row_key, step=step + ) + segment_id_range = np.arange( + min_segment_id, max_segment_id + np.uint64(1), dtype=basetypes.SEGMENT_ID + ) return segment_id_range def get_unique_segment_id(self, chunk_id: np.uint64) -> np.uint64: - """ Return unique Segment ID for given Chunk ID - - atomic counter - + """ Return unique Segment ID for given Chunk ID atomic counter :param 
chunk_id: np.uint64 :param step: int :return: np.uint64 """ - return self.get_unique_segment_id_range(chunk_id=chunk_id, step=1)[0] - def get_unique_node_id_range(self, chunk_id: np.uint64, step: int = 1 - ) -> np.ndarray: - """ Return unique Node ID range for given Chunk ID - - atomic counter - + def get_unique_node_id_range( + self, chunk_id: np.uint64, step: int = 1 + ) -> np.ndarray: + """ Return unique Node ID range for given Chunk ID atomic counter :param chunk_id: np.uint64 :param step: int :return: np.uint64 """ - - segment_ids = self.get_unique_segment_id_range(chunk_id=chunk_id, - step=step) - - node_ids = np.array([self.get_node_id(segment_id, chunk_id) - for segment_id in segment_ids], dtype=np.uint64) + segment_ids = self.get_unique_segment_id_range(chunk_id=chunk_id, step=step) + node_ids = np.array( + [self.get_node_id(segment_id, chunk_id) for segment_id in segment_ids], + dtype=np.uint64, + ) return node_ids def get_unique_node_id(self, chunk_id: np.uint64) -> np.uint64: - """ Return unique Node ID for given Chunk ID - - atomic counter - + """ Return unique Node ID for given Chunk ID atomic counter :param chunk_id: np.uint64 :return: np.uint64 """ - return self.get_unique_node_id_range(chunk_id=chunk_id, step=1)[0] def get_max_seg_id_root_chunk(self) -> np.uint64: """ Gets maximal root id based on the atomic counter - This is an approximation. It is not guaranteed that all ids smaller or equal to this id exists. However, it is guaranteed that no larger id exist at the time this function is executed. 
- :return: uint64 """ if self.n_bits_root_counter == 0: @@ -711,69 +710,53 @@ def get_max_seg_id_root_chunk(self) -> np.uint64: max_value = 0 for counter_id in range(n_counters): row_key = serializers.serialize_key( - f"i{serializers.pad_node_id(self.root_chunk_id)}_{counter_id}") - - row = self.read_byte_row(row_key, - columns=column_keys.Concurrency.CounterID) - - counter = basetypes.SEGMENT_ID.type(row[0].value if row else 0) * \ - n_counters + f"i{serializers.pad_node_id(self.root_chunk_id)}_{counter_id}" + ) + row = self.read_byte_row(row_key, columns=column_keys.Concurrency.CounterID) + counter = basetypes.SEGMENT_ID.type(row[0].value if row else 0) * n_counters if counter > max_value: max_value = counter - return max_value def get_max_seg_id(self, chunk_id: np.uint64) -> np.uint64: """ Gets maximal seg id in a chunk based on the atomic counter - This is an approximation. It is not guaranteed that all ids smaller or equal to this id exists. However, it is guaranteed that no larger id exist at the time this function is executed. - - :return: uint64 """ - if self.n_layers == self.get_chunk_layer(chunk_id) and \ - self.n_bits_root_counter > 0: + if ( + self.n_layers == self.get_chunk_layer(chunk_id) + and self.n_bits_root_counter > 0 + ): return self.get_max_seg_id_root_chunk() # Incrementer row keys start with an "i" - row_key = serializers.serialize_key( - "i%s" % serializers.pad_node_id(chunk_id)) - row = self.read_byte_row(row_key, - columns=column_keys.Concurrency.CounterID) + row_key = serializers.serialize_key("i%s" % serializers.pad_node_id(chunk_id)) + row = self.read_byte_row(row_key, columns=column_keys.Concurrency.CounterID) # Read incrementer value (default to 0) and interpret is as Segment ID return basetypes.SEGMENT_ID.type(row[0].value if row else 0) def get_max_node_id(self, chunk_id: np.uint64) -> np.uint64: """ Gets maximal node id in a chunk based on the atomic counter - This is an approximation. 
It is not guaranteed that all ids smaller or equal to this id exists. However, it is guaranteed that no larger id exist at the time this function is executed. - - :return: uint64 """ - max_seg_id = self.get_max_seg_id(chunk_id) return self.get_node_id(segment_id=max_seg_id, chunk_id=chunk_id) def get_unique_operation_id(self) -> np.uint64: - """ Finds a unique operation id - - atomic counter - + """ Finds a unique operation id atomic counter Operations essentially live in layer 0. Even if segmentation ids might live in layer 0 one day, they would not collide with the operation ids because we write information belonging to operations in a separate family id. - :return: str """ column = column_keys.Concurrency.CounterID - append_row = self.table.row(row_keys.OperationID, append=True) append_row.increment_cell_value(column.family_id, column.key, 1) @@ -781,30 +764,23 @@ def get_unique_operation_id(self) -> np.uint64: latest_row = append_row.commit() operation_id_b = latest_row[column.family_id][column.key][0][0] operation_id = column.deserialize(operation_id_b) - return np.uint64(operation_id) def get_max_operation_id(self) -> np.int64: """ Gets maximal operation id based on the atomic counter - This is an approximation. It is not guaranteed that all ids smaller or equal to this id exists. However, it is guaranteed that no larger id exist at the time this function is executed. - - :return: int64 """ column = column_keys.Concurrency.CounterID row = self.read_byte_row(row_keys.OperationID, columns=column) - return row[0].value if row else column.basetype(0) def get_cross_chunk_edges_layer(self, cross_edges): """ Computes the layer in which a cross chunk edge becomes relevant. - I.e. if a cross chunk edge links two nodes in layer 4 this function returns 3. 
- :param cross_edges: n x 2 array edges between atomic (level 1) node ids :return: array of length n @@ -813,59 +789,67 @@ def get_cross_chunk_edges_layer(self, cross_edges): return np.array([], dtype=np.int) cross_chunk_edge_layers = np.ones(len(cross_edges), dtype=np.int) - cross_edge_coordinates = [] for cross_edge in cross_edges: cross_edge_coordinates.append( - [self.get_chunk_coordinates(cross_edge[0]), - self.get_chunk_coordinates(cross_edge[1])]) + [ + self.get_chunk_coordinates(cross_edge[0]), + self.get_chunk_coordinates(cross_edge[1]), + ] + ) cross_edge_coordinates = np.array(cross_edge_coordinates, dtype=np.int) - - for layer in range(2, self.n_layers): - edge_diff = np.sum(np.abs(cross_edge_coordinates[:, 0] - - cross_edge_coordinates[:, 1]), axis=1) + for _ in range(2, self.n_layers): + edge_diff = np.sum( + np.abs(cross_edge_coordinates[:, 0] - cross_edge_coordinates[:, 1]), + axis=1, + ) cross_chunk_edge_layers[edge_diff > 0] += 1 cross_edge_coordinates = cross_edge_coordinates // self.fan_out - return cross_chunk_edge_layers def get_cross_chunk_edge_dict(self, cross_edges): """ Generates a cross chunk edge dict for a list of cross chunk edges - :param cross_edges: n x 2 array :return: dict """ cce_layers = self.get_cross_chunk_edges_layer(cross_edges) u_cce_layers = np.unique(cce_layers) cross_edge_dict = {} - for l in range(2, self.n_layers): - cross_edge_dict[l] = column_keys.Connectivity.CrossChunkEdge.deserialize(b'') + cross_edge_dict[l] = column_keys.Connectivity.CrossChunkEdge.deserialize( + b"" + ) val_dict = {} for cc_layer in u_cce_layers: layer_cross_edges = cross_edges[cce_layers == cc_layer] - if len(layer_cross_edges) > 0: - val_dict[column_keys.Connectivity.CrossChunkEdge[cc_layer]] = \ - layer_cross_edges + val_dict[ + column_keys.Connectivity.CrossChunkEdge[cc_layer] + ] = layer_cross_edges cross_edge_dict[cc_layer] = layer_cross_edges return cross_edge_dict def read_byte_rows( - self, - start_key: Optional[bytes] = None, - 
end_key: Optional[bytes] = None, - end_key_inclusive: bool = False, - row_keys: Optional[Iterable[bytes]] = None, - columns: Optional[Union[Iterable[column_keys._Column], column_keys._Column]] = None, - start_time: Optional[datetime.datetime] = None, - end_time: Optional[datetime.datetime] = None, - end_time_inclusive: bool = False) -> Dict[bytes, Union[ - Dict[column_keys._Column, List[bigtable.row_data.Cell]], - List[bigtable.row_data.Cell] - ]]: + self, + start_key: Optional[bytes] = None, + end_key: Optional[bytes] = None, + end_key_inclusive: bool = False, + row_keys: Optional[Iterable[bytes]] = None, + columns: Optional[ + Union[Iterable[column_keys._Column], column_keys._Column] + ] = None, + start_time: Optional[datetime.datetime] = None, + end_time: Optional[datetime.datetime] = None, + end_time_inclusive: bool = False, + ) -> Dict[ + bytes, + Union[ + Dict[column_keys._Column, List[bigtable.row_data.Cell]], + List[bigtable.row_data.Cell], + ], + ]: """Main function for reading a row range or non-contiguous row sets from Bigtable using `bytes` keys. @@ -900,17 +884,16 @@ def read_byte_rows( If only a single `column_keys._Column` was requested, the List of cells will be attached to the row dictionary directly (skipping the column dictionary). 
""" - # Create filters: Column and Time filter_ = get_time_range_and_column_filter( columns=columns, start_time=start_time, end_time=end_time, - end_inclusive=end_time_inclusive) + end_inclusive=end_time_inclusive, + ) # Create filters: Rows row_set = RowSet() - if row_keys is not None: for row_key in row_keys: row_set.add_row_key(row_key) @@ -919,14 +902,16 @@ def read_byte_rows( start_key=start_key, start_inclusive=True, end_key=end_key, - end_inclusive=end_key_inclusive) + end_inclusive=end_key_inclusive, + ) else: - raise cg_exceptions.PreconditionError("Need to either provide a valid set of rows, or" - " both, a start row and an end row.") + raise cg_exceptions.PreconditionError( + "Need to either provide a valid set of rows, or" + " both, a start row and an end row." + ) # Bigtable read with retries rows = self._execute_read(row_set=row_set, row_filter=filter_) - # Deserialize cells for row_key, column_dict in rows.items(): for column, cell_entries in column_dict.items(): @@ -935,23 +920,24 @@ def read_byte_rows( # If no column array was requested, reattach single column's values directly to the row if isinstance(columns, column_keys._Column): rows[row_key] = cell_entries - return rows def read_byte_row( - self, - row_key: bytes, - columns: Optional[Union[Iterable[column_keys._Column], column_keys._Column]] = None, - start_time: Optional[datetime.datetime] = None, - end_time: Optional[datetime.datetime] = None, - end_time_inclusive: bool = False) -> \ - Union[Dict[column_keys._Column, List[bigtable.row_data.Cell]], - List[bigtable.row_data.Cell]]: + self, + row_key: bytes, + columns: Optional[ + Union[Iterable[column_keys._Column], column_keys._Column] + ] = None, + start_time: Optional[datetime.datetime] = None, + end_time: Optional[datetime.datetime] = None, + end_time_inclusive: bool = False, + ) -> Union[ + Dict[column_keys._Column, List[bigtable.row_data.Cell]], + List[bigtable.row_data.Cell], + ]: """Convenience function for reading a single row from 
Bigtable using its `bytes` keys. - Arguments: row_key {bytes} -- The row to be read. - Keyword Arguments: columns {Optional[Union[Iterable[column_keys._Column], column_keys._Column]]} -- Optional filtering by columns to speed up the query. If `columns` is a single @@ -963,7 +949,6 @@ def read_byte_row( If None, no upper bound. (default: {None}) end_time_inclusive {bool} -- Whether or not `end_time` itself should be included in the request, ignored if `end_time` is None. (default: {False}) - Returns: Union[Dict[column_keys._Column, List[bigtable.row_data.Cell]], List[bigtable.row_data.Cell]] -- @@ -973,30 +958,39 @@ def read_byte_row( If only a single `column_keys._Column` was requested, the List of cells is returned directly. """ - row = self.read_byte_rows(row_keys=[row_key], columns=columns, start_time=start_time, - end_time=end_time, end_time_inclusive=end_time_inclusive) - + row = self.read_byte_rows( + row_keys=[row_key], + columns=columns, + start_time=start_time, + end_time=end_time, + end_time_inclusive=end_time_inclusive, + ) if isinstance(columns, column_keys._Column): return row.get(row_key, []) else: return row.get(row_key, {}) def read_node_id_rows( - self, - start_id: Optional[np.uint64] = None, - end_id: Optional[np.uint64] = None, - end_id_inclusive: bool = False, - node_ids: Optional[Iterable[np.uint64]] = None, - columns: Optional[Union[Iterable[column_keys._Column], column_keys._Column]] = None, - start_time: Optional[datetime.datetime] = None, - end_time: Optional[datetime.datetime] = None, - end_time_inclusive: bool = False) -> Dict[np.uint64, Union[ - Dict[column_keys._Column, List[bigtable.row_data.Cell]], - List[bigtable.row_data.Cell] - ]]: + self, + start_id: Optional[np.uint64] = None, + end_id: Optional[np.uint64] = None, + end_id_inclusive: bool = False, + node_ids: Optional[Iterable[np.uint64]] = None, + columns: Optional[ + Union[Iterable[column_keys._Column], column_keys._Column] + ] = None, + start_time: Optional[datetime.datetime] 
= None, + end_time: Optional[datetime.datetime] = None, + end_time_inclusive: bool = False, + ) -> Dict[ + np.uint64, + Union[ + Dict[column_keys._Column, List[bigtable.row_data.Cell]], + List[bigtable.row_data.Cell], + ], + ]: """Convenience function for reading a row range or non-contiguous row sets from Bigtable representing NodeIDs. - Keyword Arguments: start_id {Optional[np.uint64]} -- The first row to be read, ignored if `node_ids` is set. If None, no lower boundary is used. (default: {None}) @@ -1036,26 +1030,32 @@ def read_node_id_rows( start_key=to_bytes(start_id) if start_id is not None else None, end_key=to_bytes(end_id) if end_id is not None else None, end_key_inclusive=end_id_inclusive, - row_keys=(to_bytes(node_id) for node_id in node_ids) if node_ids is not None else None, + row_keys=(to_bytes(node_id) for node_id in node_ids) + if node_ids is not None + else None, columns=columns, start_time=start_time, end_time=end_time, - end_time_inclusive=end_time_inclusive) + end_time_inclusive=end_time_inclusive, + ) # Convert row_keys back to Node IDs return {from_bytes(row_key): data for (row_key, data) in rows.items()} def read_node_id_row( - self, - node_id: np.uint64, - columns: Optional[Union[Iterable[column_keys._Column], column_keys._Column]] = None, - start_time: Optional[datetime.datetime] = None, - end_time: Optional[datetime.datetime] = None, - end_time_inclusive: bool = False) -> \ - Union[Dict[column_keys._Column, List[bigtable.row_data.Cell]], - List[bigtable.row_data.Cell]]: + self, + node_id: np.uint64, + columns: Optional[ + Union[Iterable[column_keys._Column], column_keys._Column] + ] = None, + start_time: Optional[datetime.datetime] = None, + end_time: Optional[datetime.datetime] = None, + end_time_inclusive: bool = False, + ) -> Union[ + Dict[column_keys._Column, List[bigtable.row_data.Cell]], + List[bigtable.row_data.Cell], + ]: """Convenience function for reading a single row from Bigtable, representing a NodeID. 
- Arguments: node_id {np.uint64} -- the NodeID of the row to be read. @@ -1080,12 +1080,17 @@ def read_node_id_row( If only a single `column_keys._Column` was requested, the List of cells is returned directly. """ - return self.read_byte_row(row_key=serializers.serialize_uint64(node_id), columns=columns, - start_time=start_time, end_time=end_time, - end_time_inclusive=end_time_inclusive) + return self.read_byte_row( + row_key=serializers.serialize_uint64(node_id), + columns=columns, + start_time=start_time, + end_time=end_time, + end_time_inclusive=end_time_inclusive, + ) - def read_cross_chunk_edges(self, node_id: np.uint64, start_layer: int = 2, - end_layer: int = None) -> Dict: + def read_cross_chunk_edges( + self, node_id: np.uint64, start_layer: int = 2, end_layer: int = None + ) -> Dict: """ Reads the cross chunk edge entry from the table for a given node id and formats it as cross edge dict @@ -1101,11 +1106,12 @@ def read_cross_chunk_edges(self, node_id: np.uint64, start_layer: int = 2, return {} start_layer = np.max([self.get_chunk_layer(node_id), start_layer]) - assert end_layer > start_layer and end_layer <= self.n_layers - columns = [column_keys.Connectivity.CrossChunkEdge[l] - for l in range(start_layer, end_layer)] + columns = [ + column_keys.Connectivity.CrossChunkEdge[l] + for l in range(start_layer, end_layer) + ] row_dict = self.read_node_id_row(node_id, columns=columns) cross_edge_dict = {} @@ -1114,47 +1120,47 @@ def read_cross_chunk_edges(self, node_id: np.uint64, start_layer: int = 2, if col in row_dict: cross_edge_dict[l] = row_dict[col][0].value else: - cross_edge_dict[l] = col.deserialize(b'') - + cross_edge_dict[l] = col.deserialize(b"") return cross_edge_dict - def mutate_row(self, row_key: bytes, - val_dict: Dict[column_keys._Column, Any], - time_stamp: Optional[datetime.datetime] = None, - isbytes: bool = False - ) -> bigtable.row.Row: + def mutate_row( + self, + row_key: bytes, + val_dict: Dict[column_keys._Column, Any], + time_stamp: 
Optional[datetime.datetime] = None, + isbytes: bool = False, + ) -> bigtable.row.Row: """ Mutates a single row - :param row_key: serialized bigtable row key :param val_dict: Dict[column_keys._TypedColumn: bytes] :param time_stamp: None or datetime :return: list """ row = self.table.row(row_key) - for column, value in val_dict.items(): if not isbytes: value = column.serialize(value) - - row.set_cell(column_family_id=column.family_id, - column=column.key, - value=value, - timestamp=time_stamp) + row.set_cell( + column_family_id=column.family_id, + column=column.key, + value=value, + timestamp=time_stamp, + ) return row - def bulk_write(self, rows: Iterable[bigtable.row.DirectRow], - root_ids: Optional[Union[np.uint64, - Iterable[np.uint64]]] = None, - operation_id: Optional[np.uint64] = None, - slow_retry: bool = True, - block_size: int = 2000): + def bulk_write( + self, + rows: Iterable[bigtable.row.DirectRow], + root_ids: Optional[Union[np.uint64, Iterable[np.uint64]]] = None, + operation_id: Optional[np.uint64] = None, + slow_retry: bool = True, + block_size: int = 2000, + ): """ Writes a list of mutated rows in bulk - WARNING: If contains the same row (same row_key) and column key two times only the last one is effectively written to the BigTable (even when the mutations were applied to different columns) --> no versioning! 
- :param rows: list list of mutated rows :param root_ids: list if uint64 @@ -1171,27 +1177,31 @@ def bulk_write(self, rows: Iterable[bigtable.row.DirectRow], initial = 1 retry_policy = Retry( - predicate=if_exception_type((Aborted, - DeadlineExceeded, - ServiceUnavailable)), + predicate=if_exception_type( + (Aborted, DeadlineExceeded, ServiceUnavailable) + ), initial=initial, maximum=15.0, multiplier=2.0, - deadline=LOCK_EXPIRED_TIME_DELTA.seconds) + deadline=LOCK_EXPIRED_TIME_DELTA.seconds, + ) if root_ids is not None and operation_id is not None: if isinstance(root_ids, int): root_ids = [root_ids] - if not self.check_and_renew_root_locks(root_ids, operation_id): - raise cg_exceptions.LockError(f"Root lock renewal failed for operation ID {operation_id}") + raise cg_exceptions.LockError( + f"Root lock renewal failed for operation ID {operation_id}" + ) for i_row in range(0, len(rows), block_size): - status = self.table.mutate_rows(rows[i_row: i_row + block_size], - retry=retry_policy) - + status = self.table.mutate_rows( + rows[i_row : i_row + block_size], retry=retry_policy + ) if not all(status): - raise cg_exceptions.ChunkedGraphError(f"Bulk write failed for operation ID {operation_id}") + raise cg_exceptions.ChunkedGraphError( + f"Bulk write failed for operation ID {operation_id}" + ) def _execute_read_thread(self, row_set_and_filter: Tuple[RowSet, RowFilter]): row_set, row_filter = row_set_and_filter @@ -1201,12 +1211,12 @@ def _execute_read_thread(self, row_set_and_filter: Tuple[RowSet, RowFilter]): return {} range_read = self.table.read_rows(row_set=row_set, filter_=row_filter) - res = {v.row_key: partial_row_data_to_column_dict(v) - for v in range_read} + res = {v.row_key: partial_row_data_to_column_dict(v) for v in range_read} return res - def _execute_read(self, row_set: RowSet, row_filter: RowFilter = None) \ - -> Dict[bytes, Dict[column_keys._Column, bigtable.row_data.PartialRowData]]: + def _execute_read( + self, row_set: RowSet, row_filter: RowFilter 
= None + ) -> Dict[bytes, Dict[column_keys._Column, bigtable.row_data.PartialRowData]]: """ Core function to read rows from Bigtable. Uses standard Bigtable retry logic :param row_set: BigTable RowSet :param row_filter: BigTable RowFilter @@ -1217,25 +1227,26 @@ def _execute_read(self, row_set: RowSet, row_filter: RowFilter = None) \ # calculate this properly (range_read.request.SerializeToString()), but this estimate is # good enough for now max_row_key_count = 20000 - n_subrequests = max(1, int(np.ceil(len(row_set.row_keys) / - max_row_key_count))) + n_subrequests = max(1, int(np.ceil(len(row_set.row_keys) / max_row_key_count))) n_threads = min(n_subrequests, 2 * mu.n_cpus) row_sets = [] for i in range(n_subrequests): r = RowSet() - r.row_keys = row_set.row_keys[i * max_row_key_count: - (i + 1) * max_row_key_count] + r.row_keys = row_set.row_keys[ + i * max_row_key_count : (i + 1) * max_row_key_count + ] row_sets.append(r) # Don't forget the original RowSet's row_ranges row_sets[0].row_ranges = row_set.row_ranges - responses = mu.multithread_func(self._execute_read_thread, - params=((r, row_filter) - for r in row_sets), - debug=n_threads == 1, - n_threads=n_threads) + responses = mu.multithread_func( + self._execute_read_thread, + params=((r, row_filter) for r in row_sets), + debug=n_threads == 1, + n_threads=n_threads, + ) combined_response = {} for resp in responses: @@ -1244,17 +1255,23 @@ def _execute_read(self, row_set: RowSet, row_filter: RowFilter = None) \ return combined_response def range_read_chunk( - self, - layer: Optional[int] = None, - x: Optional[int] = None, - y: Optional[int] = None, - z: Optional[int] = None, - chunk_id: Optional[np.uint64] = None, - columns: Optional[Union[Iterable[column_keys._Column], column_keys._Column]] = None, - time_stamp: Optional[datetime.datetime] = None) -> Dict[np.uint64, Union[ - Dict[column_keys._Column, List[bigtable.row_data.Cell]], - List[bigtable.row_data.Cell] - ]]: + self, + layer: Optional[int] = None, + 
x: Optional[int] = None, + y: Optional[int] = None, + z: Optional[int] = None, + chunk_id: Optional[np.uint64] = None, + columns: Optional[ + Union[Iterable[column_keys._Column], column_keys._Column] + ] = None, + time_stamp: Optional[datetime.datetime] = None, + ) -> Dict[ + np.uint64, + Union[ + Dict[column_keys._Column, List[bigtable.row_data.Cell]], + List[bigtable.row_data.Cell], + ], + ]: """Convenience function for reading all NodeID rows of a single chunk from Bigtable. Chunk can either be specified by its (layer, x, y, and z coordinate), or by the chunk ID. @@ -1291,7 +1308,9 @@ def range_read_chunk( elif layer is not None and x is not None and y is not None and z is not None: chunk_id = self.get_chunk_id(layer=layer, x=x, y=y, z=z) else: - raise Exception("Either chunk_id or layer and coordinates have to be defined") + raise Exception( + "Either chunk_id or layer and coordinates have to be defined" + ) if layer == 1: max_segment_id = self.get_segment_id_limit(chunk_id) @@ -1309,38 +1328,36 @@ def range_read_chunk( end_id_inclusive=True, columns=columns, end_time=time_stamp, - end_time_inclusive=True) + end_time_inclusive=True, + ) except Exception as err: - raise Exception("Unable to consume chunk read: " - "[%d, %d, %d], l = %d: %s" % - (x, y, z, layer, err)) + raise Exception( + "Unable to consume chunk read: " + "[%d, %d, %d], l = %d: %s" % (x, y, z, layer, err) + ) return rr def range_read_layer(self, layer_id: int): """ Reads all ids within a layer - This can take a while depending on the size of the graph - :param layer_id: int :return: list of rows """ raise NotImplementedError() - def test_if_nodes_are_in_same_chunk(self, node_ids: Sequence[np.uint64] - ) -> bool: + def test_if_nodes_are_in_same_chunk(self, node_ids: Sequence[np.uint64]) -> bool: """ Test whether two nodes are in the same chunk :param node_ids: list of two ints :return: bool """ assert len(node_ids) == 2 - return self.get_chunk_id(node_id=node_ids[0]) == \ - 
self.get_chunk_id(node_id=node_ids[1]) + return self.get_chunk_id(node_id=node_ids[0]) == self.get_chunk_id( + node_id=node_ids[1] + ) - def get_chunk_id_from_coord(self, layer: int, - x: int, y: int, z: int) -> np.uint64: + def get_chunk_id_from_coord(self, layer: int, x: int, y: int, z: int) -> np.uint64: """ Return ChunkID for given chunked graph layer and voxel coordinates. - :param layer: int -- ChunkedGraph layer :param x: int -- X coordinate in voxel :param y: int -- Y coordinate in voxel @@ -1353,13 +1370,13 @@ def get_chunk_id_from_coord(self, layer: int, layer=layer, x=x // (int(self.chunk_size[0]) * base_chunk_span), y=y // (int(self.chunk_size[1]) * base_chunk_span), - z=z // (int(self.chunk_size[2]) * base_chunk_span)) + z=z // (int(self.chunk_size[2]) * base_chunk_span), + ) - def get_atomic_id_from_coord(self, x: int, y: int, z: int, - parent_id: np.uint64, n_tries: int=5 - ) -> np.uint64: + def get_atomic_id_from_coord( + self, x: int, y: int, z: int, parent_id: np.uint64, n_tries: int = 5 + ) -> np.uint64: """ Determines atomic id given a coordinate - :param x: int :param y: int :param z: int @@ -1370,9 +1387,8 @@ def get_atomic_id_from_coord(self, x: int, y: int, z: int, if self.get_chunk_layer(parent_id) == 1: return parent_id - - x /= 2**self.cv_mip - y /= 2**self.cv_mip + x /= 2 ** self.cv_mip + y /= 2 ** self.cv_mip x = int(x) y = int(y) @@ -1383,53 +1399,41 @@ def get_atomic_id_from_coord(self, x: int, y: int, z: int, root_id = self.get_root(parent_id) for i_try in range(n_tries): - # Define block size -- increase by one each try - x_l = x - (i_try - 1)**2 - y_l = y - (i_try - 1)**2 - z_l = z - (i_try - 1)**2 - - x_h = x + 1 + (i_try - 1)**2 - y_h = y + 1 + (i_try - 1)**2 - z_h = z + 1 + (i_try - 1)**2 + x_l = x - (i_try - 1) ** 2 + y_l = y - (i_try - 1) ** 2 + z_l = z - (i_try - 1) ** 2 - if x_l < 0: - x_l = 0 + x_h = x + 1 + (i_try - 1) ** 2 + y_h = y + 1 + (i_try - 1) ** 2 + z_h = z + 1 + (i_try - 1) ** 2 - if y_l < 0: - y_l = 0 - - if z_l 
< 0: - z_l = 0 + x_l = 0 if x_l < 0 else x_l + y_l = 0 if y_l < 0 else y_l + z_l = 0 if z_l < 0 else z_l # Get atomic ids from cloudvolume - atomic_id_block = self.cv[x_l: x_h, y_l: y_h, z_l: z_h] - atomic_ids, atomic_id_count = np.unique(atomic_id_block, - return_counts=True) + atomic_id_block = self.cv[x_l:x_h, y_l:y_h, z_l:z_h] + atomic_ids, atomic_id_count = np.unique(atomic_id_block, return_counts=True) # sort by frequency and discard those ids that have been checked # previously sorted_atomic_ids = atomic_ids[np.argsort(atomic_id_count)] - sorted_atomic_ids = sorted_atomic_ids[~np.in1d(sorted_atomic_ids, - checked)] + sorted_atomic_ids = sorted_atomic_ids[~np.in1d(sorted_atomic_ids, checked)] # For each candidate id check whether its root id corresponds to the # given root id for candidate_atomic_id in sorted_atomic_ids: ass_root_id = self.get_root(candidate_atomic_id) - if ass_root_id == root_id: # atomic_id is not None will be our indicator that the # search was successful - atomic_id = candidate_atomic_id break else: checked.append(candidate_atomic_id) - if atomic_id is not None: break - # Returns None if unsuccessful return atomic_id @@ -1437,7 +1441,6 @@ def read_log_row( self, operation_id: np.uint64 ) -> Dict[column_keys._Column, Union[np.ndarray, np.number]]: """ Retrieves log record from Bigtable for a given operation ID - :param operation_id: np.uint64 :return: Dict[column_keys._Column, Union[np.ndarray, np.number]] """ @@ -1461,36 +1464,39 @@ def read_log_row( def read_first_log_row(self): """ Returns first log row - :return: None or dict """ - for operation_id in range(1, 100): log_row = self.read_log_row(np.uint64(operation_id)) - if len(log_row) > 0: return log_row - return None - def add_atomic_edges_in_chunks(self, edge_id_dict: dict, - edge_aff_dict: dict, edge_area_dict: dict, - isolated_node_ids: Sequence[np.uint64], - verbose: bool = True, - time_stamp: Optional[datetime.datetime] = None): + def add_atomic_edges_in_chunks( + self, + 
edge_id_dict: dict, + edge_aff_dict: dict, + edge_area_dict: dict, + isolated_node_ids: Sequence[np.uint64], + verbose: bool = True, + time_stamp: Optional[datetime.datetime] = None, + ): raise NotImplementedError() - def add_layer(self, layer_id: int, - child_chunk_coords: Sequence[Sequence[int]], - time_stamp: Optional[datetime.datetime] = None, - verbose: bool = True, n_threads: int = 20) -> None: + def add_layer( + self, + layer_id: int, + child_chunk_coords: Sequence[Sequence[int]], + time_stamp: Optional[datetime.datetime] = None, + verbose: bool = True, + n_threads: int = 20, + ) -> None: raise NotImplementedError() - - def get_atomic_cross_edge_dict(self, node_id: np.uint64, - layer_ids: Sequence[int] = None): + def get_atomic_cross_edge_dict( + self, node_id: np.uint64, layer_ids: Sequence[int] = None + ): """ Extracts all atomic cross edges and serves them as a dictionary - :param node_id: np.uint64 :param layer_ids: list of ints :return: dict @@ -1505,29 +1511,25 @@ def get_atomic_cross_edge_dict(self, node_id: np.uint64, return {} columns = [column_keys.Connectivity.CrossChunkEdge[l] for l in layer_ids] - row = self.read_node_id_row(node_id, columns=columns) - if not row: return {} atomic_cross_edges = {} - for l in layer_ids: column = column_keys.Connectivity.CrossChunkEdge[l] - atomic_cross_edges[l] = [] - if column in row: atomic_cross_edges[l] = row[column][0].value - return atomic_cross_edges - def get_parents(self, node_ids: Sequence[np.uint64], - get_only_relevant_parents: bool = True, - time_stamp: Optional[datetime.datetime] = None): + def get_parents( + self, + node_ids: Sequence[np.uint64], + get_only_relevant_parents: bool = True, + time_stamp: Optional[datetime.datetime] = None, + ): """ Acquires parents of a node at a specific time stamp - :param node_ids: list of uint64 :param get_only_relevant_parents: bool True: return single parent according to time_stamp @@ -1540,33 +1542,34 @@ def get_parents(self, node_ids: Sequence[np.uint64], 
time_stamp = datetime.datetime.utcnow() if time_stamp.tzinfo is None: - time_stamp = UTC.localize(time_stamp) + time_stamp = pytz.UTC.localize(time_stamp) - parent_rows = self.read_node_id_rows(node_ids=node_ids, - columns=column_keys.Hierarchy.Parent, - end_time=time_stamp, - end_time_inclusive=True) + parent_rows = self.read_node_id_rows( + node_ids=node_ids, + columns=column_keys.Hierarchy.Parent, + end_time=time_stamp, + end_time_inclusive=True, + ) if not parent_rows: return None if get_only_relevant_parents: - return np.array([parent_rows[node_id][0].value - for node_id in node_ids]) + return np.array([parent_rows[node_id][0].value for node_id in node_ids]) parents = [] for node_id in node_ids: - parents.append([(p.value, p.timestamp) - for p in parent_rows[node_id]]) + parents.append([(p.value, p.timestamp) for p in parent_rows[node_id]]) return parents - def get_parent(self, node_id: np.uint64, - get_only_relevant_parent: bool = True, - time_stamp: Optional[datetime.datetime] = None) -> Union[ - List[Tuple[np.uint64, datetime.datetime]], np.uint64]: + def get_parent( + self, + node_id: np.uint64, + get_only_relevant_parent: bool = True, + time_stamp: Optional[datetime.datetime] = None, + ) -> Union[List[Tuple[np.uint64, datetime.datetime]], np.uint64]: """ Acquires parent of a node at a specific time stamp - :param node_id: uint64 :param get_only_relevant_parent: bool True: return single parent according to time_stamp @@ -1579,25 +1582,26 @@ def get_parent(self, node_id: np.uint64, time_stamp = datetime.datetime.utcnow() if time_stamp.tzinfo is None: - time_stamp = UTC.localize(time_stamp) + time_stamp = pytz.UTC.localize(time_stamp) - parents = self.read_node_id_row(node_id, - columns=column_keys.Hierarchy.Parent, - end_time=time_stamp, - end_time_inclusive=True) + parents = self.read_node_id_row( + node_id, + columns=column_keys.Hierarchy.Parent, + end_time=time_stamp, + end_time_inclusive=True, + ) if not parents: return None if get_only_relevant_parent: 
return parents[0].value - return [(p.value, p.timestamp) for p in parents] - def get_children(self, node_id: Union[Iterable[np.uint64], np.uint64], - flatten: bool = False) -> Union[Dict[np.uint64, np.ndarray], np.ndarray]: + def get_children( + self, node_id: Union[Iterable[np.uint64], np.uint64], flatten: bool = False + ) -> Union[Dict[np.uint64, np.ndarray], np.ndarray]: """Returns children for the specified NodeID or NodeIDs - :param node_id: The NodeID or NodeIDs for which to retrieve children :type node_id: Union[Iterable[np.uint64], np.uint64] :param flatten: If True, combine all children into a single array, else generate a map @@ -1608,39 +1612,49 @@ def get_children(self, node_id: Union[Iterable[np.uint64], np.uint64], :rtype: Union[Dict[np.uint64, np.ndarray], np.ndarray] """ if np.isscalar(node_id): - children = self.read_node_id_row(node_id=node_id, columns=column_keys.Hierarchy.Child) + children = self.read_node_id_row( + node_id=node_id, columns=column_keys.Hierarchy.Child + ) if not children: return np.empty(0, dtype=basetypes.NODE_ID) return children[0].value else: - children = self.read_node_id_rows(node_ids=node_id, columns=column_keys.Hierarchy.Child) + children = self.read_node_id_rows( + node_ids=node_id, columns=column_keys.Hierarchy.Child + ) if flatten: if not children: return np.empty(0, dtype=basetypes.NODE_ID) return np.concatenate([x[0].value for x in children.values()]) - return {x: children[x][0].value - if x in children else np.empty(0, dtype=basetypes.NODE_ID) - for x in node_id} - - def get_latest_roots(self, time_stamp: Optional[datetime.datetime] = get_max_time(), - n_threads: int = 1) -> Sequence[np.uint64]: + return { + x: children[x][0].value + if x in children + else np.empty(0, dtype=basetypes.NODE_ID) + for x in node_id + } + + def get_latest_roots( + self, + time_stamp: Optional[datetime.datetime] = get_max_time(), + n_threads: int = 1, + ) -> Sequence[np.uint64]: """ Reads _all_ root ids - :param time_stamp: 
datetime.datetime :param n_threads: int :return: array of np.uint64 """ + return chunkedgraph_comp.get_latest_roots( + self, time_stamp=time_stamp, n_threads=n_threads + ) - return chunkedgraph_comp.get_latest_roots(self, time_stamp=time_stamp, - n_threads=n_threads) - - def get_delta_roots(self, - time_stamp_start: datetime.datetime, - time_stamp_end: Optional[datetime.datetime] = None, - min_seg_id: int =1, - n_threads: int = 1) -> Sequence[np.uint64]: + def get_delta_roots( + self, + time_stamp_start: datetime.datetime, + time_stamp_end: Optional[datetime.datetime] = None, + min_seg_id: int = 1, + n_threads: int = 1, + ) -> Sequence[np.uint64]: """ Returns root ids that have expired or have been created between two timestamps - :param time_stamp_start: datetime.datetime starting timestamp to return deltas from :param time_stamp_end: datetime.datetime @@ -1655,17 +1669,22 @@ def get_delta_roots(self, expired_ids is list of node_id's for roots the expired after time_stamp_start but before time_stamp_end. 
""" + return chunkedgraph_comp.get_delta_roots( + self, + time_stamp_start=time_stamp_start, + time_stamp_end=time_stamp_end, + min_seg_id=min_seg_id, + n_threads=n_threads, + ) - return chunkedgraph_comp.get_delta_roots(self, time_stamp_start=time_stamp_start, - time_stamp_end=time_stamp_end, - min_seg_id=min_seg_id, - n_threads=n_threads) - - def get_roots(self, node_ids: Sequence[np.uint64], - time_stamp: Optional[datetime.datetime] = None, - stop_layer: int = None, n_tries: int = 1): + def get_roots( + self, + node_ids: Sequence[np.uint64], + time_stamp: Optional[datetime.datetime] = None, + stop_layer: int = None, + n_tries: int = 1, + ): """ Takes node ids and returns the associated agglomeration ids - :param node_ids: list of uint64 :param time_stamp: None or datetime :return: np.uint64 @@ -1679,18 +1698,16 @@ def get_roots(self, node_ids: Sequence[np.uint64], node_mask = np.ones(len(node_ids), dtype=np.bool) node_mask[self.get_chunk_layers(node_ids) >= stop_layer] = False - for i_try in range(n_tries): + for _ in range(n_tries): parent_ids = np.array(node_ids) - - for i_layer in range(int(stop_layer + 1)): - temp_parent_ids = self.get_parents(parent_ids[node_mask], - time_stamp=time_stamp) - + for _ in range(int(stop_layer + 1)): + temp_parent_ids = self.get_parents( + parent_ids[node_mask], time_stamp=time_stamp + ) if temp_parent_ids is None: break else: parent_ids[node_mask] = temp_parent_ids - node_mask[self.get_chunk_layers(parent_ids) >= stop_layer] = False if np.all(~node_mask): break @@ -1698,16 +1715,18 @@ def get_roots(self, node_ids: Sequence[np.uint64], if np.all(self.get_chunk_layers(parent_ids) >= stop_layer): break else: - time.sleep(.5) - + time.sleep(0.5) return parent_ids - def get_root(self, node_id: np.uint64, - time_stamp: Optional[datetime.datetime] = None, - get_all_parents=False, stop_layer: int = None, - n_tries: int = 1) -> Union[List[np.uint64], np.uint64]: + def get_root( + self, + node_id: np.uint64, + time_stamp: 
Optional[datetime.datetime] = None, + get_all_parents=False, + stop_layer: int = None, + n_tries: int = 1, + ) -> Union[List[np.uint64], np.uint64]: """ Takes a node id and returns the associated agglomeration ids - :param node_id: uint64 :param time_stamp: None or datetime :return: np.uint64 @@ -1721,58 +1740,53 @@ def get_root(self, node_id: np.uint64, else: stop_layer = self.n_layers - for i_try in range(n_tries): + for _ in range(n_tries): parent_id = node_id - - for i_layer in range(self.get_chunk_layer(node_id), - int(stop_layer + 1)): - - temp_parent_id = self.get_parent(parent_id, - time_stamp=time_stamp) - + for _ in range(self.get_chunk_layer(node_id), int(stop_layer + 1)): + temp_parent_id = self.get_parent(parent_id, time_stamp=time_stamp) if temp_parent_id is None: break else: parent_id = temp_parent_id all_parent_ids.append(parent_id) - if self.get_chunk_layer(parent_id) >= stop_layer: break - if self.get_chunk_layer(parent_id) >= stop_layer: break else: - time.sleep(.5) + time.sleep(0.5) if self.get_chunk_layer(parent_id) < stop_layer: - raise Exception("Cannot find root id {}, {}".format(node_id, - time_stamp)) + raise Exception("Cannot find root id {}, {}".format(node_id, time_stamp)) if get_all_parents: return np.array(all_parent_ids) else: return parent_id - def get_all_parents_dict(self, node_id: np.uint64, - time_stamp: Optional[datetime.datetime] = None - ) -> dict: + def get_all_parents_dict( + self, node_id: np.uint64, time_stamp: Optional[datetime.datetime] = None + ) -> dict: """ Takes a node id and returns all parents and parents' parents up to the top - :param node_id: uint64 :param time_stamp: None or datetime :return: dict """ - parent_ids = self.get_root(node_id=node_id, time_stamp=time_stamp, - get_all_parents=True) + parent_ids = self.get_root( + node_id=node_id, time_stamp=time_stamp, get_all_parents=True + ) parent_id_layers = self.get_chunk_layers(parent_ids) return dict(zip(parent_id_layers, parent_ids)) - def 
lock_root_loop(self, root_ids: Sequence[np.uint64], - operation_id: np.uint64, max_tries: int = 1, - waittime_s: float = 0.5) -> Tuple[bool, np.ndarray]: + def lock_root_loop( + self, + root_ids: Sequence[np.uint64], + operation_id: np.uint64, + max_tries: int = 1, + waittime_s: float = 0.5, + ) -> Tuple[bool, np.ndarray]: """ Attempts to lock multiple roots at the same time - :param root_ids: list of uint64 :param operation_id: uint64 :param max_tries: int @@ -1780,16 +1794,13 @@ def lock_root_loop(self, root_ids: Sequence[np.uint64], :return: bool, list of uint64s success, latest root ids """ - i_try = 0 while i_try < max_tries: lock_acquired = False - # Collect latest root ids new_root_ids: List[np.uint64] = [] for i_root_id in range(len(root_ids)): future_root_ids = self.get_future_root_ids(root_ids[i_root_id]) - if len(future_root_ids) == 0: new_root_ids.append(root_ids[i_root_id]) else: @@ -1797,14 +1808,12 @@ def lock_root_loop(self, root_ids: Sequence[np.uint64], # Attempt to lock all latest root ids root_ids = np.unique(new_root_ids) - for i_root_id in range(len(root_ids)): - - self.logger.debug("operation id: %d - root id: %d" % - (operation_id, root_ids[i_root_id])) - lock_acquired = self.lock_single_root(root_ids[i_root_id], - operation_id) - + self.logger.debug( + "operation id: %d - root id: %d" + % (operation_id, root_ids[i_root_id]) + ) + lock_acquired = self.lock_single_root(root_ids[i_root_id], operation_id) # Roll back locks if one root cannot be locked if not lock_acquired: for j_root_id in range(len(root_ids)): @@ -1817,11 +1826,9 @@ def lock_root_loop(self, root_ids: Sequence[np.uint64], time.sleep(waittime_s) i_try += 1 self.logger.debug(f"Try {i_try}") - return False, root_ids - def lock_single_root(self, root_id: np.uint64, operation_id: np.uint64 - ) -> bool: + def lock_single_root(self, root_id: np.uint64, operation_id: np.uint64) -> bool: """ Attempts to lock the latest version of a root node :param root_id: uint64 @@ -1830,9 +1837,7 @@ 
def lock_single_root(self, root_id: np.uint64, operation_id: np.uint64 :return: bool success """ - operation_id_b = serializers.serialize_uint64(operation_id) - lock_column = column_keys.Concurrency.Lock new_parents_column = column_keys.Hierarchy.NewParent @@ -1842,13 +1847,9 @@ def lock_single_root(self, root_id: np.uint64, operation_id: np.uint64 # exists) time_cutoff = datetime.datetime.utcnow() - LOCK_EXPIRED_TIME_DELTA - # Comply to resolution of BigTables TimeRange - time_cutoff -= datetime.timedelta( - microseconds=time_cutoff.microsecond % 1000) - + time_cutoff -= datetime.timedelta(microseconds=time_cutoff.microsecond % 1000) time_filter = TimestampRangeFilter(TimestampRange(start=time_cutoff)) - # lock_key_filter = ColumnQualifierRegexFilter(lock_column.key) # new_parents_key_filter = ColumnQualifierRegexFilter(new_parents_column.key) @@ -1857,45 +1858,51 @@ def lock_single_root(self, root_id: np.uint64, operation_id: np.uint64 start_column=lock_column.key, end_column=lock_column.key, inclusive_start=True, - inclusive_end=True) + inclusive_end=True, + ) new_parents_key_filter = ColumnRangeFilter( column_family_id=new_parents_column.family_id, start_column=new_parents_column.key, end_column=new_parents_column.key, inclusive_start=True, - inclusive_end=True) + inclusive_end=True, + ) # Combine filters together chained_filter = RowFilterChain([time_filter, lock_key_filter]) combined_filter = ConditionalRowFilter( base_filter=chained_filter, true_filter=PassAllFilter(True), - false_filter=new_parents_key_filter) + false_filter=new_parents_key_filter, + ) # Get conditional row using the chained filter - root_row = self.table.row(serializers.serialize_uint64(root_id), - filter_=combined_filter) + root_row = self.table.row( + serializers.serialize_uint64(root_id), filter_=combined_filter + ) # Set row lock if condition returns no results (state == False) time_stamp = get_valid_timestamp(None) - root_row.set_cell(lock_column.family_id, lock_column.key, 
operation_id_b, state=False, - timestamp=time_stamp) + root_row.set_cell( + lock_column.family_id, + lock_column.key, + operation_id_b, + state=False, + timestamp=time_stamp, + ) # The lock was acquired when set_cell returns False (state) lock_acquired = not root_row.commit() if not lock_acquired: row = self.read_node_id_row(root_id, columns=lock_column) - l_operation_ids = [cell.value for cell in row] self.logger.debug(f"Locked operation ids: {l_operation_ids}") - return lock_acquired def unlock_root(self, root_id: np.uint64, operation_id: np.uint64) -> bool: """ Unlocks a root - This is mainly used for cases where multiple roots need to be locked and locking was not sucessful for all of them @@ -1914,11 +1921,8 @@ def unlock_root(self, root_id: np.uint64, operation_id: np.uint64) -> bool: # the active lock holder time_cutoff = datetime.datetime.utcnow() - LOCK_EXPIRED_TIME_DELTA - # Comply to resolution of BigTables TimeRange - time_cutoff -= datetime.timedelta( - microseconds=time_cutoff.microsecond % 1000) - + time_cutoff -= datetime.timedelta(microseconds=time_cutoff.microsecond % 1000) time_filter = TimestampRangeFilter(TimestampRange(start=time_cutoff)) # column_key_filter = ColumnQualifierRegexFilter(lock_column.key) @@ -1929,55 +1933,52 @@ def unlock_root(self, root_id: np.uint64, operation_id: np.uint64) -> bool: start_column=lock_column.key, end_column=lock_column.key, inclusive_start=True, - inclusive_end=True) + inclusive_end=True, + ) value_filter = ValueRangeFilter( start_value=operation_id_b, end_value=operation_id_b, inclusive_start=True, - inclusive_end=True) + inclusive_end=True, + ) # Chain these filters together - chained_filter = RowFilterChain([time_filter, column_key_filter, - value_filter]) + chained_filter = RowFilterChain([time_filter, column_key_filter, value_filter]) # Get conditional row using the chained filter - root_row = self.table.row(serializers.serialize_uint64(root_id), - filter_=chained_filter) + root_row = self.table.row( + 
serializers.serialize_uint64(root_id), filter_=chained_filter + ) # Delete row if conditions are met (state == True) root_row.delete_cell(lock_column.family_id, lock_column.key, state=True) - return root_row.commit() - def check_and_renew_root_locks(self, root_ids: Iterable[np.uint64], - operation_id: np.uint64) -> bool: + def check_and_renew_root_locks( + self, root_ids: Iterable[np.uint64], operation_id: np.uint64 + ) -> bool: """ Tests if the roots are locked with the provided operation_id and renews the lock to reset the time_stam - This is mainly used before executing a bulk write - :param root_ids: uint64 :param operation_id: uint64 an id that is unique to the process asking to lock the root node :return: bool success """ - for root_id in root_ids: if not self.check_and_renew_root_lock_single(root_id, operation_id): self.logger.warning(f"check_and_renew_root_locks failed - {root_id}") return False - return True - def check_and_renew_root_lock_single(self, root_id: np.uint64, - operation_id: np.uint64) -> bool: + def check_and_renew_root_lock_single( + self, root_id: np.uint64, operation_id: np.uint64 + ) -> bool: """ Tests if the root is locked with the provided operation_id and renews the lock to reset the time_stam - This is mainly used before executing a bulk write - :param root_id: uint64 :param operation_id: uint64 an id that is unique to the process asking to lock the root node @@ -1986,7 +1987,6 @@ def check_and_renew_root_lock_single(self, root_id: np.uint64, """ lock_column = column_keys.Concurrency.Lock new_parents_column = column_keys.Hierarchy.NewParent - operation_id_b = lock_column.serialize(operation_id) # Build a column filter which tests if a lock was set (== lock column @@ -1997,51 +1997,55 @@ def check_and_renew_root_lock_single(self, root_id: np.uint64, # column_key_filter = ColumnQualifierRegexFilter(lock_column.key) # value_filter = ColumnQualifierRegexFilter(operation_id_b) - column_key_filter = ColumnRangeFilter( 
column_family_id=lock_column.family_id, start_column=lock_column.key, end_column=lock_column.key, inclusive_start=True, - inclusive_end=True) + inclusive_end=True, + ) value_filter = ValueRangeFilter( start_value=operation_id_b, end_value=operation_id_b, inclusive_start=True, - inclusive_end=True) + inclusive_end=True, + ) new_parents_key_filter = ColumnRangeFilter( column_family_id=self.family_id, start_column=new_parents_column.key, end_column=new_parents_column.key, inclusive_start=True, - inclusive_end=True) + inclusive_end=True, + ) # Chain these filters together chained_filter = RowFilterChain([column_key_filter, value_filter]) combined_filter = ConditionalRowFilter( base_filter=chained_filter, true_filter=new_parents_key_filter, - false_filter=PassAllFilter(True)) + false_filter=PassAllFilter(True), + ) # Get conditional row using the chained filter - root_row = self.table.row(serializers.serialize_uint64(root_id), - filter_=combined_filter) + root_row = self.table.row( + serializers.serialize_uint64(root_id), filter_=combined_filter + ) # Set row lock if condition returns a result (state == True) - root_row.set_cell(lock_column.family_id, lock_column.key, operation_id_b, state=False) + root_row.set_cell( + lock_column.family_id, lock_column.key, operation_id_b, state=False + ) # The lock was acquired when set_cell returns True (state) lock_acquired = not root_row.commit() - return lock_acquired - def read_consolidated_lock_timestamp(self, root_ids: Sequence[np.uint64], - operation_ids: Sequence[np.uint64] - ) -> Union[datetime.datetime, None]: + def read_consolidated_lock_timestamp( + self, root_ids: Sequence[np.uint64], operation_ids: Sequence[np.uint64] + ) -> Union[datetime.datetime, None]: """ Returns minimum of many lock timestamps - :param root_ids: np.ndarray :param operation_ids: np.ndarray :return: @@ -2049,30 +2053,24 @@ def read_consolidated_lock_timestamp(self, root_ids: Sequence[np.uint64], time_stamps = [] for root_id, operation_id in 
zip(root_ids, operation_ids): time_stamp = self.read_lock_timestamp(root_id, operation_id) - if time_stamp is None: return None - time_stamps.append(time_stamp) - if len(time_stamps) == 0: return None - return np.min(time_stamps) - def read_lock_timestamp(self, root_id: np.uint64, operation_id: np.uint64 - ) -> Union[datetime.datetime, None]: + def read_lock_timestamp( + self, root_id: np.uint64, operation_id: np.uint64 + ) -> Union[datetime.datetime, None]: """ Reads timestamp from lock row to get a consistent timestamp across multiple nodes / pods - :param root_id: np.uint64 :param operation_id: np.uint64 Checks whether the root_id is actually locked with this operation_id :return: datetime.datetime or None """ - row = self.read_node_id_row(root_id, - columns=column_keys.Concurrency.Lock) - + row = self.read_node_id_row(root_id, columns=column_keys.Concurrency.Lock) if len(row) == 0: self.logger.warning(f"No lock found for {root_id}") return None @@ -2080,25 +2078,20 @@ def read_lock_timestamp(self, root_id: np.uint64, operation_id: np.uint64 if row[0].value != operation_id: self.logger.warning(f"{root_id} not locked with {operation_id}") return None - return row[0].timestamp def get_latest_root_id(self, root_id: np.uint64) -> np.ndarray: """ Returns the latest root id associated with the provided root id - :param root_id: uint64 :return: list of uint64s """ - id_working_set = [root_id] column = column_keys.Hierarchy.NewParent latest_root_ids = [] - while len(id_working_set) > 0: next_id = id_working_set[0] - del(id_working_set[0]) + del id_working_set[0] row = self.read_node_id_row(next_id, columns=column) - # Check if a new root id was attached to this root id if row: id_working_set.extend(row[0].value) @@ -2107,14 +2100,14 @@ def get_latest_root_id(self, root_id: np.uint64) -> np.ndarray: return np.unique(latest_root_ids) - def get_future_root_ids(self, root_id: np.uint64, - time_stamp: Optional[datetime.datetime] = - get_max_time())-> np.ndarray: + def 
get_future_root_ids( + self, + root_id: np.uint64, + time_stamp: Optional[datetime.datetime] = get_max_time(), + ) -> np.ndarray: """ Returns all future root ids emerging from this root - This search happens in a monotic fashion. At no point are past root ids of future root ids taken into account. - :param root_id: np.uint64 :param time_stamp: None or datetime restrict search to ids created before this time_stamp @@ -2126,10 +2119,14 @@ def get_future_root_ids(self, root_id: np.uint64, next_ids = [root_id] while len(next_ids): temp_next_ids = [] - for next_id in next_ids: - row = self.read_node_id_row(next_id, columns=[column_keys.Hierarchy.NewParent, - column_keys.Hierarchy.Child]) + row = self.read_node_id_row( + next_id, + columns=[ + column_keys.Hierarchy.NewParent, + column_keys.Hierarchy.Child, + ], + ) if column_keys.Hierarchy.NewParent in row: ids = row[column_keys.Hierarchy.NewParent][0].value row_time_stamp = row[column_keys.Hierarchy.NewParent][0].timestamp @@ -2137,27 +2134,27 @@ def get_future_root_ids(self, root_id: np.uint64, ids = None row_time_stamp = row[column_keys.Hierarchy.Child][0].timestamp else: - raise cg_exceptions.ChunkedGraphError("Error retrieving future root ID of %s" % next_id) + raise cg_exceptions.ChunkedGraphError( + "Error retrieving future root ID of %s" % next_id + ) if row_time_stamp < time_stamp: if ids is not None: temp_next_ids.extend(ids) - if next_id != root_id: id_history.append(next_id) next_ids = temp_next_ids - return np.unique(np.array(id_history, dtype=np.uint64)) - def get_past_root_ids(self, root_id: np.uint64, - time_stamp: Optional[datetime.datetime] = - get_min_time()) -> np.ndarray: + def get_past_root_ids( + self, + root_id: np.uint64, + time_stamp: Optional[datetime.datetime] = get_min_time(), + ) -> np.ndarray: """ Returns all future root ids emerging from this root - This search happens in a monotic fashion. At no point are future root ids of past root ids taken into account. 
- :param root_id: np.uint64 :param time_stamp: None or datetime restrict search to ids created after this time_stamp @@ -2169,18 +2166,26 @@ def get_past_root_ids(self, root_id: np.uint64, next_ids = [root_id] while len(next_ids): temp_next_ids = [] - for next_id in next_ids: - row = self.read_node_id_row(next_id, columns=[column_keys.Hierarchy.FormerParent, - column_keys.Hierarchy.Child]) + row = self.read_node_id_row( + next_id, + columns=[ + column_keys.Hierarchy.FormerParent, + column_keys.Hierarchy.Child, + ], + ) if column_keys.Hierarchy.FormerParent in row: ids = row[column_keys.Hierarchy.FormerParent][0].value - row_time_stamp = row[column_keys.Hierarchy.FormerParent][0].timestamp + row_time_stamp = row[column_keys.Hierarchy.FormerParent][ + 0 + ].timestamp elif column_keys.Hierarchy.Child in row: ids = None row_time_stamp = row[column_keys.Hierarchy.Child][0].timestamp else: - raise cg_exceptions.ChunkedGraphError("Error retrieving past root ID of %s" % next_id) + raise cg_exceptions.ChunkedGraphError( + "Error retrieving past root ID of %s" % next_id + ) if row_time_stamp > time_stamp: if ids is not None: @@ -2190,21 +2195,18 @@ def get_past_root_ids(self, root_id: np.uint64, id_history.append(next_id) next_ids = temp_next_ids - return np.unique(np.array(id_history, dtype=np.uint64)) - def get_root_id_history(self, root_id: np.uint64, - time_stamp_past: - Optional[datetime.datetime] = get_min_time(), - time_stamp_future: - Optional[datetime.datetime] = get_max_time() - ) -> np.ndarray: + def get_root_id_history( + self, + root_id: np.uint64, + time_stamp_past: Optional[datetime.datetime] = get_min_time(), + time_stamp_future: Optional[datetime.datetime] = get_max_time(), + ) -> np.ndarray: """ Returns all future root ids emerging from this root - This search happens in a monotic fashion. At no point are future root ids of past root ids or past root ids of future root ids taken into account. 
- :param root_id: np.uint64 :param time_stamp_past: None or datetime restrict search to ids created after this time_stamp @@ -2214,26 +2216,25 @@ def get_root_id_history(self, root_id: np.uint64, None=search whole future :return: array of uint64 """ - past_ids = self.get_past_root_ids(root_id=root_id, - time_stamp=time_stamp_past) - future_ids = self.get_future_root_ids(root_id=root_id, - time_stamp=time_stamp_future) - - history_ids = np.concatenate([past_ids, - np.array([root_id], dtype=np.uint64), - future_ids]) + past_ids = self.get_past_root_ids(root_id=root_id, time_stamp=time_stamp_past) + future_ids = self.get_future_root_ids( + root_id=root_id, time_stamp=time_stamp_future + ) + history_ids = np.concatenate( + [past_ids, np.array([root_id], dtype=np.uint64), future_ids] + ) return history_ids - def get_change_log(self, root_id: np.uint64, - correct_for_wrong_coord_type: bool = True, - time_stamp_past: Optional[datetime.datetime] = get_min_time() - ) -> dict: + def get_change_log( + self, + root_id: np.uint64, + correct_for_wrong_coord_type: bool = True, + time_stamp_past: Optional[datetime.datetime] = get_min_time(), + ) -> dict: """ Returns all past root ids for this root - This search happens in a monotic fashion. At no point are future root ids of past root ids taken into account. - :param root_id: np.uint64 :param correct_for_wrong_coord_type: bool pinky100? 
--> True @@ -2243,7 +2244,7 @@ def get_change_log(self, root_id: np.uint64, :return: past ids, merge sv ids, merge edge coords, split sv ids """ if time_stamp_past.tzinfo is None: - time_stamp_past = UTC.localize(time_stamp_past) + time_stamp_past = pytz.UTC.localize(time_stamp_past) id_history = [] merge_history = [] @@ -2254,19 +2255,16 @@ def get_change_log(self, root_id: np.uint64, while len(next_ids): temp_next_ids = [] former_parent_col = column_keys.Hierarchy.FormerParent - row_dict = self.read_node_id_rows(node_ids=next_ids, - columns=[former_parent_col]) - + row_dict = self.read_node_id_rows( + node_ids=next_ids, columns=[former_parent_col] + ) for row in row_dict.values(): if column_keys.Hierarchy.FormerParent in row: if time_stamp_past > row[former_parent_col][0].timestamp: continue - ids = row[former_parent_col][0].value - lock_col = column_keys.Concurrency.Lock - former_row = self.read_node_id_row(ids[0], - columns=[lock_col]) + former_row = self.read_node_id_row(ids[0], columns=[lock_col]) operation_id = former_row[lock_col][0].value log_row = self.read_log_row(operation_id) is_merge = column_keys.OperationLogs.AddedEdge in log_row @@ -2274,21 +2272,23 @@ def get_change_log(self, root_id: np.uint64, for id_ in ids: if id_ in id_history: continue - id_history.append(id_) temp_next_ids.append(id_) if is_merge: added_edges = log_row[column_keys.OperationLogs.AddedEdge] merge_history.append(added_edges) - - coords = [log_row[column_keys.OperationLogs.SourceCoordinate], - log_row[column_keys.OperationLogs.SinkCoordinate]] + coords = [ + log_row[column_keys.OperationLogs.SourceCoordinate], + log_row[column_keys.OperationLogs.SinkCoordinate], + ] if correct_for_wrong_coord_type: # A little hack because we got the datatype wrong... 
- coords = [np.frombuffer(coords[0]), - np.frombuffer(coords[1])] + coords = [ + np.frombuffer(coords[0]), + np.frombuffer(coords[1]), + ] coords *= self.segmentation_resolution merge_history_edges.append(coords) @@ -2300,56 +2300,71 @@ def get_change_log(self, root_id: np.uint64, continue next_ids = temp_next_ids - - return {"past_ids": np.unique(np.array(id_history, dtype=np.uint64)), - "merge_edges": np.array(merge_history), - "merge_edge_coords": np.array(merge_history_edges), - "split_edges": np.array(split_history)} - - def normalize_bounding_box(self, - bounding_box: Optional[Sequence[Sequence[int]]], - bb_is_coordinate: bool) -> \ - Union[Sequence[Sequence[int]], None]: + return { + "past_ids": np.unique(np.array(id_history, dtype=np.uint64)), + "merge_edges": np.array(merge_history), + "merge_edge_coords": np.array(merge_history_edges), + "split_edges": np.array(split_history), + } + + def normalize_bounding_box( + self, bounding_box: Optional[Sequence[Sequence[int]]], bb_is_coordinate: bool + ) -> Union[Sequence[Sequence[int]], None]: if bounding_box is None: return None if bb_is_coordinate: bounding_box[0] = self.get_chunk_coordinates_from_vol_coordinates( - bounding_box[0][0], bounding_box[0][1], bounding_box[0][2], - resolution=self.cv.resolution, ceil=False) + bounding_box[0][0], + bounding_box[0][1], + bounding_box[0][2], + resolution=self.cv.resolution, + ceil=False, + ) bounding_box[1] = self.get_chunk_coordinates_from_vol_coordinates( - bounding_box[1][0], bounding_box[1][1], bounding_box[1][2], - resolution=self.cv.resolution, ceil=True) + bounding_box[1][0], + bounding_box[1][1], + bounding_box[1][2], + resolution=self.cv.resolution, + ceil=True, + ) return bounding_box else: return np.array(bounding_box, dtype=np.int) def _get_subgraph_higher_layer_nodes( - self, node_id: np.uint64, - bounding_box: Optional[Sequence[Sequence[int]]], - return_layers: Sequence[int], - verbose: bool): - + self, + node_id: np.uint64, + bounding_box: 
Optional[Sequence[Sequence[int]]], + return_layers: Sequence[int], + verbose: bool, + ): def _get_subgraph_higher_layer_nodes_threaded( - node_ids: Iterable[np.uint64]) -> List[np.uint64]: + node_ids: Iterable[np.uint64] + ) -> List[np.uint64]: children = self.get_children(node_ids, flatten=True) - if len(children) > 0 and bounding_box is not None: - chunk_coordinates = np.array([self.get_chunk_coordinates(c) for c in children]) + chunk_coordinates = np.array( + [self.get_chunk_coordinates(c) for c in children] + ) child_layers = self.get_chunk_layers(children) adapt_child_layers = child_layers - 2 adapt_child_layers[adapt_child_layers < 0] = 0 - - bounding_box_layer = bounding_box[None] / \ - (self.fan_out ** adapt_child_layers)[:, None, None] - - bound_check = np.array([ - np.all(chunk_coordinates < bounding_box_layer[:, 1], axis=1), - np.all(chunk_coordinates + 1 > bounding_box_layer[:, 0], axis=1)]).T + bounding_box_layer = ( + bounding_box[None] + / (self.fan_out ** adapt_child_layers)[:, None, None] + ) + bound_check = np.array( + [ + np.all(chunk_coordinates < bounding_box_layer[:, 1], axis=1), + np.all( + chunk_coordinates + 1 > bounding_box_layer[:, 0], axis=1 + ), + ] + ).T bound_check_mask = np.all(bound_check, axis=1) children = children[bound_check_mask] - return children if bounding_box is not None: @@ -2378,30 +2393,44 @@ def _get_subgraph_higher_layer_nodes_threaded( n_child_ids = len(child_ids) this_n_threads = np.min([int(n_child_ids // 50000) + 1, mu.n_cpus]) - child_ids = np.fromiter(chain.from_iterable(mu.multithread_func( - _get_subgraph_higher_layer_nodes_threaded, - np.array_split(this_layer_child_ids, this_n_threads), - n_threads=this_n_threads, debug=this_n_threads == 1)), np.uint64) + child_ids = np.fromiter( + chain.from_iterable( + mu.multithread_func( + _get_subgraph_higher_layer_nodes_threaded, + np.array_split(this_layer_child_ids, this_n_threads), + n_threads=this_n_threads, + debug=this_n_threads == 1, + ) + ), + np.uint64, + ) 
child_ids = np.concatenate([child_ids, next_layer_child_ids]) if verbose: - self.logger.debug("Layer %d: %.3fms for %d children with %d threads" % - (layer, (time.time() - time_start) * 1000, n_child_ids, - this_n_threads)) + self.logger.debug( + "Layer %d: %.3fms for %d children with %d threads" + % ( + layer, + (time.time() - time_start) * 1000, + n_child_ids, + this_n_threads, + ) + ) time_start = time.time() layer -= 1 if layer in return_layers: nodes_per_layer[layer] = child_ids - return nodes_per_layer - def get_subgraph_edges(self, agglomeration_id: np.uint64, - bounding_box: Optional[Sequence[Sequence[int]]] = None, - bb_is_coordinate: bool = False, - connected_edges=True, - verbose: bool = True - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + def get_subgraph_edges( + self, + agglomeration_id: np.uint64, + bounding_box: Optional[Sequence[Sequence[int]]] = None, + bb_is_coordinate: bool = False, + connected_edges=True, + verbose: bool = True, + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ Return all atomic edges between supervoxels belonging to the specified agglomeration ID within the defined bounding box @@ -2409,7 +2438,7 @@ def get_subgraph_edges(self, agglomeration_id: np.uint64, return self.get_subgraph( np.array([agglomeration_id]), bbox=bounding_box, - bbox_is_coordinate=bb_is_coordinate + bbox_is_coordinate=bb_is_coordinate, ) def get_subgraph( @@ -2419,7 +2448,7 @@ def get_subgraph( bbox_is_coordinate: bool = False, cv_threads: int = 1, active_edges: bool = True, - timestamp: datetime.datetime = None + timestamp: datetime.datetime = None, ) -> Tuple[Dict, Dict]: """ 1. get level 2 children ids belonging to the agglomerations @@ -2431,6 +2460,7 @@ def get_subgraph( if parent(v1) == parent(v2) inactive otherwise 7. 
returns dict of Agglomerations """ + def _read_edges(chunk_ids) -> dict: return get_chunk_edges( self.cv_edges_path, @@ -2444,7 +2474,7 @@ def _read_edges(chunk_ids) -> dict: node_id=agglomeration_id, bounding_box=self.normalize_bounding_box(bbox, bbox_is_coordinate), return_layers=[2], - verbose=False + verbose=False, ) level2_ids.append(layer_nodes_d[2]) level2_ids = np.concatenate(level2_ids) @@ -2458,12 +2488,12 @@ def _read_edges(chunk_ids) -> dict: debug=False, ) edges_dict = concatenate_chunk_edges(chunk_edge_dicts) - edges = reduce(lambda x, y: x+y, edges_dict.values()) + edges = reduce(lambda x, y: x + y, edges_dict.values()) # # include fake edges # chunk_fake_edges_d = self.read_node_id_rows( # node_ids=chunk_ids, # columns=column_keys.Connectivity.FakeEdges) - # fake_edges = np.concatenate([list(chunk_fake_edges_d.values())]) + # fake_edges = np.concatenate([list(chunk_fake_edges_d.values())]) # if fake_edges.size: # fake_edges = Edges(fake_edges[:,0], fake_edges[:,1]) # edges += fake_edges @@ -2475,19 +2505,18 @@ def _read_edges(chunk_ids) -> dict: supervoxels = l2id_children_d[l2id] filtered_edges = filter_edges(l2id_children_d[l2id], edges) if active_edges: - filtered_edges = get_active_edges( - filtered_edges, - l2id_children_d - ) + filtered_edges = get_active_edges(filtered_edges, l2id_children_d) l2id_agglomeration_d[l2id] = Agglomeration(supervoxels, filtered_edges) return l2id_agglomeration_d - def get_subgraph_nodes(self, agglomeration_id: np.uint64, - bounding_box: Optional[Sequence[Sequence[int]]] = None, - bb_is_coordinate: bool = False, - return_layers: List[int] = [1], - verbose: bool = True) -> \ - Union[Dict[int, np.ndarray], np.ndarray]: + def get_subgraph_nodes( + self, + agglomeration_id: np.uint64, + bounding_box: Optional[Sequence[Sequence[int]]] = None, + bb_is_coordinate: bool = False, + return_layers: List[int] = [1], + verbose: bool = True, + ) -> Union[Dict[int, np.ndarray], np.ndarray]: """ Return all nodes belonging to the 
specified agglomeration ID within the defined bounding box and requested layers. @@ -2504,24 +2533,28 @@ def _get_subgraph_layer2_nodes(node_ids: Iterable[np.uint64]) -> np.ndarray: return self.get_children(node_ids, flatten=True) stop_layer = np.min(return_layers) - bounding_box = self.normalize_bounding_box(bounding_box, - bb_is_coordinate) + bounding_box = self.normalize_bounding_box(bounding_box, bb_is_coordinate) # Layer 3+ if stop_layer >= 2: nodes_per_layer = self._get_subgraph_higher_layer_nodes( - node_id=agglomeration_id, bounding_box=bounding_box, - return_layers=return_layers, verbose=verbose) + node_id=agglomeration_id, + bounding_box=bounding_box, + return_layers=return_layers, + verbose=verbose, + ) else: # Need to retrieve layer 2 even if the user doesn't require it nodes_per_layer = self._get_subgraph_higher_layer_nodes( - node_id=agglomeration_id, bounding_box=bounding_box, - return_layers=return_layers+[2], verbose=verbose) + node_id=agglomeration_id, + bounding_box=bounding_box, + return_layers=return_layers + [2], + verbose=verbose, + ) # Layer 2 if verbose: time_start = time.time() - child_ids = nodes_per_layer[2] if 2 not in return_layers: del nodes_per_layer[2] @@ -2530,36 +2563,41 @@ def _get_subgraph_layer2_nodes(node_ids: Iterable[np.uint64]) -> np.ndarray: n_child_ids = len(child_ids) this_n_threads = np.min([int(n_child_ids // 50000) + 1, mu.n_cpus]) - child_ids = np.fromiter(chain.from_iterable(mu.multithread_func( - _get_subgraph_layer2_nodes, - np.array_split(child_ids, this_n_threads), - n_threads=this_n_threads, debug=this_n_threads == 1)), dtype=np.uint64) - + child_ids = np.fromiter( + chain.from_iterable( + mu.multithread_func( + _get_subgraph_layer2_nodes, + np.array_split(child_ids, this_n_threads), + n_threads=this_n_threads, + debug=this_n_threads == 1, + ) + ), + dtype=np.uint64, + ) if verbose: - self.logger.debug("Layer 2: %.3fms for %d children with %d threads" % - ((time.time() - time_start) * 1000, n_child_ids, - 
this_n_threads)) + self.logger.debug( + "Layer 2: %.3fms for %d children with %d threads" + % ((time.time() - time_start) * 1000, n_child_ids, this_n_threads) + ) nodes_per_layer[1] = child_ids - if len(nodes_per_layer) == 1: return list(nodes_per_layer.values())[0] else: return nodes_per_layer - def flatten_row_dict(self, row_dict: Dict[column_keys._Column, - List[bigtable.row_data.Cell]]) -> Dict: + def flatten_row_dict( + self, row_dict: Dict[column_keys._Column, List[bigtable.row_data.Cell]] + ) -> Dict: """ Flattens multiple entries to columns by appending them :param row_dict: dict family key has to be resolved :return: dict """ - flattened_row_dict = {} for column, column_entries in row_dict.items(): flattened_row_dict[column] = [] - if len(column_entries) > 0: for column_entry in column_entries[::-1]: flattened_row_dict[column].append(column_entry.value) @@ -2567,77 +2605,74 @@ def flatten_row_dict(self, row_dict: Dict[column_keys._Column, if np.isscalar(column_entry.value): flattened_row_dict[column] = np.array(flattened_row_dict[column]) else: - flattened_row_dict[column] = np.concatenate(flattened_row_dict[column]) + flattened_row_dict[column] = np.concatenate( + flattened_row_dict[column] + ) else: - flattened_row_dict[column] = column.deserialize(b'') + flattened_row_dict[column] = column.deserialize(b"") if column == column_keys.Connectivity.Connected: - u_ids, c_ids = np.unique(flattened_row_dict[column], - return_counts=True) - flattened_row_dict[column] = u_ids[(c_ids % 2) == 1].astype(column.basetype) - + u_ids, c_ids = np.unique(flattened_row_dict[column], return_counts=True) + flattened_row_dict[column] = u_ids[(c_ids % 2) == 1].astype( + column.basetype + ) return flattened_row_dict def get_chunk_split_partners(self, atomic_id: np.uint64): """ Finds all atomic nodes beloning to the same supervoxel before chunking (affs == inf) - :param atomic_id: np.uint64 :return: list of np.uint64 """ - chunk_split_partners = [atomic_id] atomic_ids = 
[atomic_id] - while len(atomic_ids) > 0: atomic_id = atomic_ids[0] del atomic_ids[0] - - partners, affs, _ = self.get_atomic_partners(atomic_id, - include_connected_partners=True, - include_disconnected_partners=False) + partners, affs, _ = self.get_atomic_partners( + atomic_id, + include_connected_partners=True, + include_disconnected_partners=False, + ) m = np.isinf(affs) - inf_partners = partners[m] - new_chunk_split_partners = inf_partners[~np.in1d(inf_partners, chunk_split_partners)] + new_chunk_split_partners = inf_partners[ + ~np.in1d(inf_partners, chunk_split_partners) + ] atomic_ids.extend(new_chunk_split_partners) chunk_split_partners.extend(new_chunk_split_partners) - return chunk_split_partners def get_all_original_partners(self, atomic_id: np.uint64): """ Finds all partners from the unchunked region graph Merges split supervoxels over chunk boundaries first (affs == inf) - :param atomic_id: np.uint64 :return: dict np.uint64 -> np.uint64 """ - atomic_ids = [atomic_id] partner_dict = {} - while len(atomic_ids) > 0: atomic_id = atomic_ids[0] del atomic_ids[0] - - partners, affs, _ = self.get_atomic_partners(atomic_id, - include_connected_partners=True, - include_disconnected_partners=False) + partners, affs, _ = self.get_atomic_partners( + atomic_id, + include_connected_partners=True, + include_disconnected_partners=False, + ) m = np.isinf(affs) partner_dict[atomic_id] = partners[~m] - inf_partners = partners[m] new_chunk_split_partners = inf_partners[ - ~np.in1d(inf_partners, list(partner_dict.keys()))] + ~np.in1d(inf_partners, list(partner_dict.keys())) + ] atomic_ids.extend(new_chunk_split_partners) - return partner_dict - def get_atomic_node_partners(self, atomic_id: np.uint64, - time_stamp: datetime.datetime = get_max_time() - ) -> Dict: + def get_atomic_node_partners( + self, atomic_id: np.uint64, time_stamp: datetime.datetime = get_max_time() + ) -> Dict: """ Reads register partner ids :param atomic_id: np.uint64 @@ -2647,8 +2682,9 @@ def 
get_atomic_node_partners(self, atomic_id: np.uint64, col_partner = column_keys.Connectivity.Partner col_connected = column_keys.Connectivity.Connected columns = [col_partner, col_connected] - row_dict = self.read_node_id_row(atomic_id, columns=columns, - end_time=time_stamp, end_time_inclusive=True) + row_dict = self.read_node_id_row( + atomic_id, columns=columns, end_time=time_stamp, end_time_inclusive=True + ) flattened_row_dict = self.flatten_row_dict(row_dict) return flattened_row_dict[col_partner][flattened_row_dict[col_connected]] @@ -2660,37 +2696,45 @@ def _get_atomic_node_info_core(self, row_dict) -> Dict: :return: dict """ flattened_row_dict = self.flatten_row_dict(row_dict) - all_ids = np.arange(len(flattened_row_dict[column_keys.Connectivity.Partner]), - dtype=column_keys.Connectivity.Partner.basetype) - disconnected_m = ~np.in1d(all_ids, - flattened_row_dict[column_keys.Connectivity.Connected]) - flattened_row_dict[column_keys.Connectivity.Disconnected] = all_ids[disconnected_m] - + all_ids = np.arange( + len(flattened_row_dict[column_keys.Connectivity.Partner]), + dtype=column_keys.Connectivity.Partner.basetype, + ) + disconnected_m = ~np.in1d( + all_ids, flattened_row_dict[column_keys.Connectivity.Connected] + ) + flattened_row_dict[column_keys.Connectivity.Disconnected] = all_ids[ + disconnected_m + ] return flattened_row_dict - def get_atomic_node_info(self, atomic_id: np.uint64, - time_stamp: datetime.datetime = get_max_time() - ) -> Dict: + def get_atomic_node_info( + self, atomic_id: np.uint64, time_stamp: datetime.datetime = get_max_time() + ) -> Dict: """ Reads connectivity information for a single node - :param atomic_id: np.uint64 :param time_stamp: datetime.datetime :return: dict """ - columns = [column_keys.Connectivity.Connected, column_keys.Connectivity.Affinity, - column_keys.Connectivity.Area, column_keys.Connectivity.Partner, - column_keys.Hierarchy.Parent] - row_dict = self.read_node_id_row(atomic_id, columns=columns, - 
end_time=time_stamp, end_time_inclusive=True) - + columns = [ + column_keys.Connectivity.Connected, + column_keys.Connectivity.Affinity, + column_keys.Connectivity.Area, + column_keys.Connectivity.Partner, + column_keys.Hierarchy.Parent, + ] + row_dict = self.read_node_id_row( + atomic_id, columns=columns, end_time=time_stamp, end_time_inclusive=True + ) return self._get_atomic_node_info_core(row_dict) - def _get_atomic_partners_core(self, flattened_row_dict: Dict, - include_connected_partners=True, - include_disconnected_partners=False - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + def _get_atomic_partners_core( + self, + flattened_row_dict: Dict, + include_connected_partners=True, + include_disconnected_partners=False, + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ Extracts the atomic partners and affinities for a given timestamp - :param flattened_row_dict: dict :param include_connected_partners: bool :param include_disconnected_partners: bool @@ -2705,22 +2749,23 @@ def _get_atomic_partners_core(self, flattened_row_dict: Dict, included_ids = [] for column in columns: included_ids.extend(flattened_row_dict[column]) - - included_ids = np.array(included_ids, dtype=column_keys.Connectivity.Connected.basetype) + included_ids = np.array( + included_ids, dtype=column_keys.Connectivity.Connected.basetype + ) areas = flattened_row_dict[column_keys.Connectivity.Area][included_ids] affinities = flattened_row_dict[column_keys.Connectivity.Affinity][included_ids] partners = flattened_row_dict[column_keys.Connectivity.Partner][included_ids] - return partners, affinities, areas - def get_atomic_partners(self, atomic_id: np.uint64, - include_connected_partners=True, - include_disconnected_partners=False, - time_stamp: Optional[datetime.datetime] = get_max_time() - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + def get_atomic_partners( + self, + atomic_id: np.uint64, + include_connected_partners=True, + include_disconnected_partners=False, + time_stamp: 
Optional[datetime.datetime] = get_max_time(), + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ Extracts the atomic partners and affinities for a given timestamp - :param atomic_id: np.uint64 :param include_connected_partners: bool :param include_disconnected_partners: bool @@ -2728,49 +2773,65 @@ def get_atomic_partners(self, atomic_id: np.uint64, :return: list of np.ndarrays """ assert include_connected_partners or include_disconnected_partners + flattened_row_dict = self.get_atomic_node_info(atomic_id, time_stamp=time_stamp) + return self._get_atomic_partners_core( + flattened_row_dict, + include_connected_partners, + include_disconnected_partners, + ) - flattened_row_dict = self.get_atomic_node_info(atomic_id, - time_stamp=time_stamp) - - return self._get_atomic_partners_core(flattened_row_dict, - include_connected_partners, - include_disconnected_partners) - - def _retrieve_connectivity(self, dict_item: Tuple[np.uint64, Dict[column_keys._Column, List[bigtable.row_data.Cell]]], - connected_edges: bool = True): + def _retrieve_connectivity( + self, + dict_item: Tuple[ + np.uint64, Dict[column_keys._Column, List[bigtable.row_data.Cell]] + ], + connected_edges: bool = True, + ): node_id, row = dict_item - tmp = set() - for x in itertools.chain.from_iterable(generation.value for generation in row[column_keys.Connectivity.Connected][::-1]): + for x in chain.from_iterable( + generation.value + for generation in row[column_keys.Connectivity.Connected][::-1] + ): tmp.remove(x) if x in tmp else tmp.add(x) connected_indices = np.fromiter(tmp, np.uint64) if column_keys.Connectivity.Partner in row: - edges = np.fromiter(itertools.chain.from_iterable( - (node_id, partner_id) - for generation in row[column_keys.Connectivity.Partner][::-1] - for partner_id in generation.value), - dtype=basetypes.NODE_ID).reshape((-1, 2)) - edges = self._connected_or_not(edges, connected_indices, - connected_edges) + edges = np.fromiter( + chain.from_iterable( + (node_id, partner_id) + for 
generation in row[column_keys.Connectivity.Partner][::-1] + for partner_id in generation.value + ), + dtype=basetypes.NODE_ID, + ).reshape((-1, 2)) + edges = self._connected_or_not(edges, connected_indices, connected_edges) else: edges = np.empty((0, 2), basetypes.NODE_ID) if column_keys.Connectivity.Affinity in row: - affinities = np.fromiter(itertools.chain.from_iterable( - generation.value for generation in row[column_keys.Connectivity.Affinity][::-1]), - dtype=basetypes.EDGE_AFFINITY) - affinities = self._connected_or_not(affinities, connected_indices, - connected_edges) + affinities = np.fromiter( + chain.from_iterable( + generation.value + for generation in row[column_keys.Connectivity.Affinity][::-1] + ), + dtype=basetypes.EDGE_AFFINITY, + ) + affinities = self._connected_or_not( + affinities, connected_indices, connected_edges + ) else: affinities = np.empty(0, basetypes.EDGE_AFFINITY) if column_keys.Connectivity.Area in row: - areas = np.fromiter(itertools.chain.from_iterable( - generation.value for generation in row[column_keys.Connectivity.Area][::-1]), - dtype=basetypes.EDGE_AREA) - areas = self._connected_or_not(areas, connected_indices, - connected_edges) + areas = np.fromiter( + chain.from_iterable( + generation.value + for generation in row[column_keys.Connectivity.Area][::-1] + ), + dtype=basetypes.EDGE_AREA, + ) + areas = self._connected_or_not(areas, connected_indices, connected_edges) else: areas = np.empty(0, basetypes.EDGE_AREA) @@ -2784,17 +2845,18 @@ def _connected_or_not(self, array, connected_indices, connected): """ mask = np.zeros((array.shape[0],), dtype=np.bool) mask[connected_indices] = True - if connected: return array[mask, ...] else: return array[~mask, ...] 
- def get_subgraph_chunk(self, node_ids: Iterable[np.uint64], - make_unique: bool = True, - connected_edges: bool = True, - time_stamp: Optional[datetime.datetime] = None - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + def get_subgraph_chunk( + self, + node_ids: Iterable[np.uint64], + make_unique: bool = True, + connected_edges: bool = True, + time_stamp: Optional[datetime.datetime] = None, + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ Takes an atomic id and returns the associated agglomeration ids :param node_ids: array of np.uint64 @@ -2807,44 +2869,55 @@ def get_subgraph_chunk(self, node_ids: Iterable[np.uint64], time_stamp = datetime.datetime.utcnow() if time_stamp.tzinfo is None: - time_stamp = UTC.localize(time_stamp) + time_stamp = pytz.UTC.localize(time_stamp) child_ids = self.get_children(node_ids, flatten=True) - - row_dict = self.read_node_id_rows(node_ids=child_ids, - columns=[column_keys.Connectivity.Area, - column_keys.Connectivity.Affinity, - column_keys.Connectivity.Partner, - column_keys.Connectivity.Connected, - column_keys.Connectivity.Disconnected], - end_time=time_stamp, - end_time_inclusive=True) + row_dict = self.read_node_id_rows( + node_ids=child_ids, + columns=[ + column_keys.Connectivity.Area, + column_keys.Connectivity.Affinity, + column_keys.Connectivity.Partner, + column_keys.Connectivity.Connected, + column_keys.Connectivity.Disconnected, + ], + end_time=time_stamp, + end_time_inclusive=True, + ) tmp_edges, tmp_affinites, tmp_areas = [], [], [] for row_dict_item in row_dict.items(): - edges, affinities, areas = self._retrieve_connectivity(row_dict_item, - connected_edges) + edges, affinities, areas = self._retrieve_connectivity( + row_dict_item, connected_edges + ) tmp_edges.append(edges) tmp_affinites.append(affinities) tmp_areas.append(areas) - edges = np.concatenate(tmp_edges) if tmp_edges \ + edges = ( + np.concatenate(tmp_edges) + if tmp_edges else np.empty((0, 2), dtype=basetypes.NODE_ID) - affinities = 
np.concatenate(tmp_affinites) if tmp_affinites \ + ) + affinities = ( + np.concatenate(tmp_affinites) + if tmp_affinites else np.empty(0, dtype=basetypes.EDGE_AFFINITY) - areas = np.concatenate(tmp_areas) if tmp_areas \ + ) + areas = ( + np.concatenate(tmp_areas) + if tmp_areas else np.empty(0, dtype=basetypes.EDGE_AREA) + ) # If requested, remove duplicate edges. Every edge is stored in each # participating node. Hence, we have many edge pairs that look # like [x, y], [y, x]. We solve this by sorting and calling np.unique # row-wise if make_unique and len(edges) > 0: - edges, idx = np.unique(np.sort(edges, axis=1), axis=0, - return_index=True) + edges, idx = np.unique(np.sort(edges, axis=1), axis=0, return_index=True) affinities = affinities[idx] areas = areas[idx] - return edges, affinities, areas def add_edges( @@ -2939,7 +3012,6 @@ def remove_edges( "Split operation require the same number of source and sink IDs" ) atomic_edges = np.array([source_ids, sink_ids]).transpose() - return SplitOperation( self, user_id=user_id, @@ -2948,7 +3020,9 @@ def remove_edges( sink_coords=sink_coords, ).execute() - def undo_operation(self, user_id: str, operation_id: np.uint64) -> GraphEditOperation.Result: + def undo_operation( + self, user_id: str, operation_id: np.uint64 + ) -> GraphEditOperation.Result: """ Applies the inverse of a previous GraphEditOperation :param user_id: str @@ -2957,7 +3031,9 @@ def undo_operation(self, user_id: str, operation_id: np.uint64) -> GraphEditOper """ return UndoOperation(self, user_id=user_id, operation_id=operation_id).execute() - def redo_operation(self, user_id: str, operation_id: np.uint64) -> GraphEditOperation.Result: + def redo_operation( + self, user_id: str, operation_id: np.uint64 + ) -> GraphEditOperation.Result: """ Re-applies a previous GraphEditOperation :param user_id: str @@ -2966,15 +3042,15 @@ def redo_operation(self, user_id: str, operation_id: np.uint64) -> GraphEditOper """ return RedoOperation(self, user_id=user_id, 
operation_id=operation_id).execute() - def _run_multicut(self, source_ids: Sequence[np.uint64], - sink_ids: Sequence[np.uint64], - source_coords: Sequence[Sequence[int]], - sink_coords: Sequence[Sequence[int]], - bb_offset: Tuple[int, int, int] = (120, 120, 12)): - - + def _run_multicut( + self, + source_ids: Sequence[np.uint64], + sink_ids: Sequence[np.uint64], + source_coords: Sequence[Sequence[int]], + sink_coords: Sequence[Sequence[int]], + bb_offset: Tuple[int, int, int] = (120, 120, 12), + ): time_start = time.time() - bb_offset = np.array(list(bb_offset)) source_coords = np.array(source_coords) sink_coords = np.array(sink_coords) @@ -2998,15 +3074,18 @@ def _run_multicut(self, source_ids: Sequence[np.uint64], f"All supervoxel must belong to the same object. Already split?" ) - self.logger.debug("Get roots and check: %.3fms" % - ((time.time() - time_start) * 1000)) + self.logger.debug( + "Get roots and check: %.3fms" % ((time.time() - time_start) * 1000) + ) time_start = time.time() # ------------------------------------------ root_id = root_ids.pop() # Get edges between local supervoxels - n_chunks_affected = np.product((np.ceil(bounding_box[1] / self.chunk_size)).astype(np.int) - - (np.floor(bounding_box[0] / self.chunk_size)).astype(np.int)) + n_chunks_affected = np.product( + (np.ceil(bounding_box[1] / self.chunk_size)).astype(np.int) + - (np.floor(bounding_box[0] / self.chunk_size)).astype(np.int) + ) self.logger.debug("Number of affected chunks: %d" % n_chunks_affected) self.logger.debug(f"Bounding box: {bounding_box}") @@ -3015,28 +3094,25 @@ def _run_multicut(self, source_ids: Sequence[np.uint64], self.logger.debug(f"Sink ids: {sink_ids}") self.logger.debug(f"Root id: {root_id}") - edges, affs, areas = self.get_subgraph_edges(root_id, - bounding_box=bounding_box, - bb_is_coordinate=True) - self.logger.debug(f"Get edges and affs: " - f"{(time.time() - time_start) * 1000:.3f}ms") + edges, affs, _ = self.get_subgraph_edges( + root_id, 
bounding_box=bounding_box, bb_is_coordinate=True + ) + self.logger.debug( + f"Get edges and affs: " f"{(time.time() - time_start) * 1000:.3f}ms" + ) time_start = time.time() # ------------------------------------------ if len(edges) == 0: raise cg_exceptions.PreconditionError( - f"No local edges found. " - f"Something went wrong with the bounding box?" + f"No local edges found. " f"Something went wrong with the bounding box?" ) # Compute mincut atomic_edges = cutting.mincut(edges, affs, source_ids, sink_ids) - self.logger.debug(f"Mincut: {(time.time() - time_start) * 1000:.3f}ms") - if len(atomic_edges) == 0: - raise cg_exceptions.PostconditionError( - f"Mincut failed. Try again...") + raise cg_exceptions.PostconditionError(f"Mincut failed. Try again...") # # Check if any edge in the cutset is infinite (== between chunks) # # We would prevent such a cut @@ -3049,5 +3125,4 @@ def _run_multicut(self, source_ids: Sequence[np.uint64], # if np.any(np.isinf(affs[cutset_mask])): # self.logger.error("inf in cutset") # return False, None - return atomic_edges diff --git a/pychunkedgraph/backend/chunkedgraph_edits.py b/pychunkedgraph/backend/chunkedgraph_edits.py index 8c177dc44..b6f9ae384 100644 --- a/pychunkedgraph/backend/chunkedgraph_edits.py +++ b/pychunkedgraph/backend/chunkedgraph_edits.py @@ -11,7 +11,7 @@ from . 
import flatgraph_utils from .chunkedgraph_utils import combine_cross_chunk_edge_dicts from .utils import column_keys, serializers -from .utils.helpers import get_bounding_box +from .utils.general import get_bounding_box from .utils.edge_utils import filter_fake_edges from .utils.edge_utils import map_edges_to_chunks from .utils.edge_utils import get_linking_edges diff --git a/pychunkedgraph/backend/chunkedgraph_utils.py b/pychunkedgraph/backend/chunkedgraph_utils.py index 3449e3668..c6f3d1d61 100644 --- a/pychunkedgraph/backend/chunkedgraph_utils.py +++ b/pychunkedgraph/backend/chunkedgraph_utils.py @@ -6,19 +6,22 @@ import pytz from google.cloud import bigtable -from google.cloud.bigtable.row_filters import TimestampRange, \ - TimestampRangeFilter, ColumnRangeFilter, RowFilterChain, \ - RowFilterUnion, RowFilter -from pychunkedgraph.backend.utils import column_keys, serializers +from google.cloud.bigtable.row_filters import ( + TimestampRange, + TimestampRangeFilter, + ColumnRangeFilter, + RowFilterChain, + RowFilterUnion, + RowFilter, +) +from .utils import column_keys +from .utils import serializers def compute_indices_pandas(data) -> pd.Series: """ Computes indices of all unique entries - Make sure to remap your array to a dense range starting at zero - https://stackoverflow.com/questions/33281957/faster-alternative-to-numpy-where - :param data: np.ndarray :return: pandas dataframe """ @@ -29,7 +32,6 @@ def compute_indices_pandas(data) -> pd.Series: def log_n(arr, n): """ Computes log to base n - :param arr: array or float :param n: int base @@ -43,91 +45,79 @@ def log_n(arr, n): return np.log(arr) / np.log(n) -def compute_bitmasks(n_layers: int, fan_out: int, s_bits_atomic_layer: int = 8 - ) -> Dict[int, int]: +def compute_bitmasks( + n_layers: int, fan_out: int, s_bits_atomic_layer: int = 8 +) -> Dict[int, int]: """ Computes the bitmasks for each layer. A bitmasks encodes how many bits are used to store the chunk id in each dimension. 
The smallest number of bits needed to encode this information is chosen. The layer id is always encoded with 8 bits as this information is required a priori. - Currently, encoding of layer 1 is fixed to 8 bits. - :param n_layers: int :param fan_out: int :param s_bits_atomic_layer: int :return: dict layer -> bits for layer id """ - bitmask_dict = {} for i_layer in range(n_layers, 0, -1): layer_exp = n_layers - i_layer - n_bits_for_layers = max(1, np.ceil(log_n(fan_out**layer_exp, fan_out))) - + n_bits_for_layers = max(1, np.ceil(log_n(fan_out ** layer_exp, fan_out))) if i_layer == 1: n_bits_for_layers = np.max([s_bits_atomic_layer, n_bits_for_layers]) n_bits_for_layers = int(n_bits_for_layers) - - # assert n_bits_for_layers <= 8 - bitmask_dict[i_layer] = n_bits_for_layers - - # print(f"Bitmasks: {bitmask_dict}") - return bitmask_dict -def get_google_compatible_time_stamp(time_stamp: datetime.datetime, - round_up: bool =False - ) -> datetime.datetime: +def get_google_compatible_time_stamp( + time_stamp: datetime.datetime, round_up: bool = False +) -> datetime.datetime: """ Makes a datetime.datetime time stamp compatible with googles' services. Google restricts the accuracy of time stamps to milliseconds. Hence, the microseconds are cut of. By default, time stamps are rounded to the lower number. 
- :param time_stamp: datetime.datetime :param round_up: bool :return: datetime.datetime """ - micro_s_gap = datetime.timedelta(microseconds=time_stamp.microsecond % 1000) - if micro_s_gap == 0: return time_stamp - if round_up: - time_stamp += (datetime.timedelta(microseconds=1000) - micro_s_gap) + time_stamp += datetime.timedelta(microseconds=1000) - micro_s_gap else: time_stamp -= micro_s_gap - return time_stamp def get_column_filter( - columns: Union[Iterable[column_keys._Column], column_keys._Column] = None) -> RowFilter: + columns: Union[Iterable[column_keys._Column], column_keys._Column] = None +) -> RowFilter: """ Generates a RowFilter that accepts the specified columns """ - if isinstance(columns, column_keys._Column): - return ColumnRangeFilter(columns.family_id, - start_column=columns.key, - end_column=columns.key) + return ColumnRangeFilter( + columns.family_id, start_column=columns.key, end_column=columns.key + ) elif len(columns) == 1: - return ColumnRangeFilter(columns[0].family_id, - start_column=columns[0].key, - end_column=columns[0].key) - - return RowFilterUnion([ColumnRangeFilter(col.family_id, - start_column=col.key, - end_column=col.key) for col in columns]) + return ColumnRangeFilter( + columns[0].family_id, start_column=columns[0].key, end_column=columns[0].key + ) + return RowFilterUnion( + [ + ColumnRangeFilter(col.family_id, start_column=col.key, end_column=col.key) + for col in columns + ] + ) def get_time_range_filter( - start_time: Optional[datetime.datetime] = None, - end_time: Optional[datetime.datetime] = None, - end_inclusive: bool = True) -> RowFilter: + start_time: Optional[datetime.datetime] = None, + end_time: Optional[datetime.datetime] = None, + end_inclusive: bool = True, +) -> RowFilter: """ Generates a TimeStampRangeFilter which is inclusive for start and (optionally) end. 
- :param start: :param end: :return: @@ -142,14 +132,14 @@ def get_time_range_filter( def get_time_range_and_column_filter( - columns: Optional[Union[Iterable[column_keys._Column], column_keys._Column]] = None, - start_time: Optional[datetime.datetime] = None, - end_time: Optional[datetime.datetime] = None, - end_inclusive: bool = False) -> RowFilter: - - time_filter = get_time_range_filter(start_time=start_time, - end_time=end_time, - end_inclusive=end_inclusive) + columns: Optional[Union[Iterable[column_keys._Column], column_keys._Column]] = None, + start_time: Optional[datetime.datetime] = None, + end_time: Optional[datetime.datetime] = None, + end_inclusive: bool = False, +) -> RowFilter: + time_filter = get_time_range_filter( + start_time=start_time, end_time=end_time, end_inclusive=end_inclusive + ) if columns is not None: column_filter = get_column_filter(columns) @@ -160,7 +150,6 @@ def get_time_range_and_column_filter( def get_max_time(): """ Returns the (almost) max time in datetime.datetime - :return: datetime.datetime """ return datetime.datetime(9999, 12, 31, 23, 59, 59, 0) @@ -168,7 +157,6 @@ def get_max_time(): def get_min_time(): """ Returns the min time in datetime.datetime - :return: datetime.datetime """ return datetime.datetime.strptime("01/01/00 00:00", "%d/%m/%y %H:%M") @@ -177,16 +165,13 @@ def get_min_time(): def combine_cross_chunk_edge_dicts(d1, d2, start_layer=2): """ Combines two cross chunk dictionaries Cross chunk dictionaries contain a layer id -> edge list mapping. 
- :param d1: dict :param d2: dict :param start_layer: int :return: dict """ assert start_layer >= 2 - new_d = {} - for l in d2: if l < start_layer: continue @@ -196,8 +181,7 @@ def combine_cross_chunk_edge_dicts(d1, d2, start_layer=2): for l in layers: if l in d1 and l in d2: - new_d[l] = np.concatenate([d1[l].reshape(-1, 2), - d2[l].reshape(-1, 2)]) + new_d[l] = np.concatenate([d1[l].reshape(-1, 2), d2[l].reshape(-1, 2)]) elif l in d1: new_d[l] = d1[l].reshape(-1, 2) elif l in d2: @@ -205,39 +189,60 @@ def combine_cross_chunk_edge_dicts(d1, d2, start_layer=2): else: raise Exception() - edges_flattened_view = new_d[l].view(dtype='u8,u8') + edges_flattened_view = new_d[l].view(dtype="u8,u8") m = np.unique(edges_flattened_view, return_index=True)[1] new_d[l] = new_d[l][m] - return new_d def time_min(): """ Returns a minimal time stamp that still works with google - :return: datetime.datetime """ return datetime.datetime.strptime("01/01/00 00:00", "%d/%m/%y %H:%M") -def partial_row_data_to_column_dict(partial_row_data: bigtable.row_data.PartialRowData) \ - -> Dict[column_keys._Column, bigtable.row_data.PartialRowData]: +def partial_row_data_to_column_dict( + partial_row_data: bigtable.row_data.PartialRowData +) -> Dict[column_keys._Column, bigtable.row_data.PartialRowData]: new_column_dict = {} - for family_id, column_dict in partial_row_data._cells.items(): for column_key, column_values in column_dict.items(): column = column_keys.from_key(family_id, column_key) new_column_dict[column] = column_values - return new_column_dict def get_valid_timestamp(timestamp): if timestamp is None: timestamp = datetime.datetime.utcnow() - if timestamp.tzinfo is None: timestamp = pytz.UTC.localize(timestamp) - # Comply to resolution of BigTables TimeRange - return get_google_compatible_time_stamp(timestamp, round_up=False) + return get_google_compatible_time_stamp(timestamp, round_up=False) + + +def compute_chunk_id( + layer: int, + x: int, + y: int, + z: int, + s_bits_per_dim: int = 
10, + n_bits_layer_id: int = 8, +): + if not ( + x < 2 ** s_bits_per_dim and y < 2 ** s_bits_per_dim and z < 2 ** s_bits_per_dim + ): + raise ValueError( + f"Coordinate is out of range \ + layer: {layer} bits/dim {s_bits_per_dim}. \ + [{x}, {y}, {z}]; max = {2 ** s_bits_per_dim}." + ) + layer_offset = 64 - n_bits_layer_id + x_offset = layer_offset - s_bits_per_dim + y_offset = x_offset - s_bits_per_dim + z_offset = y_offset - s_bits_per_dim + return np.uint64( + layer << layer_offset | x << x_offset | y << y_offset | z << z_offset + ) + diff --git a/pychunkedgraph/backend/definitions/config.py b/pychunkedgraph/backend/definitions/config.py new file mode 100644 index 000000000..ac27583f4 --- /dev/null +++ b/pychunkedgraph/backend/definitions/config.py @@ -0,0 +1,32 @@ +from collections import namedtuple + +datasource_fields = ( + "agglomeration", + "watershed", + "edges", + "components", + "use_raw_edges", + "use_raw_components", + "size", +) +DataSource = namedtuple( + "DataSource", datasource_fields, defaults=(None,) * len(datasource_fields) +) + +graphconfig_fields = ( + "graph_id", + "chunk_size", + "fanout", + "build_graph", + "s_bits_atomic_layer", +) +GraphConfig = namedtuple( + "GraphConfig", graphconfig_fields, defaults=(None, None, 2, True, 8) +) + +bigtableconfig_fields = ("project_id", "instance_id") +BigTableConfig = namedtuple( + "BigTableConfig", + bigtableconfig_fields, + defaults=(None,) * len(bigtableconfig_fields), +) diff --git a/pychunkedgraph/backend/utils/helpers.py b/pychunkedgraph/backend/utils/general.py similarity index 95% rename from pychunkedgraph/backend/utils/helpers.py rename to pychunkedgraph/backend/utils/general.py index 32bba7bb5..ab8ee03b5 100644 --- a/pychunkedgraph/backend/utils/helpers.py +++ b/pychunkedgraph/backend/utils/general.py @@ -2,6 +2,7 @@ import numpy as np + def get_bounding_box( source_coords: Sequence[Sequence[int]], sink_coords: Sequence[Sequence[int]], @@ -17,4 +18,4 @@ def get_bounding_box( bounding_box = 
[np.min(coords, axis=0), np.max(coords, axis=0)] bounding_box[0] -= bb_offset bounding_box[1] += bb_offset - return bounding_box + return bounding_box diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index f5f72a055..a2c1b4f8f 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -1,65 +1,184 @@ """ cli for running ingest """ +import sys +import time from collections import defaultdict +from itertools import product +from typing import List +from typing import Sequence import numpy as np import click -from flask import current_app from flask.cli import AppGroup -from .ran_ingestion_v2 import ingest_into_chunkedgraph +from .ingestionmanager import IngestionManager from .ran_ingestion_v2 import INGEST_CHANNEL +from .ran_ingestion_v2 import INGEST_QUEUE +from .ran_ingestion_v2 import ingest_into_chunkedgraph +from .ran_ingestion_v2 import enqueue_atomic_tasks +from .ran_ingestion_v2 import create_parent_chunk +from ..utils.redis import get_redis_connection +from ..utils.redis import keys as r_keys +from ..backend.definitions.config import DataSource +from ..backend.definitions.config import GraphConfig +from ..backend.definitions.config import BigTableConfig ingest_cli = AppGroup("ingest") -task_count = 0 + +redis_cnxn = get_redis_connection() + + +@ingest_cli.command("graph") +@click.argument("graph_id", type=str) +# @click.option("--agglomeration", required=True, type=str) +# @click.option("--watershed", required=True, type=str) +# @click.option("--edges", required=True, type=str) +# @click.option("--components", required=True, type=str) +@click.option("--processed", is_flag=True, help="Use processed data to build graph") +# @click.option("--data-size", required=False, nargs=3, type=int) +# @click.option("--chunk-size", required=True, nargs=3, type=int) +# @click.option("--fanout", required=False, type=int) +# @click.option("--gcp-project-id", required=False, type=str) +# @click.option("--bigtable-instance-id", 
required=False, type=str) +# @click.option("--interval", required=False, type=float) +@click.option("--result-ttl", required=False, type=int) +def ingest_graph( + graph_id, + # agglomeration, + # watershed, + # edges, + # components, + processed, + # chunk_size, + # data_size=None, + # fanout=2, + # gcp_project_id=None, + # bigtable_instance_id=None, + # interval=90.0 + result_ttl=500, +): + # agglomeration = "gs://ranl-scratch/190410_FAFB_v02_ws_size_threshold_200/agg" + # watershed = ( + # "gs://microns-seunglab/drosophila_v0/ws_190410_FAFB_v02_ws_size_threshold_200" + # ) + # edges = "gs://akhilesh-pcg/190410_FAFB_v02/edges" + # components = "gs://akhilesh-pcg/190410_FAFB_v02/components" + + agglomeration = "gs://ranl-scratch/minnie65_0/agg" + watershed = "gs://microns-seunglab/minnie65/ws_minnie65_0" + edges = "gs://chunkedgraph/minnie65_0/edges" + components = "gs://chunkedgraph/minnie65_0/components" + + use_raw_edges = not processed + use_raw_components = not processed + + chunk_size = [256, 256, 512] + fanout = 2 + gcp_project_id = None + bigtable_instance_id = None + build_graph = False + s_bits_atomic_layer = 10 + + data_source = DataSource( + agglomeration, watershed, edges, components, use_raw_edges, use_raw_components + ) + graph_config = GraphConfig( + graph_id, chunk_size, fanout, build_graph, s_bits_atomic_layer + ) + bigtable_config = BigTableConfig(gcp_project_id, bigtable_instance_id) + + redis_cnxn.flushdb() + imanager = ingest_into_chunkedgraph(data_source, graph_config, bigtable_config) + redis_cnxn.set(r_keys.INGESTION_MANAGER, imanager.get_serialized_info(pickled=True)) + enqueue_atomic_tasks(imanager, result_ttl=result_ttl) -def handle_job_result(*args, **kwargs): - """handle worker return""" - global task_count - result = np.frombuffer(args[0]["data"], dtype=np.int32) - layer = result[0] - task_count += 1 +def _get_children_coords( + imanager, layer: int, parent_coords: Sequence[int] +) -> List[np.ndarray]: + layer_bounds = 
imanager.chunk_id_bounds / (2 ** (layer - 2)) + layer_bounds = np.ceil(layer_bounds).astype(np.int) + children_coords = [] + parent_coords = np.array(parent_coords, dtype=int) + for dcoord in product(*[range(imanager.cg.fan_out)] * 3): + dcoord = np.array(dcoord, dtype=int) + child_coords = parent_coords * imanager.cg.fan_out + dcoord + check_bounds = np.less(child_coords, layer_bounds[:, 1]) + if np.all(check_bounds): + children_coords.append(child_coords) + return children_coords - with open(f"completed_{layer}.txt", "w") as completed_f: - completed_f.write(str(task_count)) +def _parse_results(imanager): + global redis_cnxn + zset_name = f"rq:finished:{INGEST_QUEUE}" + results = redis_cnxn.zrange(zset_name, 0, -1) + layer_counts_d = defaultdict(int) + parent_chunks_d = defaultdict(list) # (layer, x, y, z) as keys + for chunk_str in results: + chunk_str = chunk_str.decode("utf-8") + layer, x, y, z = map(int, chunk_str.split("_")) + layer_counts_d[layer] += 1 + if layer == imanager.n_layers: + print("All jobs completed.") + sys.exit(0) + layer += 1 + x, y, z = np.array([x, y, z], int) // imanager.cg.fan_out + parent_job_id = f"{layer}_{'_'.join(map(str, (x, y, z)))}" + if not redis_cnxn.hget(r_keys.PARENTS_HASH, parent_job_id) is None: + continue + parent_chunks_d[(layer, x, y, z)].append(chunk_str) + return parent_chunks_d, layer_counts_d -@ingest_cli.command("table") -@click.argument("storage_path", type=str) -@click.argument("ws_cv_path", type=str) -@click.argument("cv_path", type=str) -@click.argument("cg_table_id", type=str) -@click.argument("layer", type=int) -def run_ingest(storage_path, ws_cv_path, cv_path, cg_table_id, layer): + +def _enqueue_parent_tasks(): + """ + Helper to enqueue parent tasks + Checks job/chunk ids in redis to determine if parent task can be enqueued """ - run ingestion job - eg: flask ingest table \ - gs://ranl/scratch/pinky100_ca_com/agg \ - gs://neuroglancer/pinky100_v0/ws/pinky100_ca_com \ - gs://akhilesh-pcg \ - 
akhilesh-pinky100-0 \ - 2 + global redis_cnxn + imanager = IngestionManager.from_pickle(redis_cnxn.get(r_keys.INGESTION_MANAGER)) + parent_chunks_d, layer_counts_d = _parse_results(imanager) + count = 0 + for parent_chunk in parent_chunks_d: + children_coords = _get_children_coords( + imanager, parent_chunk[0] - 1, parent_chunk[1:] + ) + children_results = parent_chunks_d[parent_chunk] + if not len(children_coords) == len(children_results): + continue + job_id = f"{parent_chunk[0]}_{'_'.join(map(str, parent_chunk[1:]))}" + imanager.task_q.enqueue( + create_parent_chunk, + job_id=job_id, + job_timeout="10m", + result_ttl=86400, + args=(imanager.get_serialized_info(), parent_chunk[0], children_coords), + ) + count += 1 + redis_cnxn.hset(r_keys.PARENTS_HASH, job_id, "") + + layers = range(2, imanager.n_layers) + status = ", ".join([f"{l}:{layer_counts_d[l]}" for l in layers]) + print(f"Queued {count} parents.") + print(f"Completed chunks (layer:count)\n{status}") + + +@ingest_cli.command("parents") +@click.option("--interval", required=True, type=float) +def ingest_parent_chunks(interval): """ - chunk_pubsub = current_app.redis.pubsub() - chunk_pubsub.subscribe(**{INGEST_CHANNEL: handle_job_result}) - chunk_pubsub.run_in_thread(sleep_time=0.1) - - data_config = { - "edge_dir": f"{cv_path}/akhilesh-pinky100-1/edges", - "agglomeration_dir": f"{cv_path}/{cg_table_id}/agglomeration", - "use_raw_edge_data": False, - "use_raw_agglomeration_data": True, - } - - ingest_into_chunkedgraph( - storage_path=storage_path, - ws_cv_path=ws_cv_path, - cg_table_id=cg_table_id, - layer=layer, - data_config=data_config, - ) + This can only be used after running `ingest graph` + Uses serialzed ingestion manager information stored in redis + by the `ingest graph` command. 
+ """ + if not redis_cnxn.get(r_keys.INGESTION_MANAGER): + click.secho("Run `ingest graph` before using this command.", fg="red") + sys.exit(1) + while True: + _enqueue_parent_tasks() + time.sleep(interval) def init_ingest_cmds(app): diff --git a/pychunkedgraph/ingest/ingestion_utils.py b/pychunkedgraph/ingest/ingestion_utils.py index 5553615f2..2e468a09c 100644 --- a/pychunkedgraph/ingest/ingestion_utils.py +++ b/pychunkedgraph/ingest/ingestion_utils.py @@ -69,7 +69,7 @@ def initialize_chunkedgraph(cg_table_id, ws_cv_path, chunk_size, size, "use_skip_connections": use_skip_connections, "s_bits_atomic_layer": s_bits_atomic_layer, "n_bits_root_counter": n_bits_root_counter, - "is_new": is_new, + "is_new": True, "edge_dir": edge_dir} if instance_id is not None: @@ -79,7 +79,6 @@ def initialize_chunkedgraph(cg_table_id, ws_cv_path, chunk_size, size, kwargs["project_id"] = project_id cg = chunkedgraph.ChunkedGraph(**kwargs) - return cg, n_layers_agg @@ -105,4 +104,4 @@ def postprocess_edge_data(im, edge_dict): return new_edge_dict else: - raise Exception(f"Unknown data_version: {data_version}") \ No newline at end of file + raise Exception(f"Unknown data_version: {im.data_version}") \ No newline at end of file diff --git a/pychunkedgraph/ingest/ingestionmanager.py b/pychunkedgraph/ingest/ingestionmanager.py index abcae28a2..297277d4f 100644 --- a/pychunkedgraph/ingest/ingestionmanager.py +++ b/pychunkedgraph/ingest/ingestionmanager.py @@ -1,7 +1,12 @@ import itertools import numpy as np +import pickle -from pychunkedgraph.backend import chunkedgraph + +from ..backend.chunkedgraph_utils import compute_bitmasks +from ..backend.chunkedgraph import ChunkedGraph +from ..utils.redis import get_redis_connection +from ..utils.redis import get_rq_queue class IngestionManager(object): @@ -12,10 +17,16 @@ def __init__( n_layers=None, instance_id=None, project_id=None, + cv=None, + chunk_size=None, data_version=2, + s_bits_atomic_layer=8, use_raw_edge_data=True, 
use_raw_agglomeration_data=True, - agglomeration_dir=None, + edges_dir=None, + components_dir=None, + task_q_name="test", + build_graph=True, ): self._storage_path = storage_path self._cg_table_id = cg_table_id @@ -23,10 +34,23 @@ def __init__( self._project_id = project_id self._cg = None self._n_layers = n_layers + self._s_bits_atomic_layer = s_bits_atomic_layer self._data_version = data_version + self._cv = cv + self._chunk_size = chunk_size self._use_raw_edge_data = use_raw_edge_data self._use_raw_agglomeration_data = use_raw_agglomeration_data - self._agglomeration_dir = agglomeration_dir + self._edges_dir = edges_dir + self._components_dir = components_dir + self._chunk_coords = None + self._layer_bounds_d = None + self._redis_connection = None + self._task_q_name = task_q_name + self._task_q = None + self._build_graph = build_graph + self._bitmasks = None + self._bounds = None + self._redis = None @property def storage_path(self): @@ -84,19 +108,33 @@ def cg(self): if self._project_id is not None: kwargs["project_id"] = self._project_id - self._cg = chunkedgraph.ChunkedGraph(table_id=self._cg_table_id, **kwargs) + self._cg = ChunkedGraph(table_id=self._cg_table_id, **kwargs) return self._cg @property def bounds(self): - bounds = self.cg.vx_vol_bounds.copy() - bounds -= self.cg.vx_vol_bounds[:, 0:1] - return bounds + if self._bounds: + return self._bounds + cv_bounds = np.array(self._cv.bounds.to_list()).reshape(2, -1).T + self._bounds = cv_bounds.copy() + self._bounds -= cv_bounds[:, 0:1] + return self._bounds @property def chunk_id_bounds(self): - return np.ceil((self.bounds / self.cg.chunk_size[:, None])).astype(np.int) + return np.ceil((self.bounds / self._chunk_size[:, None])).astype(np.int) + + @property + def layer_chunk_bounds(self): + if self._layer_bounds_d: + return self._layer_bounds_d + layer_bounds_d = {} + for layer in range(2, self.n_layers): + layer_bounds = self.chunk_id_bounds / (2 ** (layer - 2)) + layer_bounds_d[layer] = 
np.ceil(layer_bounds).astype(np.int) + self._layer_bounds_d = layer_bounds_d + return self._layer_bounds_d @property def chunk_coord_gen(self): @@ -104,7 +142,10 @@ def chunk_coord_gen(self): @property def chunk_coords(self): - return np.array(list(self.chunk_coord_gen), dtype=np.int) + if not self._chunk_coords is None: + return self._chunk_coords + self._chunk_coords = np.array(list(self.chunk_coord_gen), dtype=np.int) + return self._chunk_coords @property def n_layers(self): @@ -121,10 +162,32 @@ def use_raw_agglomeration_data(self): return self._use_raw_agglomeration_data @property - def agglomeration_dir(self): - return self._agglomeration_dir + def edges_dir(self): + return self._edges_dir + + @property + def components_dir(self): + return self._components_dir + + @property + def task_q(self): + if self._task_q: + return self._task_q + self._task_q = get_rq_queue(self._task_q_name) + return self._task_q - def get_serialized_info(self): + @property + def redis(self): + if self._redis: + return self._redis + self._redis = get_redis_connection() + return self._redis + + @property + def build_graph(self): + return self._build_graph + + def get_serialized_info(self, pickled=False): info = { "storage_path": self.storage_path, "cg_table_id": self._cg_table_id, @@ -132,15 +195,28 @@ def get_serialized_info(self): "instance_id": self._instance_id, "project_id": self._project_id, "data_version": self.data_version, + "s_bits_atomic_layer": self._s_bits_atomic_layer, "use_raw_edge_data": self._use_raw_edge_data, "use_raw_agglomeration_data": self._use_raw_agglomeration_data, - "agglomeration_dir": self._agglomeration_dir, + "edges_dir": self._edges_dir, + "components_dir": self._components_dir, + "task_q_name": self._task_q_name, + "build_graph": self._build_graph, } - + if pickled: + return pickle.dumps(info) return info - def is_out_of_bounce(self, chunk_coordinate): + def is_out_of_bounds(self, chunk_coordinate): + if not self._bitmasks: + self._bitmasks = 
compute_bitmasks( + self.n_layers, 2, s_bits_atomic_layer=self._s_bits_atomic_layer + ) return np.any(chunk_coordinate < 0) or np.any( - chunk_coordinate > 2 ** self.cg.bitmasks[1] + chunk_coordinate > 2 ** self._bitmasks[1] ) + @classmethod + def from_pickle(cls, serialized_info): + return cls(**pickle.loads(serialized_info)) + diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 582325217..fe1450b3f 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -21,11 +21,10 @@ def add_layer( time_stamp: Optional[datetime.datetime] = None, n_threads: int = 20, ) -> None: - cross_edge_dict, child_ids = _process_chunks(cg_instance, layer_id, chunk_coords) - edge_ids = _resolve_cross_chunk_edges_thread(layer_id, child_ids, cross_edge_dict) - x, y, z = np.min(chunk_coords, axis=0) // cg_instance.fan_out parent_chunk_id = cg_instance.get_chunk_id(layer=layer_id, x=x, y=y, z=z) + cross_edge_dict, child_ids = _process_chunks(cg_instance, layer_id, chunk_coords) + edge_ids = _resolve_cross_chunk_edges_thread(layer_id, child_ids, cross_edge_dict) # Extract connected components isolated_node_mask = ~np.in1d(child_ids, np.unique(edge_ids)) @@ -47,8 +46,7 @@ def add_layer( graph_ids, time_stamp, ) - # to track worker completion - return np.concatenate([[layer_id], chunk_coords]) + return f"{layer_id}_{'_'.join(map(str, (x, y, z)))}" def _process_chunks(cg_instance, layer_id, chunk_coords): @@ -58,7 +56,7 @@ def _process_chunks(cg_instance, layer_id, chunk_coords): ids, cross_edge_d = _process_chunk(cg_instance, layer_id, chunk_coord) node_ids.append(ids) cross_edge_dict = {**cross_edge_dict, **cross_edge_d} - return cross_edge_dict, np.concatenate(node_ids, dtype=np.uint64) + return cross_edge_dict, np.concatenate(node_ids) def _process_chunk(cg_instance, layer_id, chunk_coord): @@ -112,6 +110,7 @@ def _read_chunk(cg_instance, 
layer_id, chunk_coord): def _resolve_cross_chunk_edges_thread(layer_id, node_ids, cross_edge_dict) -> None: + cross_edge_dict = defaultdict(dict, cross_edge_dict) atomic_partner_id_dict = {} atomic_child_id_dict_pairs = [] for node_id in node_ids: diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index df208d056..39d354c5d 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -2,6 +2,7 @@ Module for ingesting in chunkedgraph format with edges stored outside bigtable """ +import time import collections import itertools import json @@ -15,133 +16,118 @@ import zstandard as zstd from flask import current_app -from ..utils.redis import redis_job, REDIS_URL from . import ingestionmanager, ingestion_utils as iu from .initialization.atomic_layer import add_atomic_edges -from ..backend.definitions.edges import Edges, CX_CHUNK, TYPES as EDGE_TYPES +from .initialization.abstract_layers import add_layer +from ..utils.redis import redis_job +from ..utils.redis import REDIS_URL +from ..utils.redis import keys as r_keys +from ..io.edges import get_chunk_edges +from ..io.edges import put_chunk_edges +from ..io.components import get_chunk_components +from ..io.components import put_chunk_components from ..backend.utils import basetypes -from ..io.edges import get_chunk_edges, put_chunk_edges -from ..io.agglomeration import get_chunk_agglomeration, put_chunk_agglomeration +from ..backend.chunkedgraph_utils import compute_bitmasks +from ..backend.chunkedgraph_utils import compute_chunk_id +from ..backend.definitions.edges import Edges, CX_CHUNK +from ..backend.definitions.edges import TYPES as EDGE_TYPES +from ..backend.definitions.config import DataSource +from ..backend.definitions.config import GraphConfig +from ..backend.definitions.config import BigTableConfig ZSTD_LEVEL = 17 INGEST_CHANNEL = "ingest" +INGEST_QUEUE = "test" def ingest_into_chunkedgraph( - storage_path, - 
ws_cv_path, - cg_table_id, - chunk_size=[512, 512, 128], - use_skip_connections=True, - fan_out=2, - size=None, - instance_id=None, - project_id=None, - layer=2, - n_chunks=None, - is_new: bool = True, - data_config: Dict = None, + data_source: DataSource, graph_config: GraphConfig, bigtable_config: BigTableConfig ): - """ - :param data_config: - :type Dict: - `data_config` can have the following keys. - Use these options to use either raw data or - processed data when building the chunkedgraph - edge_dir=None, - agglomeration_dir=None, - use_raw_edge_data=True, - use_raw_agglomeration_data=True - """ - storage_path = storage_path.strip("/") - ws_cv_path = ws_cv_path.strip("/") - - cg_mesh_dir = f"{cg_table_id}_meshes" - chunk_size = np.array(chunk_size) - - _, n_layers_agg = iu.initialize_chunkedgraph( - cg_table_id=cg_table_id, - ws_cv_path=ws_cv_path, - chunk_size=chunk_size, - size=size, - use_skip_connections=use_skip_connections, - s_bits_atomic_layer=10, - cg_mesh_dir=cg_mesh_dir, - fan_out=fan_out, - instance_id=instance_id, - project_id=project_id, - edge_dir=data_config["edge_dir"], - is_new=is_new, - ) + storage_path = data_source.agglomeration.strip("/") + ws_cv_path = data_source.watershed.strip("/") + chunk_size = np.array(graph_config.chunk_size) + # cg_mesh_dir = f"{graph_config.graph_id}_meshes" + + # _, n_layers_agg = iu.initialize_chunkedgraph( + # cg_table_id=graph_config.graph_id, + # ws_cv_path=ws_cv_path, + # chunk_size=chunk_size, + # size=data_source.size, + # use_skip_connections=True, + # s_bits_atomic_layer=10, + # cg_mesh_dir=cg_mesh_dir, + # fan_out=graph_config.fanout, + # instance_id=bigtable_config.instance_id, + # project_id=bigtable_config.project_id, + # edge_dir=data_source.edges, + # ) + ws_cv = cloudvolume.CloudVolume(ws_cv_path) + n_layers_agg = iu.calc_n_layers(ws_cv, chunk_size, fan_out=2) imanager = ingestionmanager.IngestionManager( storage_path=storage_path, - cg_table_id=cg_table_id, + cg_table_id=graph_config.graph_id, 
n_layers=n_layers_agg, - instance_id=instance_id, - project_id=project_id, - data_version=4, - use_raw_edge_data=data_config["use_raw_edge_data"], - use_raw_agglomeration_data=data_config["use_raw_agglomeration_data"], - agglomeration_dir=data_config["agglomeration_dir"], - ) - - if layer < 3: - create_atomic_chunks(imanager) - else: - create_layer(imanager, layer) - - -def create_layer(imanager, layer_id): - child_chunk_coords = imanager.chunk_coords // imanager.cg.fan_out ** (layer_id - 3) - child_chunk_coords = child_chunk_coords.astype(np.int) - child_chunk_coords = np.unique(child_chunk_coords, axis=0) - - parent_chunk_coords = child_chunk_coords // imanager.cg.fan_out - parent_chunk_coords = parent_chunk_coords.astype(np.int) - parent_chunk_coords, indices = np.unique( - parent_chunk_coords, axis=0, return_inverse=True + instance_id=bigtable_config.instance_id, + project_id=bigtable_config.project_id, + data_version=2, + s_bits_atomic_layer=graph_config.s_bits_atomic_layer, + cv=ws_cv, + chunk_size=chunk_size, + edges_dir=data_source.edges, + components_dir=data_source.components, + use_raw_edge_data=data_source.use_raw_edges, + use_raw_agglomeration_data=data_source.use_raw_components, + build_graph=graph_config.build_graph, ) - - order = np.arange(len(parent_chunk_coords), dtype=np.int) - np.random.shuffle(order) - - print(f"Chunk count: {len(order)}") - for parent_idx in order: - children = child_chunk_coords[indices == parent_idx] - current_app.test_q.enqueue( - _create_layer, - job_timeout="59m", - args=(imanager.get_serialized_info(), layer_id, children), - ) - print(f"Queued jobs: {len(current_app.test_q)}") + return imanager @redis_job(REDIS_URL, INGEST_CHANNEL) -def _create_layer(im_info, layer_id, child_chunk_coords): +def create_parent_chunk(im_info, layer, child_chunk_coords): imanager = ingestionmanager.IngestionManager(**im_info) - imanager.cg.add_layer(layer_id, child_chunk_coords, n_threads=2) + return add_layer(imanager.cg, layer, 
child_chunk_coords) -def create_atomic_chunks(imanager): - """ Creates all atomic chunks""" +def enqueue_atomic_tasks( + imanager, batch_size: int = 50000, interval: float = 300.0, result_ttl: int = 500 +): + # cleanup any old tasks + current_app.test_q.empty() chunk_coords = list(imanager.chunk_coord_gen) np.random.shuffle(chunk_coords) + # test chunks + # chunk_coords = [ + # [0, 0, 0], + # [0, 0, 1], + # [0, 1, 0], + # [0, 1, 1], + # [1, 0, 0], + # [1, 0, 1], + # [1, 1, 0], + # [1, 1, 1], + # ] + print(f"Chunk count: {len(chunk_coords)}") for chunk_coord in chunk_coords: + if len(current_app.test_q) > batch_size: + print("Number of queued jobs greater than batch size, sleeping ...") + time.sleep(interval) + job_id = f"{2}_{'_'.join(map(str, chunk_coord))}" current_app.test_q.enqueue( _create_atomic_chunk, - job_timeout="59m", + job_id=job_id, + job_timeout="10m", + result_ttl=result_ttl, args=(imanager.get_serialized_info(), chunk_coord), ) - print(f"Queued jobs: {len(current_app.test_q)}") @redis_job(REDIS_URL, INGEST_CHANNEL) def _create_atomic_chunk(im_info, chunk_coord): - """ helper for create_atomic_chunks """ + """ helper for enqueue_atomic_tasks """ imanager = ingestionmanager.IngestionManager(**im_info) return create_atomic_chunk(imanager, chunk_coord) @@ -149,14 +135,23 @@ def _create_atomic_chunk(im_info, chunk_coord): def create_atomic_chunk(imanager, coord): """ Creates single atomic chunk""" coord = np.array(list(coord), dtype=np.int) - chunk_edges_all, mapping = _get_chunk_data(imanager, coord) chunk_edges_active, isolated_ids = _get_active_edges( imanager, coord, chunk_edges_all, mapping ) + chunk_id_str = f"{2}_{'_'.join(map(str, coord))}" + if not imanager.build_graph: + imanager.redis.hset(r_keys.ATOMIC_HASH_FINISHED, chunk_id_str, "") + return chunk_id_str add_atomic_edges(imanager.cg, coord, chunk_edges_active, isolated=isolated_ids) - # to track workers completion, layer = 2 - return np.concatenate([[2], coord]) + + # n_supervoxels = 
len(isolated_ids) + # n_edges = 0 + # for edge_type in EDGE_TYPES: + # edges = chunk_edges_all[edge_type] + # n_edges += len(edges) + # n_supervoxels += len(np.unique(edges.get_pairs().ravel())) + return chunk_id_str def _get_chunk_data(imanager, coord) -> Tuple[Dict, Dict]: @@ -167,12 +162,12 @@ def _get_chunk_data(imanager, coord) -> Tuple[Dict, Dict]: chunk_edges = ( _read_raw_edge_data(imanager, coord) if imanager.use_raw_edge_data - else get_chunk_edges(imanager.cg.cv_edges_path, [coord]) + else get_chunk_edges(imanager.edges_dir, [coord]) ) mapping = ( _read_raw_agglomeration_data(imanager, coord) if imanager.use_raw_agglomeration_data - else get_chunk_agglomeration(imanager.agglomeration_dir, coord) + else get_chunk_components(imanager.components_dir, coord) ) return chunk_edges, mapping @@ -200,8 +195,8 @@ def _read_raw_edge_data(imanager, coord) -> Dict: ) no_edges = no_edges and not sv_ids1.size if no_edges: - return None - put_chunk_edges(imanager.cg.cv_edges_path, coord, chunk_edges, ZSTD_LEVEL) + return chunk_edges + put_chunk_edges(imanager.edges_dir, coord, chunk_edges, ZSTD_LEVEL) return chunk_edges @@ -224,7 +219,6 @@ def _get_active_edges(imanager, coord, edges_d, mapping): def _get_cont_chunk_coords(imanager, chunk_coord_a, chunk_coord_b): """ Computes chunk coordinates that compute data between the named chunks - :param imanager: IngestionManagaer :param chunk_coord_a: np.ndarray array of three ints @@ -232,9 +226,7 @@ def _get_cont_chunk_coords(imanager, chunk_coord_a, chunk_coord_b): array of three ints :return: np.ndarray """ - diff = chunk_coord_a - chunk_coord_b - dir_dim = np.where(diff != 0)[0] assert len(dir_dim) == 1 dir_dim = dir_dim[0] @@ -248,16 +240,14 @@ def _get_cont_chunk_coords(imanager, chunk_coord_a, chunk_coord_b): continue c_chunk_coord = chunk_coord_l + np.array([dx, dy, dz]) - if imanager.is_out_of_bounce(c_chunk_coord): + if imanager.is_out_of_bounds(c_chunk_coord): continue c_chunk_coords.append(c_chunk_coord) - return 
c_chunk_coords def _collect_edge_data(imanager, chunk_coord): """ Loads edge for single chunk - :param imanager: IngestionManager :param chunk_coord: np.ndarray array of three ints @@ -266,44 +256,31 @@ def _collect_edge_data(imanager, chunk_coord): :return: dict of np.ndarrays """ subfolder = "chunked_rg" - base_path = f"{imanager.storage_path}/{subfolder}/" - chunk_coord = np.array(chunk_coord) - - chunk_id = imanager.cg.get_chunk_id( - layer=1, x=chunk_coord[0], y=chunk_coord[1], z=chunk_coord[2] - ) + x, y, z = chunk_coord + chunk_id = compute_chunk_id(layer=1, x=x, y=y, z=z) filenames = collections.defaultdict(list) swap = collections.defaultdict(list) - for x in [chunk_coord[0] - 1, chunk_coord[0]]: - for y in [chunk_coord[1] - 1, chunk_coord[1]]: - for z in [chunk_coord[2] - 1, chunk_coord[2]]: - - if imanager.is_out_of_bounce(np.array([x, y, z])): - continue - - # EDGES WITHIN CHUNKS - filename = f"in_chunk_0_{x}_{y}_{z}_{chunk_id}.data" - filenames["in"].append(filename) + x, y, z = chunk_coord + for _x, _y, _z in itertools.product([x - 1, x], [y - 1, y], [z - 1, z]): + if imanager.is_out_of_bounds(np.array([_x, _y, _z])): + continue + # EDGES WITHIN CHUNKS + filename = f"in_chunk_0_{_x}_{_y}_{_z}_{chunk_id}.data" + filenames["in"].append(filename) for d in [-1, 1]: for dim in range(3): diff = np.zeros([3], dtype=np.int) diff[dim] = d - adjacent_chunk_coord = chunk_coord + diff - adjacent_chunk_id = imanager.cg.get_chunk_id( - layer=1, - x=adjacent_chunk_coord[0], - y=adjacent_chunk_coord[1], - z=adjacent_chunk_coord[2], - ) + x, y, z = adjacent_chunk_coord + adjacent_chunk_id = compute_chunk_id(layer=1, x=x, y=y, z=z) - if imanager.is_out_of_bounce(adjacent_chunk_coord): + if imanager.is_out_of_bounds(adjacent_chunk_coord): continue - c_chunk_coords = _get_cont_chunk_coords( imanager, chunk_coord, adjacent_chunk_coord ) @@ -314,36 +291,24 @@ def _collect_edge_data(imanager, chunk_coord): for c_chunk_coord in c_chunk_coords: x, y, z = c_chunk_coord - # 
EDGES BETWEEN CHUNKS filename = f"between_chunks_0_{x}_{y}_{z}_{chunk_id_string}.data" filenames["between"].append(filename) - swap[filename] = larger_id == chunk_id # EDGES FROM CUTS OF SVS filename = f"fake_0_{x}_{y}_{z}_{chunk_id_string}.data" filenames["cross"].append(filename) - swap[filename] = larger_id == chunk_id edge_data = {} read_counter = collections.Counter() - for k in filenames: - # print(k, len(filenames[k])) - with cloudvolume.Storage(base_path, n_threads=10) as stor: files = stor.get_files(filenames[k]) - data = [] for file in files: - if file["content"] is None: - # print(f"{file['filename']} not created or empty") - continue - - if file["error"] is not None: - # print(f"error reading {file['filename']}") + if file["error"] or file["content"] is None: continue if swap[file["filename"]]: @@ -356,9 +321,7 @@ def _collect_edge_data(imanager, chunk_coord): content = np.frombuffer(file["content"], dtype=imanager.edge_dtype) data.append(content) - read_counter[k] += 1 - try: edge_data[k] = rfn.stack_arrays(data, usemask=False) except: @@ -369,7 +332,6 @@ def _collect_edge_data(imanager, chunk_coord): edge_data_df.groupby(["sv1", "sv2"]).aggregate(np.sum).reset_index() ) edge_data[k] = edge_data_dfg.to_records() - return edge_data @@ -379,32 +341,22 @@ def _read_agg_files(filenames, base_path): edge_list = [] for file in files: - if file["content"] is None: - continue - - if file["error"] is not None: + if file["error"] or file["content"] is None: continue - content = zstd.ZstdDecompressor().decompressobj().decompress(file["content"]) edge_list.append(np.frombuffer(content, dtype=basetypes.NODE_ID).reshape(-1, 2)) - return edge_list -def _read_raw_agglomeration_data(imanager, chunk_coord): - """ Collects agglomeration information & builds connected component mapping - - :param imanager: IngestionManager - :param chunk_coord: np.ndarray - array of three ints - :return: dictionary +def _read_raw_agglomeration_data(imanager, chunk_coord: np.ndarray): + 
""" + Collects agglomeration information & builds connected component mapping """ subfolder = "remap" base_path = f"{imanager.storage_path}/{subfolder}/" chunk_coord = np.array(chunk_coord) - chunk_id = imanager.cg.get_chunk_id( - layer=1, x=chunk_coord[0], y=chunk_coord[1], z=chunk_coord[2] - ) + x, y, z = chunk_coord + chunk_id = compute_chunk_id(layer=1, x=x, y=y, z=z) filenames = [] for mip_level in range(0, int(imanager.n_layers - 1)): @@ -416,12 +368,8 @@ def _read_raw_agglomeration_data(imanager, chunk_coord): diff = np.zeros([3], dtype=np.int) diff[dim] = d adjacent_chunk_coord = chunk_coord + diff - adjacent_chunk_id = imanager.cg.get_chunk_id( - layer=1, - x=adjacent_chunk_coord[0], - y=adjacent_chunk_coord[1], - z=adjacent_chunk_coord[2], - ) + x, y, z = adjacent_chunk_coord + adjacent_chunk_id = compute_chunk_id(layer=1, x=x, y=y, z=z) for mip_level in range(0, int(imanager.n_layers - 1)): x, y, z = np.array(adjacent_chunk_coord / 2 ** mip_level, dtype=np.int) @@ -429,18 +377,17 @@ def _read_raw_agglomeration_data(imanager, chunk_coord): f"done_{mip_level}_{x}_{y}_{z}_{adjacent_chunk_id}.data.zst" ) - edge_list = _read_agg_files(filenames, base_path) - edges = np.concatenate(edge_list) + edges_list = _read_agg_files(filenames, base_path) G = nx.Graph() - G.add_edges_from(edges) - ccs = nx.connected_components(G) + G.add_edges_from(np.concatenate(edges_list)) mapping = {} - for i_cc, cc in enumerate(ccs): + components = list(nx.connected_components(G)) + for i_cc, cc in enumerate(components): cc = list(cc) mapping.update(dict(zip(cc, [i_cc] * len(cc)))) if mapping: - put_chunk_agglomeration(imanager.agglomeration_dir, mapping, chunk_coord) + put_chunk_components(imanager.components_dir, components, chunk_coord) return mapping @@ -450,15 +397,7 @@ def _define_active_edges(edge_dict, mapping): bool arrays; True: connected (within same segment) isolated node ids """ - - def _mapping_default(key): - if key in mapping: - return mapping[key] - else: - return 
-1 - - mapping_vec = np.vectorize(_mapping_default) - + mapping_vec = np.vectorize(lambda k: mapping.get(k, -1)) active = {} isolated = [[]] for k in edge_dict: @@ -470,17 +409,13 @@ def _mapping_default(key): continue agg_id_2 = mapping_vec(edge_dict[k].node_ids2) - active[k] = agg_id_1 == agg_id_2 - # Set those with two -1 to False agg_1_m = agg_id_1 == -1 agg_2_m = agg_id_2 == -1 active[k][agg_1_m] = False isolated.append(edge_dict[k].node_ids1[agg_1_m]) - if k == "in": isolated.append(edge_dict[k].node_ids2[agg_2_m]) - return active, np.unique(np.concatenate(isolated).astype(basetypes.NODE_ID)) diff --git a/pychunkedgraph/io/agglomeration.py b/pychunkedgraph/io/agglomeration.py deleted file mode 100644 index fbfdc7c8b..000000000 --- a/pychunkedgraph/io/agglomeration.py +++ /dev/null @@ -1,49 +0,0 @@ -import json -from typing import Dict - -import numpy as np -from cloudvolume.storage import SimpleStorage - -from .protobuf.chunkMapping_pb2 import ChunkMappingMsg -from ..backend.utils import basetypes - - -def serialize(mapping: Dict) -> ChunkMappingMsg: - supervoxels = np.array(list(mapping.keys()), dtype=basetypes.NODE_ID) - components = np.array(list(mapping.values()), dtype=int) - mapping_message = ChunkMappingMsg() - mapping_message.supervoxels = supervoxels.tobytes() - mapping_message.components = components.tobytes() - return mapping_message - - -def deserialize(mapping_message: ChunkMappingMsg) -> Dict: - supervoxels = np.frombuffer(mapping_message.supervoxels, basetypes.NODE_ID) - components = np.frombuffer(mapping_message.components, basetypes.NODE_ID) - return dict(zip(supervoxels, components)) - - -def put_chunk_agglomeration(agglomeration_dir, mapping, chunk_coord) -> None: - # filename format - mapping_x_y_z.serliazation - mapping_message = serialize(mapping) - file_name = f"mapping_{'_'.join(str(coord) for coord in chunk_coord)}.proto" - with SimpleStorage(agglomeration_dir) as storage: - storage.put_file( - file_path=file_name, - 
content=mapping_message.SerializeToString(), - compress="gzip", - cache_control="no-cache", - ) - - -def get_chunk_agglomeration(agglomeration_dir, chunk_coord) -> Dict: - # filename format - mapping_x_y_z.serliazation - file_name = f"mapping_{'_'.join(str(coord) for coord in chunk_coord)}.proto" - with SimpleStorage(agglomeration_dir) as storage: - content = storage.get_file(file_name) - if not content: - return {} - mapping_message = ChunkMappingMsg() - mapping_message.ParseFromString() - return deserialize(mapping_message) - diff --git a/pychunkedgraph/io/components.py b/pychunkedgraph/io/components.py new file mode 100644 index 000000000..fb7f585f5 --- /dev/null +++ b/pychunkedgraph/io/components.py @@ -0,0 +1,60 @@ +import json +from typing import Dict, Iterable + +import numpy as np +from cloudvolume.storage import SimpleStorage + +from .protobuf.chunkComponents_pb2 import ChunkComponentsMsg +from ..backend.utils import basetypes + + +def serialize(connected_components: Iterable) -> ChunkComponentsMsg: + components = [] + for component in list(connected_components): + component = np.array(list(component), dtype=basetypes.NODE_ID) + components.append(np.array([len(component)], dtype=basetypes.NODE_ID)) + components.append(component) + components_message = ChunkComponentsMsg() + components_message.components[:] = np.concatenate(components) + return components_message + + +def deserialize(components_message: ChunkComponentsMsg) -> Dict: + mapping = {} + components = np.array(components_message.components, basetypes.NODE_ID) + idx = 0 + n_components = 0 + while idx < components.size: + component_size = int(components[idx]) + start = idx + 1 + component = components[start : start + component_size] + mapping.update(dict(zip(component, [n_components] * component_size))) + idx += component_size + 1 + n_components += 1 + return mapping + + +def put_chunk_components(components_dir, components, chunk_coord) -> None: + # filename format - components_x_y_z.serliazation + 
components_message = serialize(components) + file_name = f"components_{'_'.join(str(coord) for coord in chunk_coord)}.proto" + with SimpleStorage(components_dir) as storage: + storage.put_file( + file_path=file_name, + content=components_message.SerializeToString(), + compress="gzip", + cache_control="no-cache", + ) + + +def get_chunk_components(components_dir, chunk_coord) -> Dict: + # filename format - components_x_y_z.serliazation + file_name = f"components_{'_'.join(str(coord) for coord in chunk_coord)}.proto" + with SimpleStorage(components_dir) as storage: + content = storage.get_file(file_name) + if not content: + return {} + components_message = ChunkComponentsMsg() + components_message.ParseFromString(content) + return deserialize(components_message) + diff --git a/pychunkedgraph/io/edges.py b/pychunkedgraph/io/edges.py index 7e1af9d42..5bdc011c5 100644 --- a/pychunkedgraph/io/edges.py +++ b/pychunkedgraph/io/edges.py @@ -66,7 +66,6 @@ def get_chunk_edges( :param cv_threads: cloudvolume storage client thread count :type int: :return: dictionary {"edge_type": Edges} - :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray] """ fnames = [] for chunk_coords in chunks_coordinates: @@ -81,7 +80,6 @@ def get_chunk_edges( ) chunk_edge_dicts = [] - with storage: files = storage.get_files(fnames) for _file in files: diff --git a/pychunkedgraph/io/protobuf/chunkComponents.proto b/pychunkedgraph/io/protobuf/chunkComponents.proto new file mode 100644 index 000000000..f415c3970 --- /dev/null +++ b/pychunkedgraph/io/protobuf/chunkComponents.proto @@ -0,0 +1,7 @@ +syntax = "proto3"; + +package components; + +message ChunkComponentsMsg { + repeated uint64 components = 1; +} \ No newline at end of file diff --git a/pychunkedgraph/io/protobuf/chunkComponents_pb2.py b/pychunkedgraph/io/protobuf/chunkComponents_pb2.py new file mode 100644 index 000000000..eb94988a7 --- /dev/null +++ b/pychunkedgraph/io/protobuf/chunkComponents_pb2.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- +# 
Generated by the protocol buffer compiler. DO NOT EDIT! +# source: chunkComponents.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='chunkComponents.proto', + package='components', + syntax='proto3', + serialized_options=None, + serialized_pb=_b('\n\x15\x63hunkComponents.proto\x12\ncomponents\"(\n\x12\x43hunkComponentsMsg\x12\x12\n\ncomponents\x18\x01 \x03(\x04\x62\x06proto3') +) + + + + +_CHUNKCOMPONENTSMSG = _descriptor.Descriptor( + name='ChunkComponentsMsg', + full_name='components.ChunkComponentsMsg', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='components', full_name='components.ChunkComponentsMsg.components', index=0, + number=1, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=37, + serialized_end=77, +) + +DESCRIPTOR.message_types_by_name['ChunkComponentsMsg'] = _CHUNKCOMPONENTSMSG +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +ChunkComponentsMsg = _reflection.GeneratedProtocolMessageType('ChunkComponentsMsg', (_message.Message,), { + 'DESCRIPTOR' : _CHUNKCOMPONENTSMSG, + '__module__' : 'chunkComponents_pb2' + # @@protoc_insertion_point(class_scope:components.ChunkComponentsMsg) + }) +_sym_db.RegisterMessage(ChunkComponentsMsg) + + +# 
@@protoc_insertion_point(module_scope) diff --git a/pychunkedgraph/io/protobuf/chunkMapping.proto b/pychunkedgraph/io/protobuf/chunkMapping.proto deleted file mode 100644 index 38c0e3f15..000000000 --- a/pychunkedgraph/io/protobuf/chunkMapping.proto +++ /dev/null @@ -1,8 +0,0 @@ -syntax = "proto3"; - -package mapping; - -message ChunkMappingMsg { - bytes supervoxels = 1; - bytes components = 2; -} \ No newline at end of file diff --git a/pychunkedgraph/io/storage.md b/pychunkedgraph/io/storage.md new file mode 100644 index 000000000..ac27334f9 --- /dev/null +++ b/pychunkedgraph/io/storage.md @@ -0,0 +1,66 @@ +## Serialization + +PyChunkedgraph uses protobuf for serialization and zstandard for compression. + +Edges are stored using the following protobuf definition. +You can also find it [here](https://github.com/seung-lab/PyChunkedGraph/blob/akhilesh-jobs-layer-dependency/pychunkedgraph/io/protobuf/chunkEdges.proto) + +``` +syntax = "proto3"; + +package edges; + +message EdgesMsg { + bytes node_ids1 = 1; + bytes node_ids2 = 2; + bytes affinities = 3; + bytes areas = 4; +} + +message ChunkEdgesMsg { + EdgesMsg in_chunk = 1; + EdgesMsg cross_chunk = 2; + EdgesMsg between_chunk = 3; +} +``` + +This format is a result of multiple performance tests. +It provided the best tradeoff between deserialzation speed and storage size. + +To read and write edges in this format, the functions `get_chunk_edges` and `put_chunk_edges` +in the module `pychunkedgraph.io.edges` may be used. + +[CloudVolume](https://github.com/seung-lab/cloud-volume) is used for uploading and downloading this data. 
+ +### Example usage + +``` +from pychunkedgraph.backend.definitions.edges import Edges + +in_chunk = [[1,2],[2,3],[0,2],[2,4]] +between_chunk = [[1,5]] +cross_chunk = [[3,4]] + +in_chunk_edges = Edges(in_chunk[:,0], in_chunk[:,1]) +between_chunk_edges = Edges(between_chunk[:,0], between_chunk[:,1]) +cross_chunk_edges = Edges(cross_chunk[:,0], cross_chunk[:,1]) + +edges_path = "" +chunk_coordinates = np.array([0,0,0]) + +edges_d = { + "in": in_chunk_edges, + "between": between_chunk_edges, + "cross": cross_chunk_edges +} + +put_chunk_edges(edges_path, chunk_coordinates, edges_d, compression_level=22) +# file will be located at /edges_0_0_0.proto.zst + +# reading the file will simply return the previous dictionary +edges_d = get_chunk_edges(edges_path, [chunk_coordinates]) + +# notice the difference between chunk_coordinates parameter +# put_chunk_edges takes in coordinates for a single chunk +# get_chunk_edges takes in a list of chunk coordinates +``` diff --git a/pychunkedgraph/utils/redis.py b/pychunkedgraph/utils/redis.py index 79aa7daea..a71de577a 100644 --- a/pychunkedgraph/utils/redis.py +++ b/pychunkedgraph/utils/redis.py @@ -4,14 +4,27 @@ import os import functools +from collections import namedtuple import redis +from rq import Queue +# REDIS_SERVICE_HOST and REDIS_SERVICE_PORT are added by Kubernetes REDIS_HOST = os.environ.get("REDIS_SERVICE_HOST", "localhost") REDIS_PORT = os.environ.get("REDIS_SERVICE_PORT", "6379") REDIS_PASSWORD = os.environ.get("REDIS_PASSWORD", "dev") REDIS_URL = f"redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0" +keys_fields = ("INGESTION_MANAGER", "PARENTS_HASH", "ATOMIC_HASH_FINISHED") +keys_defaults = ("pcg:imanager", "rq:enqueued:parents", "rq:finished:atomic") +Keys = namedtuple("keys", keys_fields, defaults=keys_defaults) + +keys = Keys() + + +def get_redis_connection(redis_url=REDIS_URL): + return redis.Redis.from_url(redis_url) + def redis_job(redis_url, redis_channel): """ @@ -21,7 +34,7 @@ def redis_job(redis_url, 
redis_channel): """ def redis_job_decorator(func): - r = redis.Redis.from_url(redis_url) + r = get_redis_connection() @functools.wraps(func) def wrapper(*args, **kwargs): @@ -33,3 +46,8 @@ def wrapper(*args, **kwargs): return wrapper return redis_job_decorator + + +def get_rq_queue(queue): + connection = redis.Redis.from_url(REDIS_URL) + return Queue(queue, connection=connection) diff --git a/requirements.txt b/requirements.txt index a4df79be8..732d0126b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,7 +13,7 @@ zstandard redis rq middle-auth-client>=0.1.6 -dracopy==0.0.11 +dracopy zmesh fastremap pyopenssl \ No newline at end of file diff --git a/rq_workers/ingest_worker.py b/rq_workers/ingest_worker.py index dfe265e34..a90a8605a 100644 --- a/rq_workers/ingest_worker.py +++ b/rq_workers/ingest_worker.py @@ -3,13 +3,11 @@ # For the flask app use a config class # env REDIS_SERVICE_HOST and REDIS_SERVICE_PORT are added by Kubernetes -REDIS_HOST = os.environ.get('REDIS_SERVICE_HOST') -REDIS_PORT = os.environ.get('REDIS_SERVICE_PORT') -REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD') -REDIS_URL = f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0' + +from pychunkedgraph.utils.redis import REDIS_URL # Queues to listen on -QUEUES = ['default', 'ingest-chunks'] +QUEUES = ['default', 'ingest'] # If you're using Sentry to collect your runtime exceptions, you can use this # to configure RQ for it in a single step diff --git a/rq_workers/test_worker.py b/rq_workers/test_worker.py index 256957b04..b86f4ca1b 100644 --- a/rq_workers/test_worker.py +++ b/rq_workers/test_worker.py @@ -2,11 +2,7 @@ # This is for monitoring rq with supervisord # For the flask app use a config class -# env REDIS_SERVICE_HOST and REDIS_SERVICE_PORT are added by Kubernetes -REDIS_HOST = os.environ.get('REDIS_SERVICE_HOST') -REDIS_PORT = os.environ.get('REDIS_SERVICE_PORT') -REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD') -REDIS_URL = 
f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0' +from pychunkedgraph.utils.redis import REDIS_URL # Queues to listen on QUEUES = ['test'] From 2d9805069e29e9e36def7bc128ec1dfa15006c6f Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 2 Oct 2019 14:12:10 -0400 Subject: [PATCH 0242/1097] akhilesh-ingestion-manager-refactor (#168) * TODO * wip: metainfo classes * wip: metainfo classes * wip: metainfo classes * wip: metainfo classes * wip: metainfo classes * wip: metainfo classes * wip: metainfo classes * wip: metainfo classes * cleanup redis key --- pychunkedgraph/backend/chunkedgraph.py | 4 +- pychunkedgraph/backend/definitions/config.py | 22 +- pychunkedgraph/ingest/__init__.py | 9 + pychunkedgraph/ingest/cli.py | 82 +++---- pychunkedgraph/ingest/ingestion_utils.py | 78 ++++--- pychunkedgraph/ingest/ingestionmanager.py | 225 +++++++++---------- pychunkedgraph/ingest/ran_ingestion_v2.py | 64 +----- pychunkedgraph/utils/redis.py | 2 +- 8 files changed, 213 insertions(+), 273 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 50e1e457e..946e13ebc 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -83,8 +83,8 @@ class ChunkedGraph(object): def __init__( self, table_id: str, - instance_id: str = "pychunkedgraph", - project_id: str = "neuromancer-seung-import", + project_id: str, + instance_id: str, chunk_size: Tuple[np.uint64, np.uint64, np.uint64] = None, fan_out: Optional[np.uint64] = None, use_skip_connections: Optional[bool] = True, diff --git a/pychunkedgraph/backend/definitions/config.py b/pychunkedgraph/backend/definitions/config.py index ac27583f4..c41570971 100644 --- a/pychunkedgraph/backend/definitions/config.py +++ b/pychunkedgraph/backend/definitions/config.py @@ -1,32 +1,30 @@ from collections import namedtuple -datasource_fields = ( +_datasource_fields = ( "agglomeration", "watershed", "edges", "components", "use_raw_edges", 
"use_raw_components", - "size", -) -DataSource = namedtuple( - "DataSource", datasource_fields, defaults=(None,) * len(datasource_fields) + "data_version", ) +_datasource_defaults = (None, None, None, None, True, True, 2) +DataSource = namedtuple("DataSource", _datasource_fields, defaults=_datasource_defaults) -graphconfig_fields = ( +_graphconfig_fields = ( "graph_id", "chunk_size", "fanout", - "build_graph", "s_bits_atomic_layer", ) +_graphconfig_defaults = (None, None, 2, True, 8) GraphConfig = namedtuple( - "GraphConfig", graphconfig_fields, defaults=(None, None, 2, True, 8) + "GraphConfig", _graphconfig_fields, defaults=_graphconfig_defaults ) -bigtableconfig_fields = ("project_id", "instance_id") +_bigtableconfig_fields = ("project_id", "instance_id") +_bigtableconfig_defaults = ("neuromancer-seung-import", "pychunkedgraph") BigTableConfig = namedtuple( - "BigTableConfig", - bigtableconfig_fields, - defaults=(None,) * len(bigtableconfig_fields), + "BigTableConfig", _bigtableconfig_fields, defaults=_bigtableconfig_defaults ) diff --git a/pychunkedgraph/ingest/__init__.py b/pychunkedgraph/ingest/__init__.py index e69de29bb..9e7f42be9 100644 --- a/pychunkedgraph/ingest/__init__.py +++ b/pychunkedgraph/ingest/__init__.py @@ -0,0 +1,9 @@ +from collections import namedtuple + +from ..utils.redis import REDIS_URL + +_ingestconfig_fields = ("build_graph", "flush_redis_db", "task_q_name", "redis_url") +_ingestconfig_defaults = (True, False, "test", REDIS_URL) +IngestConfig = namedtuple( + "IngestConfig", _ingestconfig_fields, defaults=_ingestconfig_defaults +) diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index a2c1b4f8f..d2c5c9a22 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -12,10 +12,9 @@ import click from flask.cli import AppGroup +from . 
import IngestConfig from .ingestionmanager import IngestionManager from .ran_ingestion_v2 import INGEST_CHANNEL -from .ran_ingestion_v2 import INGEST_QUEUE -from .ran_ingestion_v2 import ingest_into_chunkedgraph from .ran_ingestion_v2 import enqueue_atomic_tasks from .ran_ingestion_v2 import create_parent_chunk from ..utils.redis import get_redis_connection @@ -26,8 +25,6 @@ ingest_cli = AppGroup("ingest") -redis_cnxn = get_redis_connection() - @ingest_cli.command("graph") @click.argument("graph_id", type=str) @@ -58,62 +55,49 @@ def ingest_graph( # interval=90.0 result_ttl=500, ): - # agglomeration = "gs://ranl-scratch/190410_FAFB_v02_ws_size_threshold_200/agg" - # watershed = ( - # "gs://microns-seunglab/drosophila_v0/ws_190410_FAFB_v02_ws_size_threshold_200" - # ) - # edges = "gs://akhilesh-pcg/190410_FAFB_v02/edges" - # components = "gs://akhilesh-pcg/190410_FAFB_v02/components" - - agglomeration = "gs://ranl-scratch/minnie65_0/agg" - watershed = "gs://microns-seunglab/minnie65/ws_minnie65_0" - edges = "gs://chunkedgraph/minnie65_0/edges" - components = "gs://chunkedgraph/minnie65_0/components" - - use_raw_edges = not processed - use_raw_components = not processed - - chunk_size = [256, 256, 512] - fanout = 2 - gcp_project_id = None - bigtable_instance_id = None - build_graph = False - s_bits_atomic_layer = 10 - + ingest_config = IngestConfig(build_graph=False, flush_redis_db=True) data_source = DataSource( - agglomeration, watershed, edges, components, use_raw_edges, use_raw_components + agglomeration="gs://ranl-scratch/minnie65_0/agg", + watershed="gs://microns-seunglab/minnie65/ws_minnie65_0", + edges="gs://chunkedgraph/minnie65_0/edges", + components="gs://chunkedgraph/minnie65_0/components", + use_raw_edges=not processed, + use_raw_components=not processed, + data_version=2, ) graph_config = GraphConfig( - graph_id, chunk_size, fanout, build_graph, s_bits_atomic_layer + graph_id=graph_id, + chunk_size=np.array([256, 256, 512], dtype=int), + fanout=2, + 
s_bits_atomic_layer=10, + ) + bigtable_config = BigTableConfig() + imanager = IngestionManager( + ingest_config, data_source, graph_config, bigtable_config ) - bigtable_config = BigTableConfig(gcp_project_id, bigtable_instance_id) - - redis_cnxn.flushdb() - imanager = ingest_into_chunkedgraph(data_source, graph_config, bigtable_config) - redis_cnxn.set(r_keys.INGESTION_MANAGER, imanager.get_serialized_info(pickled=True)) enqueue_atomic_tasks(imanager, result_ttl=result_ttl) def _get_children_coords( - imanager, layer: int, parent_coords: Sequence[int] + imanager: IngestionManager, layer: int, parent_coords: Sequence[int] ) -> List[np.ndarray]: layer_bounds = imanager.chunk_id_bounds / (2 ** (layer - 2)) layer_bounds = np.ceil(layer_bounds).astype(np.int) children_coords = [] parent_coords = np.array(parent_coords, dtype=int) - for dcoord in product(*[range(imanager.cg.fan_out)] * 3): + for dcoord in product(*[range(imanager.graph_config.fan_out)] * 3): dcoord = np.array(dcoord, dtype=int) - child_coords = parent_coords * imanager.cg.fan_out + dcoord + child_coords = parent_coords * imanager.graph_config.fan_out + dcoord check_bounds = np.less(child_coords, layer_bounds[:, 1]) if np.all(check_bounds): children_coords.append(child_coords) return children_coords -def _parse_results(imanager): - global redis_cnxn - zset_name = f"rq:finished:{INGEST_QUEUE}" - results = redis_cnxn.zrange(zset_name, 0, -1) +def _parse_results(imanager: IngestionManager): + results = imanager.redis.zrange( + f"rq:finished:{imanager.ingest_config.task_q_name}", 0, -1 + ) layer_counts_d = defaultdict(int) parent_chunks_d = defaultdict(list) # (layer, x, y, z) as keys for chunk_str in results: @@ -122,23 +106,22 @@ def _parse_results(imanager): layer_counts_d[layer] += 1 if layer == imanager.n_layers: print("All jobs completed.") + imanager.redis.delete(r_keys.INGESTION_MANAGER) sys.exit(0) layer += 1 - x, y, z = np.array([x, y, z], int) // imanager.cg.fan_out + x, y, z = np.array([x, y, z], 
int) // imanager.graph_config.fan_out parent_job_id = f"{layer}_{'_'.join(map(str, (x, y, z)))}" - if not redis_cnxn.hget(r_keys.PARENTS_HASH, parent_job_id) is None: + if not imanager.redis.hget(r_keys.PARENTS_HASH_ENQUEUED, parent_job_id) is None: continue parent_chunks_d[(layer, x, y, z)].append(chunk_str) return parent_chunks_d, layer_counts_d -def _enqueue_parent_tasks(): +def _enqueue_parent_tasks(imanager: IngestionManager): """ Helper to enqueue parent tasks Checks job/chunk ids in redis to determine if parent task can be enqueued """ - global redis_cnxn - imanager = IngestionManager.from_pickle(redis_cnxn.get(r_keys.INGESTION_MANAGER)) parent_chunks_d, layer_counts_d = _parse_results(imanager) count = 0 for parent_chunk in parent_chunks_d: @@ -157,7 +140,7 @@ def _enqueue_parent_tasks(): args=(imanager.get_serialized_info(), parent_chunk[0], children_coords), ) count += 1 - redis_cnxn.hset(r_keys.PARENTS_HASH, job_id, "") + imanager.redis.hset(r_keys.PARENTS_HASH_ENQUEUED, job_id, "") layers = range(2, imanager.n_layers) status = ", ".join([f"{l}:{layer_counts_d[l]}" for l in layers]) @@ -172,12 +155,15 @@ def ingest_parent_chunks(interval): This can only be used after running `ingest graph` Uses serialzed ingestion manager information stored in redis by the `ingest graph` command. + Should be on the same redis server where ingest is running. 
""" - if not redis_cnxn.get(r_keys.INGESTION_MANAGER): + redis = get_redis_connection() + imanager_info = redis.get(r_keys.INGESTION_MANAGER) + if not imanager_info: click.secho("Run `ingest graph` before using this command.", fg="red") sys.exit(1) while True: - _enqueue_parent_tasks() + _enqueue_parent_tasks(IngestionManager.from_pickle(imanager_info)) time.sleep(interval) diff --git a/pychunkedgraph/ingest/ingestion_utils.py b/pychunkedgraph/ingest/ingestion_utils.py index 2e468a09c..ddc3e577d 100644 --- a/pychunkedgraph/ingest/ingestion_utils.py +++ b/pychunkedgraph/ingest/ingestion_utils.py @@ -5,18 +5,28 @@ import collections -def calc_n_layers(ws_cv, chunk_size, fan_out): +def get_layer_count(ws_cv, chunk_size, fan_out): bbox = np.array(ws_cv.bounds.to_list()).reshape(2, 3) n_chunks = ((bbox[1] - bbox[0]) / chunk_size).astype(np.int) - n_layers = int( np.ceil(chunkedgraph_utils.log_n(np.max(n_chunks), fan_out))) + 2 + n_layers = int(np.ceil(chunkedgraph_utils.log_n(np.max(n_chunks), fan_out))) + 2 return n_layers -def initialize_chunkedgraph(cg_table_id, ws_cv_path, chunk_size, size, - cg_mesh_dir, use_skip_connections=True, - s_bits_atomic_layer=None, - n_bits_root_counter=8, fan_out=2, instance_id=None, - project_id=None, edge_dir=None, is_new=True): +def initialize_chunkedgraph( + cg_table_id, + ws_cv_path, + chunk_size, + size, + cg_mesh_dir, + use_skip_connections=True, + s_bits_atomic_layer=None, + n_bits_root_counter=8, + fan_out=2, + instance_id=None, + project_id=None, + edge_dir=None, + is_new=True, +): """ Initalizes a chunkedgraph on BigTable :param cg_table_id: str @@ -43,34 +53,34 @@ def initialize_chunkedgraph(cg_table_id, ws_cv_path, chunk_size, size, """ ws_cv = cloudvolume.CloudVolume(ws_cv_path) - n_layers_agg = calc_n_layers(ws_cv, chunk_size, fan_out=2) - if size is not None: size = np.array(size) - for i in range(len(ws_cv.info['scales'])): - original_size = ws_cv.info['scales'][i]['size'] + for i in range(len(ws_cv.info["scales"])): + 
original_size = ws_cv.info["scales"][i]["size"] size = np.min([size, original_size], axis=0) - ws_cv.info['scales'][i]['size'] = [int(x) for x in size] + ws_cv.info["scales"][i]["size"] = [int(x) for x in size] size[:-1] //= 2 - n_layers_cg = calc_n_layers(ws_cv, chunk_size, fan_out=fan_out) + n_layers_cg = get_layer_count(ws_cv, chunk_size, fan_out=fan_out) dataset_info = ws_cv.info dataset_info["mesh"] = cg_mesh_dir dataset_info["data_dir"] = ws_cv_path dataset_info["graph"] = {"chunk_size": [int(s) for s in chunk_size]} - kwargs = {"table_id": cg_table_id, - "chunk_size": chunk_size, - "fan_out": np.uint64(fan_out), - "n_layers": np.uint64(n_layers_cg), - "dataset_info": dataset_info, - "use_skip_connections": use_skip_connections, - "s_bits_atomic_layer": s_bits_atomic_layer, - "n_bits_root_counter": n_bits_root_counter, - "is_new": True, - "edge_dir": edge_dir} + kwargs = { + "table_id": cg_table_id, + "chunk_size": chunk_size, + "fan_out": np.uint64(fan_out), + "n_layers": np.uint64(n_layers_cg), + "dataset_info": dataset_info, + "use_skip_connections": use_skip_connections, + "s_bits_atomic_layer": s_bits_atomic_layer, + "n_bits_root_counter": n_bits_root_counter, + "is_new": True, + "edge_dir": edge_dir, + } if instance_id is not None: kwargs["instance_id"] = instance_id @@ -79,7 +89,7 @@ def initialize_chunkedgraph(cg_table_id, ws_cv_path, chunk_size, size, kwargs["project_id"] = project_id cg = chunkedgraph.ChunkedGraph(**kwargs) - return cg, n_layers_agg + return cg def postprocess_edge_data(im, edge_dict): @@ -88,13 +98,17 @@ def postprocess_edge_data(im, edge_dict): elif im.data_version in [3, 4]: new_edge_dict = {} for k in edge_dict: - areas = edge_dict[k]["area_x"] * im.cg.cv.resolution[0] + \ - edge_dict[k]["area_y"] * im.cg.cv.resolution[1] + \ - edge_dict[k]["area_z"] * im.cg.cv.resolution[2] - - affs = edge_dict[k]["aff_x"] * im.cg.cv.resolution[0] + \ - edge_dict[k]["aff_y"] * im.cg.cv.resolution[1] + \ - edge_dict[k]["aff_z"] * 
im.cg.cv.resolution[2] + areas = ( + edge_dict[k]["area_x"] * im.cg.cv.resolution[0] + + edge_dict[k]["area_y"] * im.cg.cv.resolution[1] + + edge_dict[k]["area_z"] * im.cg.cv.resolution[2] + ) + + affs = ( + edge_dict[k]["aff_x"] * im.cg.cv.resolution[0] + + edge_dict[k]["aff_y"] * im.cg.cv.resolution[1] + + edge_dict[k]["aff_z"] * im.cg.cv.resolution[2] + ) new_edge_dict[k] = {} new_edge_dict[k]["sv1"] = edge_dict[k]["sv1"] @@ -104,4 +118,4 @@ def postprocess_edge_data(im, edge_dict): return new_edge_dict else: - raise Exception(f"Unknown data_version: {im.data_version}") \ No newline at end of file + raise Exception(f"Unknown data_version: {im.data_version}") diff --git a/pychunkedgraph/ingest/ingestionmanager.py b/pychunkedgraph/ingest/ingestionmanager.py index 297277d4f..fd8641f60 100644 --- a/pychunkedgraph/ingest/ingestionmanager.py +++ b/pychunkedgraph/ingest/ingestionmanager.py @@ -2,135 +2,93 @@ import numpy as np import pickle +from cloudvolume import CloudVolume -from ..backend.chunkedgraph_utils import compute_bitmasks -from ..backend.chunkedgraph import ChunkedGraph +from . 
import IngestConfig +from .ingestion_utils import get_layer_count +from ..utils.redis import keys as r_keys from ..utils.redis import get_redis_connection from ..utils.redis import get_rq_queue +from ..backend.chunkedgraph_utils import compute_bitmasks +from ..backend.chunkedgraph import ChunkedGraph +from ..backend.definitions.config import DataSource +from ..backend.definitions.config import GraphConfig +from ..backend.definitions.config import BigTableConfig class IngestionManager(object): def __init__( self, - storage_path, - cg_table_id=None, - n_layers=None, - instance_id=None, - project_id=None, - cv=None, - chunk_size=None, - data_version=2, - s_bits_atomic_layer=8, - use_raw_edge_data=True, - use_raw_agglomeration_data=True, - edges_dir=None, - components_dir=None, - task_q_name="test", - build_graph=True, + ingest_config: IngestConfig, + data_source: DataSource, + graph_config: GraphConfig, + bigtable_config: BigTableConfig, ): - self._storage_path = storage_path - self._cg_table_id = cg_table_id - self._instance_id = instance_id - self._project_id = project_id + + self._ingest_config = ingest_config + self._data_source = data_source + self._graph_config = graph_config + self._bigtable_config = bigtable_config + self._cg = None - self._n_layers = n_layers - self._s_bits_atomic_layer = s_bits_atomic_layer - self._data_version = data_version - self._cv = cv - self._chunk_size = chunk_size - self._use_raw_edge_data = use_raw_edge_data - self._use_raw_agglomeration_data = use_raw_agglomeration_data - self._edges_dir = edges_dir - self._components_dir = components_dir + self._ws_cv = CloudVolume(data_source.watershed) + self._n_layers = None self._chunk_coords = None self._layer_bounds_d = None - self._redis_connection = None - self._task_q_name = task_q_name + self._task_q = None - self._build_graph = build_graph + self._bitmasks = None self._bounds = None self._redis = None @property - def storage_path(self): - return self._storage_path + def 
ingest_config(self): + return self._ingest_config @property - def data_version(self): - assert self._data_version in [2, 3, 4] - return self._data_version + def data_source(self): + return self._data_source @property - def edge_dtype(self): - if self.data_version == 4: - dtype = [ - ("sv1", np.uint64), - ("sv2", np.uint64), - ("aff_x", np.float32), - ("area_x", np.uint64), - ("aff_y", np.float32), - ("area_y", np.uint64), - ("aff_z", np.float32), - ("area_z", np.uint64), - ] - elif self.data_version == 3: - dtype = [ - ("sv1", np.uint64), - ("sv2", np.uint64), - ("aff_x", np.float64), - ("area_x", np.uint64), - ("aff_y", np.float64), - ("area_y", np.uint64), - ("aff_z", np.float64), - ("area_z", np.uint64), - ] - elif self.data_version == 2: - dtype = [ - ("sv1", np.uint64), - ("sv2", np.uint64), - ("aff", np.float32), - ("area", np.uint64), - ] - else: - raise Exception() + def graph_config(self): + return self._graph_config - return dtype + @property + def bigtable_config(self): + return self._bigtable_config @property def cg(self): if self._cg is None: - kwargs = {} - - if self._instance_id is not None: - kwargs["instance_id"] = self._instance_id - - if self._project_id is not None: - kwargs["project_id"] = self._project_id - - self._cg = ChunkedGraph(table_id=self._cg_table_id, **kwargs) - + self._cg = ChunkedGraph( + self._graph_config.graph_id, + self._bigtable_config.project_id, + self._bigtable_config.instance_id, + ) return self._cg @property def bounds(self): if self._bounds: return self._bounds - cv_bounds = np.array(self._cv.bounds.to_list()).reshape(2, -1).T + cv_bounds = np.array(self._ws_cv.bounds.to_list()).reshape(2, -1).T self._bounds = cv_bounds.copy() self._bounds -= cv_bounds[:, 0:1] return self._bounds @property def chunk_id_bounds(self): - return np.ceil((self.bounds / self._chunk_size[:, None])).astype(np.int) + return np.ceil((self.bounds / self._graph_config.chunk_size[:, None])).astype( + np.int + ) @property def layer_chunk_bounds(self): 
if self._layer_bounds_d: return self._layer_bounds_d layer_bounds_d = {} - for layer in range(2, self.n_layers): + for layer in range(2, self.n_layers_cg): layer_bounds = self.chunk_id_bounds / (2 ** (layer - 2)) layer_bounds_d[layer] = np.ceil(layer_bounds).astype(np.int) self._layer_bounds_d = layer_bounds_d @@ -149,59 +107,82 @@ def chunk_coords(self): @property def n_layers(self): - if self._n_layers is None: - self._n_layers = self.cg.n_layers + if not self._n_layers: + self._n_layers = get_layer_count( + self._ws_cv, + self._graph_config.chunk_size, + fan_out=self._graph_config.fanout, + ) return self._n_layers @property - def use_raw_edge_data(self): - return self._use_raw_edge_data - - @property - def use_raw_agglomeration_data(self): - return self._use_raw_agglomeration_data - - @property - def edges_dir(self): - return self._edges_dir - - @property - def components_dir(self): - return self._components_dir + def n_layers_cg(self): + pass @property def task_q(self): if self._task_q: return self._task_q - self._task_q = get_rq_queue(self._task_q_name) + self._task_q = get_rq_queue(self._ingest_config.task_q_name) return self._task_q @property def redis(self): if self._redis: return self._redis - self._redis = get_redis_connection() - return self._redis + self._redis = get_redis_connection(self._ingest_config.redis_url) + if self._ingest_config.flush_redis_db: + self._redis.flushdb() + self._redis.set( + r_keys.INGESTION_MANAGER, self.get_serialized_info(pickled=True) + ) + return self._redis @property - def build_graph(self): - return self._build_graph + def edge_dtype(self): + if self._data_source.data_version == 4: + dtype = [ + ("sv1", np.uint64), + ("sv2", np.uint64), + ("aff_x", np.float32), + ("area_x", np.uint64), + ("aff_y", np.float32), + ("area_y", np.uint64), + ("aff_z", np.float32), + ("area_z", np.uint64), + ] + elif self._data_source.data_version == 3: + dtype = [ + ("sv1", np.uint64), + ("sv2", np.uint64), + ("aff_x", np.float64), + ("area_x", 
np.uint64), + ("aff_y", np.float64), + ("area_y", np.uint64), + ("aff_z", np.float64), + ("area_z", np.uint64), + ] + elif self._data_source.data_version == 2: + dtype = [ + ("sv1", np.uint64), + ("sv2", np.uint64), + ("aff", np.float32), + ("area", np.uint64), + ] + else: + raise Exception() + return dtype + + @classmethod + def from_pickle(cls, serialized_info): + return cls(**pickle.loads(serialized_info)) def get_serialized_info(self, pickled=False): info = { - "storage_path": self.storage_path, - "cg_table_id": self._cg_table_id, - "n_layers": self.n_layers, - "instance_id": self._instance_id, - "project_id": self._project_id, - "data_version": self.data_version, - "s_bits_atomic_layer": self._s_bits_atomic_layer, - "use_raw_edge_data": self._use_raw_edge_data, - "use_raw_agglomeration_data": self._use_raw_agglomeration_data, - "edges_dir": self._edges_dir, - "components_dir": self._components_dir, - "task_q_name": self._task_q_name, - "build_graph": self._build_graph, + "ingest_config": self._ingest_config, + "data_source": self._data_source, + "graph_config": self._graph_config, + "bigtable_config": self._bigtable_config, } if pickled: return pickle.dumps(info) @@ -210,13 +191,11 @@ def get_serialized_info(self, pickled=False): def is_out_of_bounds(self, chunk_coordinate): if not self._bitmasks: self._bitmasks = compute_bitmasks( - self.n_layers, 2, s_bits_atomic_layer=self._s_bits_atomic_layer + self._n_layers, + self._graph_config.fanout, + s_bits_atomic_layer=self._graph_config.s_bits_atomic_layer, ) return np.any(chunk_coordinate < 0) or np.any( chunk_coordinate > 2 ** self._bitmasks[1] ) - @classmethod - def from_pickle(cls, serialized_info): - return cls(**pickle.loads(serialized_info)) - diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 39d354c5d..298e69ed6 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -38,50 +38,6 @@ ZSTD_LEVEL = 17 
INGEST_CHANNEL = "ingest" -INGEST_QUEUE = "test" - - -def ingest_into_chunkedgraph( - data_source: DataSource, graph_config: GraphConfig, bigtable_config: BigTableConfig -): - storage_path = data_source.agglomeration.strip("/") - ws_cv_path = data_source.watershed.strip("/") - chunk_size = np.array(graph_config.chunk_size) - # cg_mesh_dir = f"{graph_config.graph_id}_meshes" - - # _, n_layers_agg = iu.initialize_chunkedgraph( - # cg_table_id=graph_config.graph_id, - # ws_cv_path=ws_cv_path, - # chunk_size=chunk_size, - # size=data_source.size, - # use_skip_connections=True, - # s_bits_atomic_layer=10, - # cg_mesh_dir=cg_mesh_dir, - # fan_out=graph_config.fanout, - # instance_id=bigtable_config.instance_id, - # project_id=bigtable_config.project_id, - # edge_dir=data_source.edges, - # ) - ws_cv = cloudvolume.CloudVolume(ws_cv_path) - n_layers_agg = iu.calc_n_layers(ws_cv, chunk_size, fan_out=2) - - imanager = ingestionmanager.IngestionManager( - storage_path=storage_path, - cg_table_id=graph_config.graph_id, - n_layers=n_layers_agg, - instance_id=bigtable_config.instance_id, - project_id=bigtable_config.project_id, - data_version=2, - s_bits_atomic_layer=graph_config.s_bits_atomic_layer, - cv=ws_cv, - chunk_size=chunk_size, - edges_dir=data_source.edges, - components_dir=data_source.components, - use_raw_edge_data=data_source.use_raw_edges, - use_raw_agglomeration_data=data_source.use_raw_components, - build_graph=graph_config.build_graph, - ) - return imanager @redis_job(REDIS_URL, INGEST_CHANNEL) @@ -140,7 +96,7 @@ def create_atomic_chunk(imanager, coord): imanager, coord, chunk_edges_all, mapping ) chunk_id_str = f"{2}_{'_'.join(map(str, coord))}" - if not imanager.build_graph: + if not imanager.ingest_config.build_graph: imanager.redis.hset(r_keys.ATOMIC_HASH_FINISHED, chunk_id_str, "") return chunk_id_str add_atomic_edges(imanager.cg, coord, chunk_edges_active, isolated=isolated_ids) @@ -161,13 +117,13 @@ def _get_chunk_data(imanager, coord) -> Tuple[Dict, 
Dict]: """ chunk_edges = ( _read_raw_edge_data(imanager, coord) - if imanager.use_raw_edge_data - else get_chunk_edges(imanager.edges_dir, [coord]) + if imanager.data_source.use_raw_edge_data + else get_chunk_edges(imanager.data_source.edges, [coord]) ) mapping = ( _read_raw_agglomeration_data(imanager, coord) - if imanager.use_raw_agglomeration_data - else get_chunk_components(imanager.components_dir, coord) + if imanager.data_source.use_raw_agglomeration_data + else get_chunk_components(imanager.data_source.components, coord) ) return chunk_edges, mapping @@ -196,7 +152,7 @@ def _read_raw_edge_data(imanager, coord) -> Dict: no_edges = no_edges and not sv_ids1.size if no_edges: return chunk_edges - put_chunk_edges(imanager.edges_dir, coord, chunk_edges, ZSTD_LEVEL) + put_chunk_edges(imanager.data_source.edges, coord, chunk_edges, ZSTD_LEVEL) return chunk_edges @@ -256,7 +212,7 @@ def _collect_edge_data(imanager, chunk_coord): :return: dict of np.ndarrays """ subfolder = "chunked_rg" - base_path = f"{imanager.storage_path}/{subfolder}/" + base_path = f"{imanager.data_source.agglomeration}/{subfolder}/" chunk_coord = np.array(chunk_coord) x, y, z = chunk_coord chunk_id = compute_chunk_id(layer=1, x=x, y=y, z=z) @@ -267,7 +223,6 @@ def _collect_edge_data(imanager, chunk_coord): for _x, _y, _z in itertools.product([x - 1, x], [y - 1, y], [z - 1, z]): if imanager.is_out_of_bounds(np.array([_x, _y, _z])): continue - # EDGES WITHIN CHUNKS filename = f"in_chunk_0_{_x}_{_y}_{_z}_{chunk_id}.data" filenames["in"].append(filename) @@ -291,7 +246,6 @@ def _collect_edge_data(imanager, chunk_coord): for c_chunk_coord in c_chunk_coords: x, y, z = c_chunk_coord - # EDGES BETWEEN CHUNKS filename = f"between_chunks_0_{x}_{y}_{z}_{chunk_id_string}.data" filenames["between"].append(filename) swap[filename] = larger_id == chunk_id @@ -353,7 +307,7 @@ def _read_raw_agglomeration_data(imanager, chunk_coord: np.ndarray): Collects agglomeration information & builds connected component 
mapping """ subfolder = "remap" - base_path = f"{imanager.storage_path}/{subfolder}/" + base_path = f"{imanager.data_source.agglomeration}/{subfolder}/" chunk_coord = np.array(chunk_coord) x, y, z = chunk_coord chunk_id = compute_chunk_id(layer=1, x=x, y=y, z=z) @@ -387,7 +341,7 @@ def _read_raw_agglomeration_data(imanager, chunk_coord: np.ndarray): mapping.update(dict(zip(cc, [i_cc] * len(cc)))) if mapping: - put_chunk_components(imanager.components_dir, components, chunk_coord) + put_chunk_components(imanager.data_source.components, components, chunk_coord) return mapping diff --git a/pychunkedgraph/utils/redis.py b/pychunkedgraph/utils/redis.py index a71de577a..5672817e2 100644 --- a/pychunkedgraph/utils/redis.py +++ b/pychunkedgraph/utils/redis.py @@ -15,7 +15,7 @@ REDIS_PASSWORD = os.environ.get("REDIS_PASSWORD", "dev") REDIS_URL = f"redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0" -keys_fields = ("INGESTION_MANAGER", "PARENTS_HASH", "ATOMIC_HASH_FINISHED") +keys_fields = ("INGESTION_MANAGER", "PARENTS_HASH_ENQUEUED", "ATOMIC_HASH_FINISHED") keys_defaults = ("pcg:imanager", "rq:enqueued:parents", "rq:finished:atomic") Keys = namedtuple("keys", keys_fields, defaults=keys_defaults) From dae26dd5dfc2753094b05e25d9be2e63ecfa73b8 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 2 Oct 2019 19:17:26 +0000 Subject: [PATCH 0243/1097] fix: graph config defaults --- pychunkedgraph/backend/definitions/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pychunkedgraph/backend/definitions/config.py b/pychunkedgraph/backend/definitions/config.py index c41570971..f5e67db41 100644 --- a/pychunkedgraph/backend/definitions/config.py +++ b/pychunkedgraph/backend/definitions/config.py @@ -18,7 +18,7 @@ "fanout", "s_bits_atomic_layer", ) -_graphconfig_defaults = (None, None, 2, True, 8) +_graphconfig_defaults = (None, None, 2, 8) GraphConfig = namedtuple( "GraphConfig", _graphconfig_fields, defaults=_graphconfig_defaults ) From 
3ee055369fd390982ea29dee65460634a8072f37 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 2 Oct 2019 19:39:40 +0000 Subject: [PATCH 0244/1097] revert default params --- pychunkedgraph/backend/chunkedgraph.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 946e13ebc..160dd7758 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -83,8 +83,8 @@ class ChunkedGraph(object): def __init__( self, table_id: str, - project_id: str, - instance_id: str, + project_id: str = "neuromancer-seung-import", + instance_id: str = "pychunkedgraph", chunk_size: Tuple[np.uint64, np.uint64, np.uint64] = None, fan_out: Optional[np.uint64] = None, use_skip_connections: Optional[bool] = True, From 21de557f256845751cabfe3b7108acca6d1432dc Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 3 Oct 2019 14:27:33 +0000 Subject: [PATCH 0245/1097] remove unused decorator --- pychunkedgraph/ingest/ran_ingestion_v2.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 298e69ed6..c2b547484 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -19,8 +19,6 @@ from . 
import ingestionmanager, ingestion_utils as iu from .initialization.atomic_layer import add_atomic_edges from .initialization.abstract_layers import add_layer -from ..utils.redis import redis_job -from ..utils.redis import REDIS_URL from ..utils.redis import keys as r_keys from ..io.edges import get_chunk_edges from ..io.edges import put_chunk_edges @@ -36,11 +34,6 @@ from ..backend.definitions.config import BigTableConfig -ZSTD_LEVEL = 17 -INGEST_CHANNEL = "ingest" - - -@redis_job(REDIS_URL, INGEST_CHANNEL) def create_parent_chunk(im_info, layer, child_chunk_coords): imanager = ingestionmanager.IngestionManager(**im_info) return add_layer(imanager.cg, layer, child_chunk_coords) @@ -81,7 +74,6 @@ def enqueue_atomic_tasks( ) -@redis_job(REDIS_URL, INGEST_CHANNEL) def _create_atomic_chunk(im_info, chunk_coord): """ helper for enqueue_atomic_tasks """ imanager = ingestionmanager.IngestionManager(**im_info) @@ -152,7 +144,7 @@ def _read_raw_edge_data(imanager, coord) -> Dict: no_edges = no_edges and not sv_ids1.size if no_edges: return chunk_edges - put_chunk_edges(imanager.data_source.edges, coord, chunk_edges, ZSTD_LEVEL) + put_chunk_edges(imanager.data_source.edges, coord, chunk_edges, 17) return chunk_edges From 72722f4ea1af446777d54739aac38b66fae6d3bb Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 3 Oct 2019 14:29:48 +0000 Subject: [PATCH 0246/1097] rearrange imports --- pychunkedgraph/ingest/ran_ingestion_v2.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index c2b547484..1bf12effa 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -16,7 +16,8 @@ import zstandard as zstd from flask import current_app -from . 
import ingestionmanager, ingestion_utils as iu +from .ingestion_utils import postprocess_edge_data +from .ingestionmanager import IngestionManager from .initialization.atomic_layer import add_atomic_edges from .initialization.abstract_layers import add_layer from ..utils.redis import keys as r_keys @@ -35,7 +36,7 @@ def create_parent_chunk(im_info, layer, child_chunk_coords): - imanager = ingestionmanager.IngestionManager(**im_info) + imanager = IngestionManager(**im_info) return add_layer(imanager.cg, layer, child_chunk_coords) @@ -76,7 +77,7 @@ def enqueue_atomic_tasks( def _create_atomic_chunk(im_info, chunk_coord): """ helper for enqueue_atomic_tasks """ - imanager = ingestionmanager.IngestionManager(**im_info) + imanager = IngestionManager(**im_info) return create_atomic_chunk(imanager, chunk_coord) @@ -122,7 +123,7 @@ def _get_chunk_data(imanager, coord) -> Tuple[Dict, Dict]: def _read_raw_edge_data(imanager, coord) -> Dict: edge_dict = _collect_edge_data(imanager, coord) - edge_dict = iu.postprocess_edge_data(imanager, edge_dict) + edge_dict = postprocess_edge_data(imanager, edge_dict) # flag to check if chunk has edges # avoid writing to cloud storage if there are no edges From a86c613d69ee436defe564d098962c8f8ef71d15 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 3 Oct 2019 14:35:45 +0000 Subject: [PATCH 0247/1097] remove flushdb from IngestionManager --- pychunkedgraph/ingest/__init__.py | 4 ++-- pychunkedgraph/ingest/cli.py | 1 + pychunkedgraph/ingest/ingestionmanager.py | 2 -- pychunkedgraph/ingest/ran_ingestion_v2.py | 6 ------ 4 files changed, 3 insertions(+), 10 deletions(-) diff --git a/pychunkedgraph/ingest/__init__.py b/pychunkedgraph/ingest/__init__.py index 9e7f42be9..d75ddface 100644 --- a/pychunkedgraph/ingest/__init__.py +++ b/pychunkedgraph/ingest/__init__.py @@ -2,8 +2,8 @@ from ..utils.redis import REDIS_URL -_ingestconfig_fields = ("build_graph", "flush_redis_db", "task_q_name", "redis_url") -_ingestconfig_defaults = (True, 
False, "test", REDIS_URL) +_ingestconfig_fields = ("build_graph", "task_q_name", "redis_url") +_ingestconfig_defaults = (True, "test", REDIS_URL) IngestConfig = namedtuple( "IngestConfig", _ingestconfig_fields, defaults=_ingestconfig_defaults ) diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index d2c5c9a22..d6e468f8b 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -75,6 +75,7 @@ def ingest_graph( imanager = IngestionManager( ingest_config, data_source, graph_config, bigtable_config ) + imanager.redis.flushdb() enqueue_atomic_tasks(imanager, result_ttl=result_ttl) diff --git a/pychunkedgraph/ingest/ingestionmanager.py b/pychunkedgraph/ingest/ingestionmanager.py index fd8641f60..e0368cea6 100644 --- a/pychunkedgraph/ingest/ingestionmanager.py +++ b/pychunkedgraph/ingest/ingestionmanager.py @@ -131,8 +131,6 @@ def redis(self): if self._redis: return self._redis self._redis = get_redis_connection(self._ingest_config.redis_url) - if self._ingest_config.flush_redis_db: - self._redis.flushdb() self._redis.set( r_keys.INGESTION_MANAGER, self.get_serialized_info(pickled=True) ) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 1bf12effa..7c14451b8 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -26,13 +26,9 @@ from ..io.components import get_chunk_components from ..io.components import put_chunk_components from ..backend.utils import basetypes -from ..backend.chunkedgraph_utils import compute_bitmasks from ..backend.chunkedgraph_utils import compute_chunk_id from ..backend.definitions.edges import Edges, CX_CHUNK from ..backend.definitions.edges import TYPES as EDGE_TYPES -from ..backend.definitions.config import DataSource -from ..backend.definitions.config import GraphConfig -from ..backend.definitions.config import BigTableConfig def create_parent_chunk(im_info, layer, child_chunk_coords): @@ -43,8 
+39,6 @@ def create_parent_chunk(im_info, layer, child_chunk_coords): def enqueue_atomic_tasks( imanager, batch_size: int = 50000, interval: float = 300.0, result_ttl: int = 500 ): - # cleanup any old tasks - current_app.test_q.empty() chunk_coords = list(imanager.chunk_coord_gen) np.random.shuffle(chunk_coords) From 4952cdf4a4ce3c0ca627b0ddbd39f6a4894a4f69 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 3 Oct 2019 14:38:24 +0000 Subject: [PATCH 0248/1097] fix: use task queue in ingestion manager instead of flask --- pychunkedgraph/ingest/ran_ingestion_v2.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 7c14451b8..31bea6565 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -14,7 +14,6 @@ import numpy as np import numpy.lib.recfunctions as rfn import zstandard as zstd -from flask import current_app from .ingestion_utils import postprocess_edge_data from .ingestionmanager import IngestionManager @@ -56,11 +55,11 @@ def enqueue_atomic_tasks( print(f"Chunk count: {len(chunk_coords)}") for chunk_coord in chunk_coords: - if len(current_app.test_q) > batch_size: + if len(imanager.task_q) > batch_size: print("Number of queued jobs greater than batch size, sleeping ...") time.sleep(interval) job_id = f"{2}_{'_'.join(map(str, chunk_coord))}" - current_app.test_q.enqueue( + imanager.task_q.enqueue( _create_atomic_chunk, job_id=job_id, job_timeout="10m", From 4efdd3304acdab29714e7e9aa4c2a567d1cefece Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 3 Oct 2019 14:59:08 +0000 Subject: [PATCH 0249/1097] reuse dictionary from ingestion manager --- pychunkedgraph/ingest/cli.py | 3 +-- pychunkedgraph/ingest/ingestionmanager.py | 3 ++- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index d6e468f8b..a0ef343ef 100644 --- 
a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -82,8 +82,7 @@ def ingest_graph( def _get_children_coords( imanager: IngestionManager, layer: int, parent_coords: Sequence[int] ) -> List[np.ndarray]: - layer_bounds = imanager.chunk_id_bounds / (2 ** (layer - 2)) - layer_bounds = np.ceil(layer_bounds).astype(np.int) + layer_bounds = imanager.layer_chunk_bounds[layer] children_coords = [] parent_coords = np.array(parent_coords, dtype=int) for dcoord in product(*[range(imanager.graph_config.fan_out)] * 3): diff --git a/pychunkedgraph/ingest/ingestionmanager.py b/pychunkedgraph/ingest/ingestionmanager.py index e0368cea6..429b53295 100644 --- a/pychunkedgraph/ingest/ingestionmanager.py +++ b/pychunkedgraph/ingest/ingestionmanager.py @@ -1,6 +1,7 @@ import itertools import numpy as np import pickle +from typing import Dict from cloudvolume import CloudVolume @@ -84,7 +85,7 @@ def chunk_id_bounds(self): ) @property - def layer_chunk_bounds(self): + def layer_chunk_bounds(self) -> Dict: if self._layer_bounds_d: return self._layer_bounds_d layer_bounds_d = {} From 6952b0f5d18bd4b547cb04e3c28d8df7be886f54 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 3 Oct 2019 18:54:29 +0000 Subject: [PATCH 0250/1097] wip: use hashes to track dependents completion --- pychunkedgraph/ingest/cli.py | 7 +- pychunkedgraph/ingest/ingestionmanager.py | 4 - pychunkedgraph/ingest/ran_ingestion_v2.py | 107 ++++++++++++++++------ pychunkedgraph/utils/redis.py | 4 +- 4 files changed, 84 insertions(+), 38 deletions(-) diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index a0ef343ef..a65475fb7 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -14,7 +14,6 @@ from . 
import IngestConfig from .ingestionmanager import IngestionManager -from .ran_ingestion_v2 import INGEST_CHANNEL from .ran_ingestion_v2 import enqueue_atomic_tasks from .ran_ingestion_v2 import create_parent_chunk from ..utils.redis import get_redis_connection @@ -39,7 +38,6 @@ # @click.option("--gcp-project-id", required=False, type=str) # @click.option("--bigtable-instance-id", required=False, type=str) # @click.option("--interval", required=False, type=float) -@click.option("--result-ttl", required=False, type=int) def ingest_graph( graph_id, # agglomeration, @@ -53,7 +51,6 @@ def ingest_graph( # gcp_project_id=None, # bigtable_instance_id=None, # interval=90.0 - result_ttl=500, ): ingest_config = IngestConfig(build_graph=False, flush_redis_db=True) data_source = DataSource( @@ -76,7 +73,7 @@ def ingest_graph( ingest_config, data_source, graph_config, bigtable_config ) imanager.redis.flushdb() - enqueue_atomic_tasks(imanager, result_ttl=result_ttl) + enqueue_atomic_tasks(imanager) def _get_children_coords( @@ -136,7 +133,7 @@ def _enqueue_parent_tasks(imanager: IngestionManager): create_parent_chunk, job_id=job_id, job_timeout="10m", - result_ttl=86400, + result_ttl=0, args=(imanager.get_serialized_info(), parent_chunk[0], children_coords), ) count += 1 diff --git a/pychunkedgraph/ingest/ingestionmanager.py b/pychunkedgraph/ingest/ingestionmanager.py index 429b53295..5e57698b0 100644 --- a/pychunkedgraph/ingest/ingestionmanager.py +++ b/pychunkedgraph/ingest/ingestionmanager.py @@ -116,10 +116,6 @@ def n_layers(self): ) return self._n_layers - @property - def n_layers_cg(self): - pass - @property def task_q(self): if self._task_q: diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 31bea6565..4790237e8 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -3,10 +3,13 @@ """ import time -import collections -import itertools import json -from typing import 
Dict, Tuple +from collections import defaultdict +from collections import Counter +from itertools import product +from typing import Dict +from typing import Tuple +from typing import Sequence import pandas as pd import cloudvolume @@ -30,40 +33,89 @@ from ..backend.definitions.edges import TYPES as EDGE_TYPES +chunk_id_str = lambda layer, coords: f"{layer}_{'_'.join(map(str, coords))}" + + +def _get_children_coords( + imanager: IngestionManager, layer: int, parent_coords: Sequence[int] +) -> np.ndarray: + layer_bounds = imanager.layer_chunk_bounds[layer] + children_coords = [] + parent_coords = np.array(parent_coords, dtype=int) + for dcoord in product(*[range(imanager.graph_config.fan_out)] * 3): + dcoord = np.array(dcoord, dtype=int) + child_coords = parent_coords * imanager.graph_config.fan_out + dcoord + check_bounds = np.less(child_coords, layer_bounds[:, 1]) + if np.all(check_bounds): + children_coords.append(child_coords) + return children_coords + + +def post_task_completion(imanager: IngestionManager, layer: int, coords: np.ndarray): + """ + get parent + add parent to layer hash + add children count to parent hash + decrement children count by 1 + if count is 0 + enqueue parent + delete parent hash + increment complete hash by 1 + """ + x, y, z = np.array(coords, int) // imanager.graph_config.fan_out + parent_layer = layer + 1 + parent_chunk_str = "_".join(map(str, coords)) + + if not imanager.redis.hget(parent_layer, parent_chunk_str): + children_count = len(_get_children_coords(imanager, layer, (x, y, z))) + imanager.redis.hset(parent_layer, parent_chunk_str, children_count) + imanager.redis.hincrby(parent_layer, parent_chunk_str, -1) + children_left = imanager.redis.hget(parent_layer, parent_chunk_str) + + if children_left == 0 and parent_layer <= imanager.n_layers: + children_coords = _get_children_coords(imanager, layer, (x, y, z)) + imanager.task_q.enqueue( + create_parent_chunk, + job_id=chunk_id_str(parent_layer, (x, y, z)), + job_timeout="59m", + 
result_ttl=0, + args=(imanager.get_serialized_info(), parent_layer, children_coords), + ) + imanager.redis.hincrby(r_keys.STATS_HASH, "completed", 1) + + def create_parent_chunk(im_info, layer, child_chunk_coords): imanager = IngestionManager(**im_info) return add_layer(imanager.cg, layer, child_chunk_coords) -def enqueue_atomic_tasks( - imanager, batch_size: int = 50000, interval: float = 300.0, result_ttl: int = 500 -): +def enqueue_atomic_tasks(imanager, batch_size: int = 50000, interval: float = 300.0): chunk_coords = list(imanager.chunk_coord_gen) np.random.shuffle(chunk_coords) # test chunks - # chunk_coords = [ - # [0, 0, 0], - # [0, 0, 1], - # [0, 1, 0], - # [0, 1, 1], - # [1, 0, 0], - # [1, 0, 1], - # [1, 1, 0], - # [1, 1, 1], - # ] + chunk_coords = [ + [0, 0, 0], + [0, 0, 1], + [0, 1, 0], + [0, 1, 1], + [1, 0, 0], + [1, 0, 1], + [1, 1, 0], + [1, 1, 1], + ] print(f"Chunk count: {len(chunk_coords)}") for chunk_coord in chunk_coords: if len(imanager.task_q) > batch_size: print("Number of queued jobs greater than batch size, sleeping ...") time.sleep(interval) - job_id = f"{2}_{'_'.join(map(str, chunk_coord))}" + job_id = chunk_id_str(2, chunk_coord) imanager.task_q.enqueue( _create_atomic_chunk, job_id=job_id, - job_timeout="10m", - result_ttl=result_ttl, + job_timeout="59m", + result_ttl=0, args=(imanager.get_serialized_info(), chunk_coord), ) @@ -81,11 +133,12 @@ def create_atomic_chunk(imanager, coord): chunk_edges_active, isolated_ids = _get_active_edges( imanager, coord, chunk_edges_all, mapping ) - chunk_id_str = f"{2}_{'_'.join(map(str, coord))}" + chunk_str = chunk_id_str(2, coord) if not imanager.ingest_config.build_graph: - imanager.redis.hset(r_keys.ATOMIC_HASH_FINISHED, chunk_id_str, "") - return chunk_id_str + imanager.redis.hset(r_keys.ATOMIC_HASH_FINISHED, chunk_str, "") + return chunk_str add_atomic_edges(imanager.cg, coord, chunk_edges_active, isolated=isolated_ids) + post_task_completion(imanager, 2, coord) # n_supervoxels = 
len(isolated_ids) # n_edges = 0 @@ -175,7 +228,7 @@ def _get_cont_chunk_coords(imanager, chunk_coord_a, chunk_coord_b): chunk_coord_l = chunk_coord_a if diff[dir_dim] > 0 else chunk_coord_b c_chunk_coords = [] - for dx, dy, dz in itertools.product([0, -1], [0, -1], [0, -1]): + for dx, dy, dz in product([0, -1], [0, -1], [0, -1]): if dz == dy == dx == 0: continue if [dx, dy, dz][dir_dim] == 0: @@ -203,10 +256,10 @@ def _collect_edge_data(imanager, chunk_coord): x, y, z = chunk_coord chunk_id = compute_chunk_id(layer=1, x=x, y=y, z=z) - filenames = collections.defaultdict(list) - swap = collections.defaultdict(list) + filenames = defaultdict(list) + swap = defaultdict(list) x, y, z = chunk_coord - for _x, _y, _z in itertools.product([x - 1, x], [y - 1, y], [z - 1, z]): + for _x, _y, _z in product([x - 1, x], [y - 1, y], [z - 1, z]): if imanager.is_out_of_bounds(np.array([_x, _y, _z])): continue filename = f"in_chunk_0_{_x}_{_y}_{_z}_{chunk_id}.data" @@ -242,7 +295,7 @@ def _collect_edge_data(imanager, chunk_coord): swap[filename] = larger_id == chunk_id edge_data = {} - read_counter = collections.Counter() + read_counter = Counter() for k in filenames: with cloudvolume.Storage(base_path, n_threads=10) as stor: files = stor.get_files(filenames[k]) diff --git a/pychunkedgraph/utils/redis.py b/pychunkedgraph/utils/redis.py index 5672817e2..690338502 100644 --- a/pychunkedgraph/utils/redis.py +++ b/pychunkedgraph/utils/redis.py @@ -15,8 +15,8 @@ REDIS_PASSWORD = os.environ.get("REDIS_PASSWORD", "dev") REDIS_URL = f"redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0" -keys_fields = ("INGESTION_MANAGER", "PARENTS_HASH_ENQUEUED", "ATOMIC_HASH_FINISHED") -keys_defaults = ("pcg:imanager", "rq:enqueued:parents", "rq:finished:atomic") +keys_fields = ("INGESTION_MANAGER", "PARENTS_HASH_ENQUEUED", "ATOMIC_HASH_FINISHED", "STATS_HASH") +keys_defaults = ("pcg:imanager", "rq:enqueued:parents", "rq:finished:atomic", "rq:stats") Keys = namedtuple("keys", keys_fields, 
defaults=keys_defaults) keys = Keys() From cb197235740b6b319cb03446d5affa84c42fbb06 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 3 Oct 2019 19:52:10 +0000 Subject: [PATCH 0251/1097] bigtable initialization --- pychunkedgraph/ingest/cli.py | 95 ++----------------- pychunkedgraph/ingest/ingestion_utils.py | 5 +- .../ingest/initialization/abstract_layers.py | 6 +- pychunkedgraph/ingest/ran_ingestion_v2.py | 53 +++++------ 4 files changed, 36 insertions(+), 123 deletions(-) diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index a65475fb7..a19f43208 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -13,9 +13,9 @@ from flask.cli import AppGroup from . import IngestConfig +from .ingestion_utils import initialize_chunkedgraph from .ingestionmanager import IngestionManager from .ran_ingestion_v2 import enqueue_atomic_tasks -from .ran_ingestion_v2 import create_parent_chunk from ..utils.redis import get_redis_connection from ..utils.redis import keys as r_keys from ..backend.definitions.config import DataSource @@ -73,95 +73,16 @@ def ingest_graph( ingest_config, data_source, graph_config, bigtable_config ) imanager.redis.flushdb() - enqueue_atomic_tasks(imanager) - - -def _get_children_coords( - imanager: IngestionManager, layer: int, parent_coords: Sequence[int] -) -> List[np.ndarray]: - layer_bounds = imanager.layer_chunk_bounds[layer] - children_coords = [] - parent_coords = np.array(parent_coords, dtype=int) - for dcoord in product(*[range(imanager.graph_config.fan_out)] * 3): - dcoord = np.array(dcoord, dtype=int) - child_coords = parent_coords * imanager.graph_config.fan_out + dcoord - check_bounds = np.less(child_coords, layer_bounds[:, 1]) - if np.all(check_bounds): - children_coords.append(child_coords) - return children_coords - -def _parse_results(imanager: IngestionManager): - results = imanager.redis.zrange( - f"rq:finished:{imanager.ingest_config.task_q_name}", 0, -1 + 
initialize_chunkedgraph( + graph_config.graph_id, + data_source.watershed, + graph_config.chunk_size, + s_bits_atomic_layer=graph_config.s_bits_atomic_layer, + edge_dir=data_source.edges, ) - layer_counts_d = defaultdict(int) - parent_chunks_d = defaultdict(list) # (layer, x, y, z) as keys - for chunk_str in results: - chunk_str = chunk_str.decode("utf-8") - layer, x, y, z = map(int, chunk_str.split("_")) - layer_counts_d[layer] += 1 - if layer == imanager.n_layers: - print("All jobs completed.") - imanager.redis.delete(r_keys.INGESTION_MANAGER) - sys.exit(0) - layer += 1 - x, y, z = np.array([x, y, z], int) // imanager.graph_config.fan_out - parent_job_id = f"{layer}_{'_'.join(map(str, (x, y, z)))}" - if not imanager.redis.hget(r_keys.PARENTS_HASH_ENQUEUED, parent_job_id) is None: - continue - parent_chunks_d[(layer, x, y, z)].append(chunk_str) - return parent_chunks_d, layer_counts_d - -def _enqueue_parent_tasks(imanager: IngestionManager): - """ - Helper to enqueue parent tasks - Checks job/chunk ids in redis to determine if parent task can be enqueued - """ - parent_chunks_d, layer_counts_d = _parse_results(imanager) - count = 0 - for parent_chunk in parent_chunks_d: - children_coords = _get_children_coords( - imanager, parent_chunk[0] - 1, parent_chunk[1:] - ) - children_results = parent_chunks_d[parent_chunk] - if not len(children_coords) == len(children_results): - continue - job_id = f"{parent_chunk[0]}_{'_'.join(map(str, parent_chunk[1:]))}" - imanager.task_q.enqueue( - create_parent_chunk, - job_id=job_id, - job_timeout="10m", - result_ttl=0, - args=(imanager.get_serialized_info(), parent_chunk[0], children_coords), - ) - count += 1 - imanager.redis.hset(r_keys.PARENTS_HASH_ENQUEUED, job_id, "") - - layers = range(2, imanager.n_layers) - status = ", ".join([f"{l}:{layer_counts_d[l]}" for l in layers]) - print(f"Queued {count} parents.") - print(f"Completed chunks (layer:count)\n{status}") - - -@ingest_cli.command("parents") -@click.option("--interval", 
required=True, type=float) -def ingest_parent_chunks(interval): - """ - This can only be used after running `ingest graph` - Uses serialzed ingestion manager information stored in redis - by the `ingest graph` command. - Should be on the same redis server where ingest is running. - """ - redis = get_redis_connection() - imanager_info = redis.get(r_keys.INGESTION_MANAGER) - if not imanager_info: - click.secho("Run `ingest graph` before using this command.", fg="red") - sys.exit(1) - while True: - _enqueue_parent_tasks(IngestionManager.from_pickle(imanager_info)) - time.sleep(interval) + enqueue_atomic_tasks(imanager) def init_ingest_cmds(app): diff --git a/pychunkedgraph/ingest/ingestion_utils.py b/pychunkedgraph/ingest/ingestion_utils.py index ddc3e577d..016efe7dd 100644 --- a/pychunkedgraph/ingest/ingestion_utils.py +++ b/pychunkedgraph/ingest/ingestion_utils.py @@ -16,8 +16,8 @@ def initialize_chunkedgraph( cg_table_id, ws_cv_path, chunk_size, - size, - cg_mesh_dir, + cg_mesh_dir="mesh_dir", + size=None, use_skip_connections=True, s_bits_atomic_layer=None, n_bits_root_counter=8, @@ -28,7 +28,6 @@ def initialize_chunkedgraph( is_new=True, ): """ Initalizes a chunkedgraph on BigTable - :param cg_table_id: str name of chunkedgraph :param ws_cv_path: str diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index fe1450b3f..70a8f49a2 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -16,14 +16,14 @@ def add_layer( cg_instance, layer_id: int, - chunk_coords: Sequence[Sequence[int]], + parent_coords: Sequence[int], + children_coords: Sequence[Sequence[int]], *, time_stamp: Optional[datetime.datetime] = None, n_threads: int = 20, ) -> None: - x, y, z = np.min(chunk_coords, axis=0) // cg_instance.fan_out parent_chunk_id = cg_instance.get_chunk_id(layer=layer_id, x=x, y=y, z=z) - cross_edge_dict, child_ids = 
_process_chunks(cg_instance, layer_id, chunk_coords) + cross_edge_dict, child_ids = _process_chunks(cg_instance, layer_id, children_coords) edge_ids = _resolve_cross_chunk_edges_thread(layer_id, child_ids, cross_edge_dict) # Extract connected components diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 4790237e8..a5fe6883c 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -51,7 +51,7 @@ def _get_children_coords( return children_coords -def post_task_completion(imanager: IngestionManager, layer: int, coords: np.ndarray): +def _post_task_completion(imanager: IngestionManager, layer: int, coords: np.ndarray): """ get parent add parent to layer hash @@ -62,31 +62,38 @@ def post_task_completion(imanager: IngestionManager, layer: int, coords: np.ndar delete parent hash increment complete hash by 1 """ - x, y, z = np.array(coords, int) // imanager.graph_config.fan_out parent_layer = layer + 1 - parent_chunk_str = "_".join(map(str, coords)) + if parent_layer > imanager.n_layers: + return + parent_coords = np.array(coords, int) // imanager.graph_config.fan_out + parent_chunk_str = "_".join(map(str, parent_coords)) if not imanager.redis.hget(parent_layer, parent_chunk_str): - children_count = len(_get_children_coords(imanager, layer, (x, y, z))) + children_count = len(_get_children_coords(imanager, layer, parent_coords)) imanager.redis.hset(parent_layer, parent_chunk_str, children_count) imanager.redis.hincrby(parent_layer, parent_chunk_str, -1) children_left = imanager.redis.hget(parent_layer, parent_chunk_str) - if children_left == 0 and parent_layer <= imanager.n_layers: - children_coords = _get_children_coords(imanager, layer, (x, y, z)) + if children_left == 0: imanager.task_q.enqueue( - create_parent_chunk, - job_id=chunk_id_str(parent_layer, (x, y, z)), + _create_parent_chunk, + job_id=chunk_id_str(parent_layer, parent_coords), job_timeout="59m", 
result_ttl=0, - args=(imanager.get_serialized_info(), parent_layer, children_coords), + args=( + imanager.get_serialized_info(), + parent_layer, + parent_coords, + _get_children_coords(imanager, layer, parent_coords), + ), ) imanager.redis.hincrby(r_keys.STATS_HASH, "completed", 1) -def create_parent_chunk(im_info, layer, child_chunk_coords): +def _create_parent_chunk(im_info, layer, parent_coords, child_chunk_coords): imanager = IngestionManager(**im_info) - return add_layer(imanager.cg, layer, child_chunk_coords) + add_layer(imanager.cg, layer, parent_coords, child_chunk_coords) + _post_task_completion(imanager, 2, parent_coords) def enqueue_atomic_tasks(imanager, batch_size: int = 50000, interval: float = 300.0): @@ -120,33 +127,19 @@ def enqueue_atomic_tasks(imanager, batch_size: int = 50000, interval: float = 30 ) -def _create_atomic_chunk(im_info, chunk_coord): - """ helper for enqueue_atomic_tasks """ - imanager = IngestionManager(**im_info) - return create_atomic_chunk(imanager, chunk_coord) - - -def create_atomic_chunk(imanager, coord): +def _create_atomic_chunk(im_info, coord): """ Creates single atomic chunk""" + imanager = IngestionManager(**im_info) coord = np.array(list(coord), dtype=np.int) chunk_edges_all, mapping = _get_chunk_data(imanager, coord) chunk_edges_active, isolated_ids = _get_active_edges( imanager, coord, chunk_edges_all, mapping ) - chunk_str = chunk_id_str(2, coord) if not imanager.ingest_config.build_graph: - imanager.redis.hset(r_keys.ATOMIC_HASH_FINISHED, chunk_str, "") - return chunk_str + imanager.redis.hset(r_keys.ATOMIC_HASH_FINISHED, chunk_id_str(2, coord), "") + return add_atomic_edges(imanager.cg, coord, chunk_edges_active, isolated=isolated_ids) - post_task_completion(imanager, 2, coord) - - # n_supervoxels = len(isolated_ids) - # n_edges = 0 - # for edge_type in EDGE_TYPES: - # edges = chunk_edges_all[edge_type] - # n_edges += len(edges) - # n_supervoxels += len(np.unique(edges.get_pairs().ravel())) - return chunk_id_str + 
_post_task_completion(imanager, 2, coord) def _get_chunk_data(imanager, coord) -> Tuple[Dict, Dict]: From 7b7daf5b3169f93fffb9fd808ea249e619c00268 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 3 Oct 2019 20:19:00 +0000 Subject: [PATCH 0252/1097] fixes --- .devcontainer/devcontainer.json | 2 +- pychunkedgraph/ingest/cli.py | 17 +++++++++-------- pychunkedgraph/ingest/ran_ingestion_v2.py | 13 ++++++++----- 3 files changed, 18 insertions(+), 14 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index e7bf70fc7..ae04de5cd 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -35,7 +35,7 @@ // "settings": { "workbench.colorTheme": "Quiet Light" }, // Uncomment the next line to run commands after the container is created - for example installing git. - "postCreateCommand": "redis-server --requirepass dev --appendonly no", + "postCreateCommand": "rm -rf /app && redis-server --requirepass dev --appendonly no", // Add the IDs of any extensions you want installed in the array below. 
"extensions": [] diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index a19f43208..cf93791c0 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -52,7 +52,7 @@ def ingest_graph( # bigtable_instance_id=None, # interval=90.0 ): - ingest_config = IngestConfig(build_graph=False, flush_redis_db=True) + ingest_config = IngestConfig(build_graph=True) data_source = DataSource( agglomeration="gs://ranl-scratch/minnie65_0/agg", watershed="gs://microns-seunglab/minnie65/ws_minnie65_0", @@ -74,13 +74,14 @@ def ingest_graph( ) imanager.redis.flushdb() - initialize_chunkedgraph( - graph_config.graph_id, - data_source.watershed, - graph_config.chunk_size, - s_bits_atomic_layer=graph_config.s_bits_atomic_layer, - edge_dir=data_source.edges, - ) + if ingest_config.build_graph: + initialize_chunkedgraph( + graph_config.graph_id, + data_source.watershed, + graph_config.chunk_size, + s_bits_atomic_layer=graph_config.s_bits_atomic_layer, + edge_dir=data_source.edges, + ) enqueue_atomic_tasks(imanager) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index a5fe6883c..2b3aff283 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -42,9 +42,9 @@ def _get_children_coords( layer_bounds = imanager.layer_chunk_bounds[layer] children_coords = [] parent_coords = np.array(parent_coords, dtype=int) - for dcoord in product(*[range(imanager.graph_config.fan_out)] * 3): + for dcoord in product(*[range(imanager.graph_config.fanout)] * 3): dcoord = np.array(dcoord, dtype=int) - child_coords = parent_coords * imanager.graph_config.fan_out + dcoord + child_coords = parent_coords * imanager.graph_config.fanout + dcoord check_bounds = np.less(child_coords, layer_bounds[:, 1]) if np.all(check_bounds): children_coords.append(child_coords) @@ -63,10 +63,11 @@ def _post_task_completion(imanager: IngestionManager, layer: int, coords: np.nda increment 
complete hash by 1 """ parent_layer = layer + 1 + print(parent_layer, imanager.n_layers) if parent_layer > imanager.n_layers: return - parent_coords = np.array(coords, int) // imanager.graph_config.fan_out + parent_coords = np.array(coords, int) // imanager.graph_config.fanout parent_chunk_str = "_".join(map(str, parent_coords)) if not imanager.redis.hget(parent_layer, parent_chunk_str): children_count = len(_get_children_coords(imanager, layer, parent_coords)) @@ -129,12 +130,14 @@ def enqueue_atomic_tasks(imanager, batch_size: int = 50000, interval: float = 30 def _create_atomic_chunk(im_info, coord): """ Creates single atomic chunk""" + print("HI"*50) imanager = IngestionManager(**im_info) coord = np.array(list(coord), dtype=np.int) chunk_edges_all, mapping = _get_chunk_data(imanager, coord) chunk_edges_active, isolated_ids = _get_active_edges( imanager, coord, chunk_edges_all, mapping ) + print(imanager.ingest_config.build_graph) if not imanager.ingest_config.build_graph: imanager.redis.hset(r_keys.ATOMIC_HASH_FINISHED, chunk_id_str(2, coord), "") return @@ -149,12 +152,12 @@ def _get_chunk_data(imanager, coord) -> Tuple[Dict, Dict]: """ chunk_edges = ( _read_raw_edge_data(imanager, coord) - if imanager.data_source.use_raw_edge_data + if imanager.data_source.use_raw_edges else get_chunk_edges(imanager.data_source.edges, [coord]) ) mapping = ( _read_raw_agglomeration_data(imanager, coord) - if imanager.data_source.use_raw_agglomeration_data + if imanager.data_source.use_raw_components else get_chunk_components(imanager.data_source.components, coord) ) return chunk_edges, mapping From 66bdda96b676c9a0b0f174b8590f583f43ece370 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 3 Oct 2019 20:38:46 +0000 Subject: [PATCH 0253/1097] delete parent key from hash when enqueued --- pychunkedgraph/ingest/ingestionmanager.py | 4 ++-- pychunkedgraph/ingest/initialization/abstract_layers.py | 1 + pychunkedgraph/ingest/ran_ingestion_v2.py | 9 +++++---- 3 files changed, 
8 insertions(+), 6 deletions(-) diff --git a/pychunkedgraph/ingest/ingestionmanager.py b/pychunkedgraph/ingest/ingestionmanager.py index 5e57698b0..21a76b042 100644 --- a/pychunkedgraph/ingest/ingestionmanager.py +++ b/pychunkedgraph/ingest/ingestionmanager.py @@ -71,7 +71,7 @@ def cg(self): @property def bounds(self): - if self._bounds: + if not self._bounds is None: return self._bounds cv_bounds = np.array(self._ws_cv.bounds.to_list()).reshape(2, -1).T self._bounds = cv_bounds.copy() @@ -89,7 +89,7 @@ def layer_chunk_bounds(self) -> Dict: if self._layer_bounds_d: return self._layer_bounds_d layer_bounds_d = {} - for layer in range(2, self.n_layers_cg): + for layer in range(2, self.n_layers): layer_bounds = self.chunk_id_bounds / (2 ** (layer - 2)) layer_bounds_d[layer] = np.ceil(layer_bounds).astype(np.int) self._layer_bounds_d = layer_bounds_d diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 70a8f49a2..a679ef5dd 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -22,6 +22,7 @@ def add_layer( time_stamp: Optional[datetime.datetime] = None, n_threads: int = 20, ) -> None: + x, y, z = parent_coords parent_chunk_id = cg_instance.get_chunk_id(layer=layer_id, x=x, y=y, z=z) cross_edge_dict, child_ids = _process_chunks(cg_instance, layer_id, children_coords) edge_ids = _resolve_cross_chunk_edges_thread(layer_id, child_ids, cross_edge_dict) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 2b3aff283..6818218e8 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -63,7 +63,6 @@ def _post_task_completion(imanager: IngestionManager, layer: int, coords: np.nda increment complete hash by 1 """ parent_layer = layer + 1 - print(parent_layer, imanager.n_layers) if parent_layer > imanager.n_layers: return @@ 
-73,8 +72,11 @@ def _post_task_completion(imanager: IngestionManager, layer: int, coords: np.nda children_count = len(_get_children_coords(imanager, layer, parent_coords)) imanager.redis.hset(parent_layer, parent_chunk_str, children_count) imanager.redis.hincrby(parent_layer, parent_chunk_str, -1) - children_left = imanager.redis.hget(parent_layer, parent_chunk_str) + children_left = int( + imanager.redis.hget(parent_layer, parent_chunk_str).decode("utf-8") + ) + print(children_left) if children_left == 0: imanager.task_q.enqueue( _create_parent_chunk, @@ -88,6 +90,7 @@ def _post_task_completion(imanager: IngestionManager, layer: int, coords: np.nda _get_children_coords(imanager, layer, parent_coords), ), ) + imanager.redis.hdel(parent_layer, parent_chunk_str) imanager.redis.hincrby(r_keys.STATS_HASH, "completed", 1) @@ -130,14 +133,12 @@ def enqueue_atomic_tasks(imanager, batch_size: int = 50000, interval: float = 30 def _create_atomic_chunk(im_info, coord): """ Creates single atomic chunk""" - print("HI"*50) imanager = IngestionManager(**im_info) coord = np.array(list(coord), dtype=np.int) chunk_edges_all, mapping = _get_chunk_data(imanager, coord) chunk_edges_active, isolated_ids = _get_active_edges( imanager, coord, chunk_edges_all, mapping ) - print(imanager.ingest_config.build_graph) if not imanager.ingest_config.build_graph: imanager.redis.hset(r_keys.ATOMIC_HASH_FINISHED, chunk_id_str(2, coord), "") return From 1c36e6496e237bc78454f0a916a8bb2b3fc99371 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 3 Oct 2019 20:57:30 +0000 Subject: [PATCH 0254/1097] queue parents at front to avoid large queue size --- pychunkedgraph/ingest/ran_ingestion_v2.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 6818218e8..7d1e42369 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py 
@@ -76,13 +76,13 @@ def _post_task_completion(imanager: IngestionManager, layer: int, coords: np.nda imanager.redis.hget(parent_layer, parent_chunk_str).decode("utf-8") ) - print(children_left) if children_left == 0: imanager.task_q.enqueue( _create_parent_chunk, job_id=chunk_id_str(parent_layer, parent_coords), job_timeout="59m", result_ttl=0, + at_front=True, args=( imanager.get_serialized_info(), parent_layer, @@ -105,16 +105,16 @@ def enqueue_atomic_tasks(imanager, batch_size: int = 50000, interval: float = 30 np.random.shuffle(chunk_coords) # test chunks - chunk_coords = [ - [0, 0, 0], - [0, 0, 1], - [0, 1, 0], - [0, 1, 1], - [1, 0, 0], - [1, 0, 1], - [1, 1, 0], - [1, 1, 1], - ] + # chunk_coords = [ + # [0, 0, 0], + # [0, 0, 1], + # [0, 1, 0], + # [0, 1, 1], + # [1, 0, 0], + # [1, 0, 1], + # [1, 1, 0], + # [1, 1, 1], + # ] print(f"Chunk count: {len(chunk_coords)}") for chunk_coord in chunk_coords: From 9367e476ddec541b7ac5038a667bf3efebdd3a1e Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 4 Oct 2019 01:03:19 +0000 Subject: [PATCH 0255/1097] use separate queues for atomic and parent chunks --- pychunkedgraph/ingest/__init__.py | 13 +++++++++++-- pychunkedgraph/ingest/ingestionmanager.py | 10 ++++------ pychunkedgraph/ingest/ran_ingestion_v2.py | 23 +++++++++++++++-------- rq_workers/test_worker.py | 2 +- 4 files changed, 31 insertions(+), 17 deletions(-) diff --git a/pychunkedgraph/ingest/__init__.py b/pychunkedgraph/ingest/__init__.py index d75ddface..0e577c96b 100644 --- a/pychunkedgraph/ingest/__init__.py +++ b/pychunkedgraph/ingest/__init__.py @@ -2,8 +2,17 @@ from ..utils.redis import REDIS_URL -_ingestconfig_fields = ("build_graph", "task_q_name", "redis_url") -_ingestconfig_defaults = (True, "test", REDIS_URL) +_ingestconfig_fields = ( + "build_graph", + "redis_url", + "atomic_q_name", + "atomic_q_limit", + "atomic_q_interval", + "parents_q_name", + "parents_q_limit", + "parents_q_interval", +) +_ingestconfig_defaults = (True, REDIS_URL, 
"atomic", 50000, 60, "parents", 25000, 120) IngestConfig = namedtuple( "IngestConfig", _ingestconfig_fields, defaults=_ingestconfig_defaults ) diff --git a/pychunkedgraph/ingest/ingestionmanager.py b/pychunkedgraph/ingest/ingestionmanager.py index 21a76b042..09ed9fc8a 100644 --- a/pychunkedgraph/ingest/ingestionmanager.py +++ b/pychunkedgraph/ingest/ingestionmanager.py @@ -2,6 +2,7 @@ import numpy as np import pickle from typing import Dict +from collections import defaultdict from cloudvolume import CloudVolume @@ -37,7 +38,7 @@ def __init__( self._chunk_coords = None self._layer_bounds_d = None - self._task_q = None + self._task_queues = defaultdict(get_rq_queue) self._bitmasks = None self._bounds = None @@ -117,11 +118,8 @@ def n_layers(self): return self._n_layers @property - def task_q(self): - if self._task_q: - return self._task_q - self._task_q = get_rq_queue(self._ingest_config.task_q_name) - return self._task_q + def task_queues(self) -> Dict: + return self._task_queues @property def redis(self): diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 7d1e42369..a9354d983 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -77,7 +77,11 @@ def _post_task_completion(imanager: IngestionManager, layer: int, coords: np.nda ) if children_left == 0: - imanager.task_q.enqueue( + parents_queue = imanager.task_queues[imanager.ingest_config.parents_q_name] + while len(parents_queue) > imanager.ingest_config.parents_q_limit: + print(f"Sleeping {imanager.ingest_config.parents_q_interval}s...") + time.sleep(imanager.ingest_config.parents_q_interval) + parents_queue.enqueue( _create_parent_chunk, job_id=chunk_id_str(parent_layer, parent_coords), job_timeout="59m", @@ -100,7 +104,9 @@ def _create_parent_chunk(im_info, layer, parent_coords, child_chunk_coords): _post_task_completion(imanager, 2, parent_coords) -def enqueue_atomic_tasks(imanager, batch_size: int = 50000, 
interval: float = 300.0): +def enqueue_atomic_tasks( + imanager: IngestionManager, batch_size: int = 50000, interval: float = 300.0 +): chunk_coords = list(imanager.chunk_coord_gen) np.random.shuffle(chunk_coords) @@ -118,13 +124,14 @@ def enqueue_atomic_tasks(imanager, batch_size: int = 50000, interval: float = 30 print(f"Chunk count: {len(chunk_coords)}") for chunk_coord in chunk_coords: - if len(imanager.task_q) > batch_size: - print("Number of queued jobs greater than batch size, sleeping ...") - time.sleep(interval) - job_id = chunk_id_str(2, chunk_coord) - imanager.task_q.enqueue( + atomic_queue = imanager.task_queues[imanager.ingest_config.atomic_q_name] + # for optimal use of redis memory wait if queue limit is reached + if len(atomic_queue) > imanager.ingest_config.atomic_q_limit: + print(f"Sleeping {imanager.ingest_config.atomic_q_interval}s...") + time.sleep(imanager.ingest_config.atomic_q_interval) + atomic_queue.enqueue( _create_atomic_chunk, - job_id=job_id, + job_id=chunk_id_str(2, chunk_coord), job_timeout="59m", result_ttl=0, args=(imanager.get_serialized_info(), chunk_coord), diff --git a/rq_workers/test_worker.py b/rq_workers/test_worker.py index b86f4ca1b..608666804 100644 --- a/rq_workers/test_worker.py +++ b/rq_workers/test_worker.py @@ -5,7 +5,7 @@ from pychunkedgraph.utils.redis import REDIS_URL # Queues to listen on -QUEUES = ['test'] +QUEUES = ["atomic", "parents"] # If you're using Sentry to collect your runtime exceptions, you can use this # to configure RQ for it in a single step From 443cd49f94699b75194f784c063f6542445d47d0 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 4 Oct 2019 01:06:53 +0000 Subject: [PATCH 0256/1097] track completed count for each layer --- pychunkedgraph/ingest/ran_ingestion_v2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index a9354d983..1ece8ecc3 100644 --- 
a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -95,7 +95,7 @@ def _post_task_completion(imanager: IngestionManager, layer: int, coords: np.nda ), ) imanager.redis.hdel(parent_layer, parent_chunk_str) - imanager.redis.hincrby(r_keys.STATS_HASH, "completed", 1) + imanager.redis.hincrby("completed", parent_layer, 1) def _create_parent_chunk(im_info, layer, parent_coords, child_chunk_coords): From 56e99e99f128bac4dc8182b9734cd4403668806f Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 4 Oct 2019 14:27:37 +0000 Subject: [PATCH 0257/1097] use different workers and queues for atomic and parents --- pychunkedgraph/ingest/ingestionmanager.py | 12 +++++----- pychunkedgraph/ingest/ran_ingestion_v2.py | 18 +++++++-------- .../{test_worker.py => atomic_worker.py} | 2 +- rq_workers/mesh_worker.py | 23 ------------------- .../{ingest_worker.py => parent_worker.py} | 4 +--- 5 files changed, 17 insertions(+), 42 deletions(-) rename rq_workers/{test_worker.py => atomic_worker.py} (94%) delete mode 100644 rq_workers/mesh_worker.py rename rq_workers/{ingest_worker.py => parent_worker.py} (82%) diff --git a/pychunkedgraph/ingest/ingestionmanager.py b/pychunkedgraph/ingest/ingestionmanager.py index 09ed9fc8a..7fe9d93b7 100644 --- a/pychunkedgraph/ingest/ingestionmanager.py +++ b/pychunkedgraph/ingest/ingestionmanager.py @@ -21,13 +21,13 @@ class IngestionManager(object): def __init__( self, - ingest_config: IngestConfig, + config: IngestConfig, data_source: DataSource, graph_config: GraphConfig, bigtable_config: BigTableConfig, ): - self._ingest_config = ingest_config + self._config = config self._data_source = data_source self._graph_config = graph_config self._bigtable_config = bigtable_config @@ -45,8 +45,8 @@ def __init__( self._redis = None @property - def ingest_config(self): - return self._ingest_config + def config(self): + return self._config @property def data_source(self): @@ -125,7 +125,7 @@ def task_queues(self) -> 
Dict: def redis(self): if self._redis: return self._redis - self._redis = get_redis_connection(self._ingest_config.redis_url) + self._redis = get_redis_connection(self._config.redis_url) self._redis.set( r_keys.INGESTION_MANAGER, self.get_serialized_info(pickled=True) ) @@ -172,7 +172,7 @@ def from_pickle(cls, serialized_info): def get_serialized_info(self, pickled=False): info = { - "ingest_config": self._ingest_config, + "config": self._config, "data_source": self._data_source, "graph_config": self._graph_config, "bigtable_config": self._bigtable_config, diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 1ece8ecc3..89e7f995a 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -77,10 +77,10 @@ def _post_task_completion(imanager: IngestionManager, layer: int, coords: np.nda ) if children_left == 0: - parents_queue = imanager.task_queues[imanager.ingest_config.parents_q_name] - while len(parents_queue) > imanager.ingest_config.parents_q_limit: - print(f"Sleeping {imanager.ingest_config.parents_q_interval}s...") - time.sleep(imanager.ingest_config.parents_q_interval) + parents_queue = imanager.task_queues[imanager.config.parents_q_name] + while len(parents_queue) > imanager.config.parents_q_limit: + print(f"Sleeping {imanager.config.parents_q_interval}s...") + time.sleep(imanager.config.parents_q_interval) parents_queue.enqueue( _create_parent_chunk, job_id=chunk_id_str(parent_layer, parent_coords), @@ -124,11 +124,11 @@ def enqueue_atomic_tasks( print(f"Chunk count: {len(chunk_coords)}") for chunk_coord in chunk_coords: - atomic_queue = imanager.task_queues[imanager.ingest_config.atomic_q_name] + atomic_queue = imanager.task_queues[imanager.config.atomic_q_name] # for optimal use of redis memory wait if queue limit is reached - if len(atomic_queue) > imanager.ingest_config.atomic_q_limit: - print(f"Sleeping {imanager.ingest_config.atomic_q_interval}s...") - 
time.sleep(imanager.ingest_config.atomic_q_interval) + if len(atomic_queue) > imanager.config.atomic_q_limit: + print(f"Sleeping {imanager.config.atomic_q_interval}s...") + time.sleep(imanager.config.atomic_q_interval) atomic_queue.enqueue( _create_atomic_chunk, job_id=chunk_id_str(2, chunk_coord), @@ -146,7 +146,7 @@ def _create_atomic_chunk(im_info, coord): chunk_edges_active, isolated_ids = _get_active_edges( imanager, coord, chunk_edges_all, mapping ) - if not imanager.ingest_config.build_graph: + if not imanager.config.build_graph: imanager.redis.hset(r_keys.ATOMIC_HASH_FINISHED, chunk_id_str(2, coord), "") return add_atomic_edges(imanager.cg, coord, chunk_edges_active, isolated=isolated_ids) diff --git a/rq_workers/test_worker.py b/rq_workers/atomic_worker.py similarity index 94% rename from rq_workers/test_worker.py rename to rq_workers/atomic_worker.py index 608666804..fd2785733 100644 --- a/rq_workers/test_worker.py +++ b/rq_workers/atomic_worker.py @@ -5,7 +5,7 @@ from pychunkedgraph.utils.redis import REDIS_URL # Queues to listen on -QUEUES = ["atomic", "parents"] +QUEUES = ["atomic"] # If you're using Sentry to collect your runtime exceptions, you can use this # to configure RQ for it in a single step diff --git a/rq_workers/mesh_worker.py b/rq_workers/mesh_worker.py deleted file mode 100644 index 686d3aef8..000000000 --- a/rq_workers/mesh_worker.py +++ /dev/null @@ -1,23 +0,0 @@ -import os -# This is for monitoring rq with supervisord -# For the flask app use a config class - -# env REDIS_SERVICE_HOST and REDIS_SERVICE_PORT are added by Kubernetes -REDIS_HOST = os.environ.get('REDIS_SERVICE_HOST') -REDIS_PORT = os.environ.get('REDIS_SERVICE_PORT') -REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD') -if REDIS_PASSWORD is None: - REDIS_URL = f'redis://:{REDIS_HOST}:{REDIS_PORT}/0' -else: - REDIS_URL = f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0' - -# Queues to listen on -QUEUES = ['default', 'mesh-chunks'] - -# If you're using Sentry to 
collect your runtime exceptions, you can use this -# to configure RQ for it in a single step -# The 'sync+' prefix is required for raven: https://github.com/nvie/rq/issues/350#issuecomment-43592410 -# SENTRY_DSN = 'sync+http://public:secret@example.com/1' - -# If you want custom worker name -# NAME = 'worker-1024' diff --git a/rq_workers/ingest_worker.py b/rq_workers/parent_worker.py similarity index 82% rename from rq_workers/ingest_worker.py rename to rq_workers/parent_worker.py index a90a8605a..a0a98f389 100644 --- a/rq_workers/ingest_worker.py +++ b/rq_workers/parent_worker.py @@ -2,12 +2,10 @@ # This is for monitoring rq with supervisord # For the flask app use a config class -# env REDIS_SERVICE_HOST and REDIS_SERVICE_PORT are added by Kubernetes - from pychunkedgraph.utils.redis import REDIS_URL # Queues to listen on -QUEUES = ['default', 'ingest'] +QUEUES = ["parents"] # If you're using Sentry to collect your runtime exceptions, you can use this # to configure RQ for it in a single step From 3c6bb232fe3a6afbeb9e08f48dea1732b07bef52 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 4 Oct 2019 14:44:53 +0000 Subject: [PATCH 0258/1097] queue status command multiple queues --- pychunkedgraph/app/rq_cli.py | 23 ++++++++++++----------- pychunkedgraph/ingest/ingestionmanager.py | 12 +++++++----- pychunkedgraph/ingest/ran_ingestion_v2.py | 4 ++-- 3 files changed, 21 insertions(+), 18 deletions(-) diff --git a/pychunkedgraph/app/rq_cli.py b/pychunkedgraph/app/rq_cli.py index b4fdfe22c..75237148e 100644 --- a/pychunkedgraph/app/rq_cli.py +++ b/pychunkedgraph/app/rq_cli.py @@ -26,19 +26,20 @@ @rq_cli.command("status") -@click.argument("queue", type=str, default="test") +@click.argument("queues", nargs=-1, type=str) @click.option("--show-busy", is_flag=True) -def get_status(queue, show_busy): +def get_status(queues, show_busy): print("NOTE: Use --show-busy to display count of non idle workers\n") - q = Queue(queue, connection=connection) - print(f"Queue name 
\t: {queue}") - print(f"Jobs queued \t: {len(q)}") - print(f"Workers total \t: {Worker.count(queue=q)}") - if show_busy: - workers = Worker.all(queue=q) - count = sum([worker.get_state() == WorkerStatus.BUSY for worker in workers]) - print(f"Workers busy \t: {count}") - print(f"Jobs failed \t: {q.failed_job_registry.count}") + for queue in queues: + q = Queue(queue, connection=connection) + print(f"Queue name \t: {queue}") + print(f"Jobs queued \t: {len(q)}") + print(f"Workers total \t: {Worker.count(queue=q)}") + if show_busy: + workers = Worker.all(queue=q) + count = sum([worker.get_state() == WorkerStatus.BUSY for worker in workers]) + print(f"Workers busy \t: {count}") + print(f"Jobs failed \t: {q.failed_job_registry.count}\n") @rq_cli.command("failed_ids") diff --git a/pychunkedgraph/ingest/ingestionmanager.py b/pychunkedgraph/ingest/ingestionmanager.py index 7fe9d93b7..2575c298c 100644 --- a/pychunkedgraph/ingest/ingestionmanager.py +++ b/pychunkedgraph/ingest/ingestionmanager.py @@ -38,7 +38,7 @@ def __init__( self._chunk_coords = None self._layer_bounds_d = None - self._task_queues = defaultdict(get_rq_queue) + self._task_queues = {} self._bitmasks = None self._bounds = None @@ -117,10 +117,6 @@ def n_layers(self): ) return self._n_layers - @property - def task_queues(self) -> Dict: - return self._task_queues - @property def redis(self): if self._redis: @@ -170,6 +166,12 @@ def edge_dtype(self): def from_pickle(cls, serialized_info): return cls(**pickle.loads(serialized_info)) + def get_task_queue(self, q_name): + if q_name in self._task_queues: + return self._task_queues[q_name] + self._task_queues[q_name] = get_rq_queue(q_name) + return self._task_queues[q_name] + def get_serialized_info(self, pickled=False): info = { "config": self._config, diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 89e7f995a..e0bb50d3a 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ 
b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -77,7 +77,7 @@ def _post_task_completion(imanager: IngestionManager, layer: int, coords: np.nda ) if children_left == 0: - parents_queue = imanager.task_queues[imanager.config.parents_q_name] + parents_queue = imanager.get_task_queue(imanager.config.parents_q_name) while len(parents_queue) > imanager.config.parents_q_limit: print(f"Sleeping {imanager.config.parents_q_interval}s...") time.sleep(imanager.config.parents_q_interval) @@ -124,7 +124,7 @@ def enqueue_atomic_tasks( print(f"Chunk count: {len(chunk_coords)}") for chunk_coord in chunk_coords: - atomic_queue = imanager.task_queues[imanager.config.atomic_q_name] + atomic_queue = imanager.get_task_queue(imanager.config.atomic_q_name) # for optimal use of redis memory wait if queue limit is reached if len(atomic_queue) > imanager.config.atomic_q_limit: print(f"Sleeping {imanager.config.atomic_q_interval}s...") From 014728c3938198ccb0f151fd0d114a26e33b3681 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 4 Oct 2019 18:55:07 +0000 Subject: [PATCH 0259/1097] fix: incorrect variable passed --- pychunkedgraph/ingest/ran_ingestion_v2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index e0bb50d3a..cfd1812f5 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -95,13 +95,13 @@ def _post_task_completion(imanager: IngestionManager, layer: int, coords: np.nda ), ) imanager.redis.hdel(parent_layer, parent_chunk_str) - imanager.redis.hincrby("completed", parent_layer, 1) + imanager.redis.hincrby("completed", layer, 1) def _create_parent_chunk(im_info, layer, parent_coords, child_chunk_coords): imanager = IngestionManager(**im_info) add_layer(imanager.cg, layer, parent_coords, child_chunk_coords) - _post_task_completion(imanager, 2, parent_coords) + _post_task_completion(imanager, layer, parent_coords) def 
enqueue_atomic_tasks( From 3363a76d40a0bd25103394e03c2dfc743402899b Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 7 Oct 2019 19:04:16 +0000 Subject: [PATCH 0260/1097] fix duplication bug --- .../ingest/initialization/abstract_layers.py | 14 +++---- test_add_layer.py | 42 +++++++++++++++++++ 2 files changed, 49 insertions(+), 7 deletions(-) create mode 100644 test_add_layer.py diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index a679ef5dd..4e73ddef5 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -63,12 +63,13 @@ def _process_chunks(cg_instance, layer_id, chunk_coords): def _process_chunk(cg_instance, layer_id, chunk_coord): cross_edge_dict = defaultdict(dict) row_ids, cross_edge_columns_d = _read_chunk(cg_instance, layer_id, chunk_coord) - for row_id in cross_edge_columns_d: - cell_family = cross_edge_columns_d[row_id] - for l in range(layer_id - 1, cg_instance.n_layers): - cross_edges_key = column_keys.Connectivity.CrossChunkEdge[l] - if cross_edges_key in cell_family: - cross_edge_dict[row_id][l] = cell_family[cross_edges_key][0].value + for row_id in row_ids: + if row_id in cross_edge_columns_d: + cell_family = cross_edge_columns_d[row_id] + for l in range(layer_id - 1, cg_instance.n_layers): + cross_edges_key = column_keys.Connectivity.CrossChunkEdge[l] + if cross_edges_key in cell_family: + cross_edge_dict[row_id][l] = cell_family[cross_edges_key][0].value return row_ids, cross_edge_dict @@ -79,7 +80,6 @@ def _read_chunk(cg_instance, layer_id, chunk_coord): for l in range(layer_id - 1, cg_instance.n_layers) ] range_read = cg_instance.range_read_chunk(layer_id - 1, x, y, z, columns=columns) - # Deserialize row keys and store child with highest id for # comparison row_ids = np.fromiter(range_read.keys(), dtype=np.uint64) diff --git a/test_add_layer.py b/test_add_layer.py new file mode 
100644 index 000000000..7f47f5c44 --- /dev/null +++ b/test_add_layer.py @@ -0,0 +1,42 @@ +import numpy as np + +from pychunkedgraph.ingest import IngestConfig +from pychunkedgraph.ingest.ingestionmanager import IngestionManager +from pychunkedgraph.backend.definitions.config import DataSource +from pychunkedgraph.backend.definitions.config import GraphConfig +from pychunkedgraph.backend.definitions.config import BigTableConfig + +from pychunkedgraph.backend.chunkedgraph import ChunkedGraph +from pychunkedgraph.ingest.initialization.abstract_layers import add_layer +from pychunkedgraph.ingest.ran_ingestion_v2 import enqueue_atomic_tasks +from pychunkedgraph.ingest.ran_ingestion_v2 import _get_children_coords + +processed = True +graph_id = "akhilesh-minnie65" + +ingest_config = IngestConfig(build_graph=True) +data_source = DataSource( + agglomeration="gs://ranl-scratch/minnie65_0/agg", + watershed="gs://microns-seunglab/minnie65/ws_minnie65_0", + edges="gs://chunkedgraph/minnie65_0/edges", + components="gs://chunkedgraph/minnie65_0/components", + use_raw_edges=not processed, + use_raw_components=not processed, + data_version=2, +) +graph_config = GraphConfig( + graph_id=graph_id, + chunk_size=np.array([256, 256, 512], dtype=int), + fanout=2, + s_bits_atomic_layer=10, +) +bigtable_config = BigTableConfig() +imanager = IngestionManager(ingest_config, data_source, graph_config, bigtable_config) + +cg = ChunkedGraph("akhilesh-minnie65") +layer = 7 +parent_coords = [10, 6, 0] +children_coords = _get_children_coords(imanager, layer-1, parent_coords) + +print(len(children_coords)) +add_layer(cg, layer, parent_coords, children_coords) From 0245c0199fb232fb2f2554ede4a4235142486249 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 7 Oct 2019 19:23:38 +0000 Subject: [PATCH 0261/1097] use hash to track completed chunks --- pychunkedgraph/ingest/cli.py | 13 +++++++++++++ pychunkedgraph/ingest/ran_ingestion_v2.py | 9 +++++---- 2 files changed, 18 insertions(+), 4 
deletions(-) diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index cf93791c0..72b1195fd 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -88,3 +88,16 @@ def ingest_graph( def init_ingest_cmds(app): app.cli.add_command(ingest_cli) + + + +# for layer_id in range(2, 13): +# print(layer_id) +# child_chunk_coords = im.chunk_coords // 2 ** (layer_id - 3) +# child_chunk_coords = child_chunk_coords.astype(np.int) +# child_chunk_coords = np.unique(child_chunk_coords, axis=0) + +# parent_chunk_coords = child_chunk_coords // 2 +# parent_chunk_coords = parent_chunk_coords.astype(np.int) +# parent_chunk_coords = np.unique(parent_chunk_coords, axis=0) +# print(len(child_chunk_coords), len(parent_chunk_coords)) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index cfd1812f5..497b6774c 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -39,6 +39,9 @@ def _get_children_coords( imanager: IngestionManager, layer: int, parent_coords: Sequence[int] ) -> np.ndarray: + """ + :param: layer - layer of children chunks + """ layer_bounds = imanager.layer_chunk_bounds[layer] children_coords = [] parent_coords = np.array(parent_coords, dtype=int) @@ -78,9 +81,6 @@ def _post_task_completion(imanager: IngestionManager, layer: int, coords: np.nda if children_left == 0: parents_queue = imanager.get_task_queue(imanager.config.parents_q_name) - while len(parents_queue) > imanager.config.parents_q_limit: - print(f"Sleeping {imanager.config.parents_q_interval}s...") - time.sleep(imanager.config.parents_q_interval) parents_queue.enqueue( _create_parent_chunk, job_id=chunk_id_str(parent_layer, parent_coords), @@ -95,7 +95,8 @@ def _post_task_completion(imanager: IngestionManager, layer: int, coords: np.nda ), ) imanager.redis.hdel(parent_layer, parent_chunk_str) - imanager.redis.hincrby("completed", layer, 1) + # put in completed (c) hash + 
imanager.redis.hset(f"{parent_layer}c", parent_chunk_str, "") def _create_parent_chunk(im_info, layer, parent_coords, child_chunk_coords): From 06158704d4ee9bf904f502ac731c023a68ca2e1c Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 7 Oct 2019 19:35:31 +0000 Subject: [PATCH 0262/1097] better task tracking --- pychunkedgraph/ingest/ran_ingestion_v2.py | 37 ++++++++++------------- 1 file changed, 16 insertions(+), 21 deletions(-) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 497b6774c..2e8a7a825 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -55,16 +55,11 @@ def _get_children_coords( def _post_task_completion(imanager: IngestionManager, layer: int, coords: np.ndarray): - """ - get parent - add parent to layer hash - add children count to parent hash - decrement children count by 1 - if count is 0 - enqueue parent - delete parent hash - increment complete hash by 1 - """ + chunk_str = "_".join(map(str, coords)) + # remove from queued hash and put in completed hash + imanager.redis.hdel(f"{layer}q", chunk_str) + imanager.redis.hset(f"{layer}c", chunk_str, "") + parent_layer = layer + 1 if parent_layer > imanager.n_layers: return @@ -96,7 +91,7 @@ def _post_task_completion(imanager: IngestionManager, layer: int, coords: np.nda ) imanager.redis.hdel(parent_layer, parent_chunk_str) # put in completed (c) hash - imanager.redis.hset(f"{parent_layer}c", parent_chunk_str, "") + imanager.redis.hset(f"{parent_layer}q", parent_chunk_str, "") def _create_parent_chunk(im_info, layer, parent_coords, child_chunk_coords): @@ -112,16 +107,16 @@ def enqueue_atomic_tasks( np.random.shuffle(chunk_coords) # test chunks - # chunk_coords = [ - # [0, 0, 0], - # [0, 0, 1], - # [0, 1, 0], - # [0, 1, 1], - # [1, 0, 0], - # [1, 0, 1], - # [1, 1, 0], - # [1, 1, 1], - # ] + chunk_coords = [ + [0, 0, 0], + [0, 0, 1], + [0, 1, 0], + [0, 1, 1], + [1, 0, 0], + [1, 0, 1], + 
[1, 1, 0], + [1, 1, 1], + ] print(f"Chunk count: {len(chunk_coords)}") for chunk_coord in chunk_coords: From 7a1607592d7af62fe822783995f8dc11a545d7a1 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 9 Oct 2019 16:04:52 +0000 Subject: [PATCH 0263/1097] parallelize higher layer construc tion --- .../ingest/initialization/abstract_layers.py | 46 ++++++++++++------- .../ingest/initialization/atomic_layer.py | 4 +- pychunkedgraph/ingest/ran_ingestion_v2.py | 20 ++++---- 3 files changed, 42 insertions(+), 28 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 4e73ddef5..b1a64d310 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -7,10 +7,12 @@ from typing import Optional, Sequence import numpy as np +from multiwrapper import multiprocessing_utils as mu -from pychunkedgraph.backend import flatgraph_utils -from pychunkedgraph.backend.chunkedgraph_utils import get_valid_timestamp -from pychunkedgraph.backend.utils import serializers, column_keys +from ...backend import flatgraph_utils +from ...backend.chunkedgraph import ChunkedGraph +from ...backend.chunkedgraph_utils import get_valid_timestamp +from ...backend.utils import serializers, column_keys def add_layer( @@ -20,16 +22,20 @@ def add_layer( children_coords: Sequence[Sequence[int]], *, time_stamp: Optional[datetime.datetime] = None, - n_threads: int = 20, + n_threads: int = 8, ) -> None: x, y, z = parent_coords parent_chunk_id = cg_instance.get_chunk_id(layer=layer_id, x=x, y=y, z=z) - cross_edge_dict, child_ids = _process_chunks(cg_instance, layer_id, children_coords) - edge_ids = _resolve_cross_chunk_edges_thread(layer_id, child_ids, cross_edge_dict) + children_ids, cross_edge_dict = _read_children_chunks( + n_threads, cg_instance, layer_id, children_coords + ) + + # cross_edge_dict, children_ids = _process_chunks(cg_instance, 
layer_id, children_coords) + edge_ids = _resolve_cross_chunk_edges(layer_id, children_ids, cross_edge_dict) # Extract connected components - isolated_node_mask = ~np.in1d(child_ids, np.unique(edge_ids)) - add_node_ids = child_ids[isolated_node_mask].squeeze() + isolated_node_mask = ~np.in1d(children_ids, np.unique(edge_ids)) + add_node_ids = children_ids[isolated_node_mask].squeeze() add_edge_ids = np.vstack([add_node_ids, add_node_ids]).T edge_ids.extend(add_edge_ids) @@ -50,17 +56,25 @@ def add_layer( return f"{layer_id}_{'_'.join(map(str, (x, y, z)))}" -def _process_chunks(cg_instance, layer_id, chunk_coords): - node_ids = [] +def _read_children_chunks(n_threads, cg_instance, layer_id, children_coords): + cg_info = cg_instance.get_serialized_info() + multi_args = [] + for child_coord in children_coords: + multi_args.append((cg_info, layer_id, child_coord)) + chunk_info = mu.multithread_func( + _process_chunk, multi_args, n_threads=min(n_threads, len(multi_args)) + ) + + children_ids = [] cross_edge_dict = {} - for chunk_coord in chunk_coords: - ids, cross_edge_d = _process_chunk(cg_instance, layer_id, chunk_coord) - node_ids.append(ids) + for ids, cross_edge_d in chunk_info: + children_ids.append(ids) cross_edge_dict = {**cross_edge_dict, **cross_edge_d} - return cross_edge_dict, np.concatenate(node_ids) + return np.concatenate(children_ids), cross_edge_dict -def _process_chunk(cg_instance, layer_id, chunk_coord): +def _process_chunk(cg_info, layer_id, chunk_coord): + cg_instance = ChunkedGraph(**cg_info) cross_edge_dict = defaultdict(dict) row_ids, cross_edge_columns_d = _read_chunk(cg_instance, layer_id, chunk_coord) for row_id in row_ids: @@ -110,7 +124,7 @@ def _read_chunk(cg_instance, layer_id, chunk_coord): return row_ids, cross_edge_columns_d -def _resolve_cross_chunk_edges_thread(layer_id, node_ids, cross_edge_dict) -> None: +def _resolve_cross_chunk_edges(layer_id, node_ids, cross_edge_dict) -> None: cross_edge_dict = defaultdict(dict, cross_edge_dict) 
atomic_partner_id_dict = {} atomic_child_id_dict_pairs = [] diff --git a/pychunkedgraph/ingest/initialization/atomic_layer.py b/pychunkedgraph/ingest/initialization/atomic_layer.py index 949b7fe7e..78a962fb8 100644 --- a/pychunkedgraph/ingest/initialization/atomic_layer.py +++ b/pychunkedgraph/ingest/initialization/atomic_layer.py @@ -106,7 +106,7 @@ def _process_component( rows = [] chunk_out_edges = [] # out = between + cross for node_id in node_ids: - _edges = _get_out_edges(node_id, chunk_edges_d, sparse_indices, remapping) + _edges = _get_outgoing_edges(node_id, chunk_edges_d, sparse_indices, remapping) chunk_out_edges.append(_edges) val_dict = {column_keys.Hierarchy.Parent: parent_id} r_key = serializers.serialize_uint64(node_id) @@ -128,7 +128,7 @@ def _process_component( return rows -def _get_out_edges(node_id, chunk_edges_d, sparse_indices, remapping): +def _get_outgoing_edges(node_id, chunk_edges_d, sparse_indices, remapping): """ TODO add docs returns edges of node_id pointing outside the chunk (between and cross) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 2e8a7a825..18ed9ac25 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -107,16 +107,16 @@ def enqueue_atomic_tasks( np.random.shuffle(chunk_coords) # test chunks - chunk_coords = [ - [0, 0, 0], - [0, 0, 1], - [0, 1, 0], - [0, 1, 1], - [1, 0, 0], - [1, 0, 1], - [1, 1, 0], - [1, 1, 1], - ] + # chunk_coords = [ + # [0, 0, 0], + # [0, 0, 1], + # [0, 1, 0], + # [0, 1, 1], + # [1, 0, 0], + # [1, 0, 1], + # [1, 1, 0], + # [1, 1, 1], + # ] print(f"Chunk count: {len(chunk_coords)}") for chunk_coord in chunk_coords: From 533a65c00e320cbf98e3f2c3144cd777572e32b1 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 9 Oct 2019 16:14:26 +0000 Subject: [PATCH 0264/1097] wip: parallelize higher layer construc tion --- .../ingest/initialization/abstract_layers.py | 28 +++++++++---------- 1 file 
changed, 14 insertions(+), 14 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index b1a64d310..e6d5687f7 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -125,18 +125,20 @@ def _read_chunk(cg_instance, layer_id, chunk_coord): def _resolve_cross_chunk_edges(layer_id, node_ids, cross_edge_dict) -> None: - cross_edge_dict = defaultdict(dict, cross_edge_dict) - atomic_partner_id_dict = {} - atomic_child_id_dict_pairs = [] - for node_id in node_ids: - if int(layer_id - 1) in cross_edge_dict[node_id]: - atomic_cross_edges = cross_edge_dict[node_id][layer_id - 1] - if len(atomic_cross_edges) > 0: - atomic_partner_id_dict[node_id] = atomic_cross_edges[:, 1] - new_pairs = zip( - atomic_cross_edges[:, 0], [node_id] * len(atomic_cross_edges) - ) - atomic_child_id_dict_pairs.extend(new_pairs) + def _resolve_helper(node_ids): + cross_edge_dict = defaultdict(dict, cross_edge_dict) + atomic_partner_id_dict = {} + atomic_child_id_dict_pairs = [] + for node_id in node_ids: + if int(layer_id - 1) in cross_edge_dict[node_id]: + atomic_cross_edges = cross_edge_dict[node_id][layer_id - 1] + if len(atomic_cross_edges) > 0: + atomic_partner_id_dict[node_id] = atomic_cross_edges[:, 1] + new_pairs = zip( + atomic_cross_edges[:, 0], [node_id] * len(atomic_cross_edges) + ) + atomic_child_id_dict_pairs.extend(new_pairs) + return atomic_child_id_dict, atomic_child_id_dict_pairs d = dict(atomic_child_id_dict_pairs) atomic_child_id_dict = defaultdict(np.uint64, d) @@ -151,10 +153,8 @@ def _resolve_cross_chunk_edges(layer_id, node_ids, cross_edge_dict) -> None: } if len(partners) > 0: partners = np.array(list(partners), dtype=np.uint64)[:, None] - this_ids = np.array([child_key] * len(partners), dtype=np.uint64)[:, None] these_edges = np.concatenate([this_ids, partners], axis=1) - edge_ids.extend(these_edges) return 
edge_ids From f228f8ca90b5fa5b084317d71e4973402a76c2aa Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 9 Oct 2019 18:32:31 +0000 Subject: [PATCH 0265/1097] wip: parallelize higher layer construction --- .../ingest/initialization/abstract_layers.py | 71 ++++++++++++------- pychunkedgraph/utils/general.py | 8 +++ 2 files changed, 55 insertions(+), 24 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index e6d5687f7..364259c66 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -9,6 +9,7 @@ import numpy as np from multiwrapper import multiprocessing_utils as mu +from ...utils.general import chunks from ...backend import flatgraph_utils from ...backend.chunkedgraph import ChunkedGraph from ...backend.chunkedgraph_utils import get_valid_timestamp @@ -31,13 +32,15 @@ def add_layer( ) # cross_edge_dict, children_ids = _process_chunks(cg_instance, layer_id, children_coords) - edge_ids = _resolve_cross_chunk_edges(layer_id, children_ids, cross_edge_dict) + edge_ids = _resolve_cross_chunk_edges( + n_threads, layer_id, children_ids, cross_edge_dict + ) # Extract connected components isolated_node_mask = ~np.in1d(children_ids, np.unique(edge_ids)) add_node_ids = children_ids[isolated_node_mask].squeeze() add_edge_ids = np.vstack([add_node_ids, add_node_ids]).T - edge_ids.extend(add_edge_ids) + edge_ids = np.concatenate([edge_ids, add_edge_ids]) graph, _, _, graph_ids = flatgraph_utils.build_gt_graph( edge_ids, make_directed=True @@ -124,9 +127,10 @@ def _read_chunk(cg_instance, layer_id, chunk_coord): return row_ids, cross_edge_columns_d -def _resolve_cross_chunk_edges(layer_id, node_ids, cross_edge_dict) -> None: - def _resolve_helper(node_ids): - cross_edge_dict = defaultdict(dict, cross_edge_dict) +def _resolve_cross_chunk_edges(n_threads, layer_id, node_ids, cross_edge_dict) -> None: + 
cross_edge_dict = defaultdict(dict, cross_edge_dict) + + def _resolve_helper_1(node_ids): atomic_partner_id_dict = {} atomic_child_id_dict_pairs = [] for node_id in node_ids: @@ -138,25 +142,44 @@ def _resolve_helper(node_ids): atomic_cross_edges[:, 0], [node_id] * len(atomic_cross_edges) ) atomic_child_id_dict_pairs.extend(new_pairs) - return atomic_child_id_dict, atomic_child_id_dict_pairs - - d = dict(atomic_child_id_dict_pairs) - atomic_child_id_dict = defaultdict(np.uint64, d) - - edge_ids = [] - for child_key in atomic_partner_id_dict: - this_atomic_partner_ids = atomic_partner_id_dict[child_key] - partners = { - atomic_child_id_dict[atomic_cross_id] - for atomic_cross_id in this_atomic_partner_ids - if atomic_child_id_dict[atomic_cross_id] != 0 - } - if len(partners) > 0: - partners = np.array(list(partners), dtype=np.uint64)[:, None] - this_ids = np.array([child_key] * len(partners), dtype=np.uint64)[:, None] - these_edges = np.concatenate([this_ids, partners], axis=1) - edge_ids.extend(these_edges) - return edge_ids + return atomic_partner_id_dict, atomic_child_id_dict_pairs + + multi_args = list(chunks(node_ids, n_threads * 3)) + if not len(multi_args): + return + + atomic_info = mu.multithread_func( + _resolve_helper_1, multi_args, n_threads=n_threads + ) + + atomic_partner_id_dict = {} + atomic_child_id_dict_pairs = [] + for partner_d, pairs in atomic_info: + atomic_partner_id_dict = {**atomic_partner_id_dict, **partner_d} + atomic_child_id_dict_pairs.append(pairs) + + atomic_child_id_dict = defaultdict(np.uint64, dict(atomic_child_id_dict_pairs)) + + def _resolve_helper_2(atomic_partner_id_dict_keys): + for k in atomic_partner_id_dict_keys: + this_atomic_partner_ids = atomic_partner_id_dict[k] + partners = { + atomic_child_id_dict[atomic_cross_id] + for atomic_cross_id in this_atomic_partner_ids + if atomic_child_id_dict[atomic_cross_id] != 0 + } + if len(partners) > 0: + partners = np.array(list(partners), dtype=np.uint64)[:, None] + this_ids = 
np.array([k] * len(partners), dtype=np.uint64)[:, None] + return np.concatenate([this_ids, partners], axis=1) + return [] + + multi_args = list(chunks(atomic_partner_id_dict.keys(), n_threads * 3)) + if not len(multi_args): + return + + edge_ids = mu.multithread_func(_resolve_helper_1, multi_args, n_threads=n_threads) + return np.concatenate(edge_ids) def _write_out_connected_components( diff --git a/pychunkedgraph/utils/general.py b/pychunkedgraph/utils/general.py index b2825f671..682f7b9cb 100644 --- a/pychunkedgraph/utils/general.py +++ b/pychunkedgraph/utils/general.py @@ -1,6 +1,8 @@ """ generic helper funtions """ +from typing import Sequence + import numpy as np @@ -18,3 +20,9 @@ def reverse_dictionary(dictionary): vals = np.concatenate(vals) return {k: v for k, v in zip(vals, keys)} + + +def chunks(l: Sequence, n: int): + """Yield successive n-sized chunks from l.""" + for i in range(0, len(l), n): + yield l[i:i + n] From d3645a7cb1a9b9a67c0526ed72ee02a23f91efb4 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 9 Oct 2019 20:06:43 +0000 Subject: [PATCH 0266/1097] wip: parallelize higher layer construction --- .../ingest/initialization/abstract_layers.py | 23 +++++++++++++++---- pychunkedgraph/ingest/ran_ingestion_v2.py | 1 + 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 364259c66..24cfc58cf 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -23,7 +23,7 @@ def add_layer( children_coords: Sequence[Sequence[int]], *, time_stamp: Optional[datetime.datetime] = None, - n_threads: int = 8, + n_threads: int = 32, ) -> None: x, y, z = parent_coords parent_chunk_id = cg_instance.get_chunk_id(layer=layer_id, x=x, y=y, z=z) @@ -64,8 +64,9 @@ def _read_children_chunks(n_threads, cg_instance, layer_id, children_coords): multi_args = [] for 
child_coord in children_coords: multi_args.append((cg_info, layer_id, child_coord)) + print(f"Reading children chunks, threads {min(n_threads, len(multi_args))}") chunk_info = mu.multithread_func( - _process_chunk, multi_args, n_threads=min(n_threads, len(multi_args)) + _process_chunk_thread, multi_args, n_threads=min(n_threads, len(multi_args)) ) children_ids = [] @@ -73,10 +74,13 @@ def _read_children_chunks(n_threads, cg_instance, layer_id, children_coords): for ids, cross_edge_d in chunk_info: children_ids.append(ids) cross_edge_dict = {**cross_edge_dict, **cross_edge_d} + print("Reading chunks complete") return np.concatenate(children_ids), cross_edge_dict -def _process_chunk(cg_info, layer_id, chunk_coord): +def _process_chunk_thread(args): + cg_info, layer_id, chunk_coord = args + print(f"Reading chunk {chunk_coord}") cg_instance = ChunkedGraph(**cg_info) cross_edge_dict = defaultdict(dict) row_ids, cross_edge_columns_d = _read_chunk(cg_instance, layer_id, chunk_coord) @@ -87,6 +91,7 @@ def _process_chunk(cg_info, layer_id, chunk_coord): cross_edges_key = column_keys.Connectivity.CrossChunkEdge[l] if cross_edges_key in cell_family: cross_edge_dict[row_id][l] = cell_family[cross_edges_key][0].value + print(f"Done reading {chunk_coord}") return row_ids, cross_edge_dict @@ -128,6 +133,7 @@ def _read_chunk(cg_instance, layer_id, chunk_coord): def _resolve_cross_chunk_edges(n_threads, layer_id, node_ids, cross_edge_dict) -> None: + print("Resolve cross chunk edges") cross_edge_dict = defaultdict(dict, cross_edge_dict) def _resolve_helper_1(node_ids): @@ -148,8 +154,11 @@ def _resolve_helper_1(node_ids): if not len(multi_args): return + print( + f"node ids {len(node_ids)}, multiargs {len(multi_args)} job size {len(multi_args[0])}" + ) atomic_info = mu.multithread_func( - _resolve_helper_1, multi_args, n_threads=n_threads + _resolve_helper_1, multi_args, n_threads=n_threads * 3 ) atomic_partner_id_dict = {} @@ -178,7 +187,11 @@ def 
_resolve_helper_2(atomic_partner_id_dict_keys): if not len(multi_args): return - edge_ids = mu.multithread_func(_resolve_helper_1, multi_args, n_threads=n_threads) + print( + f"partner keys {len(atomic_partner_id_dict.keys())}, multiargs {len(multi_args)}" + ) + edge_ids = mu.multithread_func(_resolve_helper_2, multi_args, n_threads=n_threads) + print("Resolve cross chunk edges complete") return np.concatenate(edge_ids) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 18ed9ac25..e14aeb42b 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -73,6 +73,7 @@ def _post_task_completion(imanager: IngestionManager, layer: int, coords: np.nda children_left = int( imanager.redis.hget(parent_layer, parent_chunk_str).decode("utf-8") ) + return if children_left == 0: parents_queue = imanager.get_task_queue(imanager.config.parents_q_name) From 6b682447f65f8afe1974012a7eb13bcc64554c08 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 9 Oct 2019 20:34:13 +0000 Subject: [PATCH 0267/1097] fix job size --- pychunkedgraph/ingest/initialization/abstract_layers.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 24cfc58cf..f679acce7 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -150,7 +150,9 @@ def _resolve_helper_1(node_ids): atomic_child_id_dict_pairs.extend(new_pairs) return atomic_partner_id_dict, atomic_child_id_dict_pairs - multi_args = list(chunks(node_ids, n_threads * 3)) + n_jobs = n_threads * 3 + chunk_size = len(node_ids) // n_jobs + multi_args = list(chunks(node_ids, chunk_size)) if not len(multi_args): return @@ -183,7 +185,9 @@ def _resolve_helper_2(atomic_partner_id_dict_keys): return np.concatenate([this_ids, partners], 
axis=1) return [] - multi_args = list(chunks(atomic_partner_id_dict.keys(), n_threads * 3)) + n_jobs = n_threads * 3 + chunk_size = len(node_ids) // n_jobs + multi_args = list(chunks(atomic_partner_id_dict.keys(), chunk_size)) if not len(multi_args): return From 7b3cfb18feb1a57b2cd2b65586c2c312670c0bf0 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 9 Oct 2019 23:30:02 +0000 Subject: [PATCH 0268/1097] wip: parallelize --- .../ingest/initialization/abstract_layers.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index f679acce7..577e47999 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -80,10 +80,10 @@ def _read_children_chunks(n_threads, cg_instance, layer_id, children_coords): def _process_chunk_thread(args): cg_info, layer_id, chunk_coord = args - print(f"Reading chunk {chunk_coord}") cg_instance = ChunkedGraph(**cg_info) - cross_edge_dict = defaultdict(dict) row_ids, cross_edge_columns_d = _read_chunk(cg_instance, layer_id, chunk_coord) + + cross_edge_dict = defaultdict(dict) for row_id in row_ids: if row_id in cross_edge_columns_d: cell_family = cross_edge_columns_d[row_id] @@ -91,7 +91,7 @@ def _process_chunk_thread(args): cross_edges_key = column_keys.Connectivity.CrossChunkEdge[l] if cross_edges_key in cell_family: cross_edge_dict[row_id][l] = cell_family[cross_edges_key][0].value - print(f"Done reading {chunk_coord}") + return row_ids, cross_edge_dict @@ -101,9 +101,12 @@ def _read_chunk(cg_instance, layer_id, chunk_coord): column_keys.Connectivity.CrossChunkEdge[l] for l in range(layer_id - 1, cg_instance.n_layers) ] + print(f"Reading chunk {chunk_coord}") range_read = cg_instance.range_read_chunk(layer_id - 1, x, y, z, columns=columns) - # Deserialize row keys and store child with highest id for - # comparison + 
print(f"Done reading {chunk_coord}") + + print(f"Getting relevant children ids {chunk_coord}") + # Deserialize row keys and store child with highest id for comparison row_ids = np.fromiter(range_read.keys(), dtype=np.uint64) segment_ids = np.array([cg_instance.get_segment_id(r_id) for r_id in row_ids]) cross_edge_columns_d = {} @@ -129,6 +132,7 @@ def _read_chunk(cg_instance, layer_id, chunk_coord): max_child_ids_occ_so_far[i_row] = counter[max_child_ids[i_row]] counter[max_child_ids[i_row]] += 1 row_ids = row_ids[max_child_ids_occ_so_far == 0] + print(f"Getting relevant children ids {chunk_coord} done") return row_ids, cross_edge_columns_d From 58bc86505c6ee22a2c1bcc048ff6d7b84cb40cff Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 10 Oct 2019 00:42:33 +0000 Subject: [PATCH 0269/1097] fix: extend, not append --- pychunkedgraph/ingest/cli.py | 12 ++++++++++-- .../ingest/initialization/abstract_layers.py | 2 +- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index 72b1195fd..c4c83c774 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -86,11 +86,19 @@ def ingest_graph( enqueue_atomic_tasks(imanager) +@ingest_cli.command("status") +def ingest_status(): + redis = get_redis_connection() + imanager = IngestionManager.from_pickle(redis.get(r_keys.INGESTION_MANAGER)) + for layer in range(2, imanager.n_layers): + layer_count = redis.hlen(f"{layer}c") + print(f"{layer}\t: {layer_count}") + + def init_ingest_cmds(app): app.cli.add_command(ingest_cli) - # for layer_id in range(2, 13): # print(layer_id) # child_chunk_coords = im.chunk_coords // 2 ** (layer_id - 3) @@ -100,4 +108,4 @@ def init_ingest_cmds(app): # parent_chunk_coords = child_chunk_coords // 2 # parent_chunk_coords = parent_chunk_coords.astype(np.int) # parent_chunk_coords = np.unique(parent_chunk_coords, axis=0) -# print(len(child_chunk_coords), len(parent_chunk_coords)) +# 
print(len(child_chunk_coords), len(parent_chunk_coords)) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 577e47999..a142147b2 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -171,7 +171,7 @@ def _resolve_helper_1(node_ids): atomic_child_id_dict_pairs = [] for partner_d, pairs in atomic_info: atomic_partner_id_dict = {**atomic_partner_id_dict, **partner_d} - atomic_child_id_dict_pairs.append(pairs) + atomic_child_id_dict_pairs.extend(pairs) atomic_child_id_dict = defaultdict(np.uint64, dict(atomic_child_id_dict_pairs)) From 3f8dd907c584b2a5423210a215529939ef8e4350 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 10 Oct 2019 02:09:45 +0000 Subject: [PATCH 0270/1097] wip: use processes to utilize multiple cores --- .../ingest/initialization/abstract_layers.py | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index a142147b2..176aa10c1 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -35,6 +35,7 @@ def add_layer( edge_ids = _resolve_cross_chunk_edges( n_threads, layer_id, children_ids, cross_edge_dict ) + print(len(children_ids), len(edge_ids)) # Extract connected components isolated_node_mask = ~np.in1d(children_ids, np.unique(edge_ids)) @@ -64,8 +65,7 @@ def _read_children_chunks(n_threads, cg_instance, layer_id, children_coords): multi_args = [] for child_coord in children_coords: multi_args.append((cg_info, layer_id, child_coord)) - print(f"Reading children chunks, threads {min(n_threads, len(multi_args))}") - chunk_info = mu.multithread_func( + chunk_info = mu.multiprocess_func( _process_chunk_thread, multi_args, n_threads=min(n_threads, len(multi_args)) 
) @@ -74,7 +74,6 @@ def _read_children_chunks(n_threads, cg_instance, layer_id, children_coords): for ids, cross_edge_d in chunk_info: children_ids.append(ids) cross_edge_dict = {**cross_edge_dict, **cross_edge_d} - print("Reading chunks complete") return np.concatenate(children_ids), cross_edge_dict @@ -101,11 +100,8 @@ def _read_chunk(cg_instance, layer_id, chunk_coord): column_keys.Connectivity.CrossChunkEdge[l] for l in range(layer_id - 1, cg_instance.n_layers) ] - print(f"Reading chunk {chunk_coord}") range_read = cg_instance.range_read_chunk(layer_id - 1, x, y, z, columns=columns) - print(f"Done reading {chunk_coord}") - print(f"Getting relevant children ids {chunk_coord}") # Deserialize row keys and store child with highest id for comparison row_ids = np.fromiter(range_read.keys(), dtype=np.uint64) segment_ids = np.array([cg_instance.get_segment_id(r_id) for r_id in row_ids]) @@ -132,12 +128,10 @@ def _read_chunk(cg_instance, layer_id, chunk_coord): max_child_ids_occ_so_far[i_row] = counter[max_child_ids[i_row]] counter[max_child_ids[i_row]] += 1 row_ids = row_ids[max_child_ids_occ_so_far == 0] - print(f"Getting relevant children ids {chunk_coord} done") return row_ids, cross_edge_columns_d def _resolve_cross_chunk_edges(n_threads, layer_id, node_ids, cross_edge_dict) -> None: - print("Resolve cross chunk edges") cross_edge_dict = defaultdict(dict, cross_edge_dict) def _resolve_helper_1(node_ids): @@ -160,9 +154,6 @@ def _resolve_helper_1(node_ids): if not len(multi_args): return - print( - f"node ids {len(node_ids)}, multiargs {len(multi_args)} job size {len(multi_args[0])}" - ) atomic_info = mu.multithread_func( _resolve_helper_1, multi_args, n_threads=n_threads * 3 ) @@ -191,15 +182,11 @@ def _resolve_helper_2(atomic_partner_id_dict_keys): n_jobs = n_threads * 3 chunk_size = len(node_ids) // n_jobs - multi_args = list(chunks(atomic_partner_id_dict.keys(), chunk_size)) + multi_args = list(chunks(list(atomic_partner_id_dict), chunk_size)) if not 
len(multi_args): return - print( - f"partner keys {len(atomic_partner_id_dict.keys())}, multiargs {len(multi_args)}" - ) edge_ids = mu.multithread_func(_resolve_helper_2, multi_args, n_threads=n_threads) - print("Resolve cross chunk edges complete") return np.concatenate(edge_ids) From 50c56e17ee862f8f579cfe860b2ff648ee37811e Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 11 Oct 2019 00:39:25 +0000 Subject: [PATCH 0271/1097] wip: higher layer ingest speed up --- .../ingest/initialization/abstract_layers.py | 104 +++++++++++------- 1 file changed, 62 insertions(+), 42 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 176aa10c1..5319be130 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -2,6 +2,7 @@ Functions for creating parents in level 3 and above """ +import time import datetime from collections import defaultdict from typing import Optional, Sequence @@ -27,14 +28,19 @@ def add_layer( ) -> None: x, y, z = parent_coords parent_chunk_id = cg_instance.get_chunk_id(layer=layer_id, x=x, y=y, z=z) + + start = time.time() children_ids, cross_edge_dict = _read_children_chunks( n_threads, cg_instance, layer_id, children_coords ) + print(f"_read_children_chunks: {time.time()-start}") # cross_edge_dict, children_ids = _process_chunks(cg_instance, layer_id, children_coords) + start = time.time() edge_ids = _resolve_cross_chunk_edges( n_threads, layer_id, children_ids, cross_edge_dict ) + print(f"_resolve_cross_chunk_edges: {time.time()-start}") print(len(children_ids), len(edge_ids)) # Extract connected components @@ -48,6 +54,7 @@ def add_layer( ) ccs = flatgraph_utils.connected_components(graph) + start = time.time() _write_out_connected_components( cg_instance, layer_id, @@ -57,15 +64,17 @@ def add_layer( graph_ids, time_stamp, ) + print(f"_write_out_connected_components: 
{time.time()-start}") return f"{layer_id}_{'_'.join(map(str, (x, y, z)))}" def _read_children_chunks(n_threads, cg_instance, layer_id, children_coords): cg_info = cg_instance.get_serialized_info() + del cg_info["credentials"] multi_args = [] for child_coord in children_coords: multi_args.append((cg_info, layer_id, child_coord)) - chunk_info = mu.multiprocess_func( + chunk_info = mu.multithread_func( _process_chunk_thread, multi_args, n_threads=min(n_threads, len(multi_args)) ) @@ -81,7 +90,7 @@ def _process_chunk_thread(args): cg_info, layer_id, chunk_coord = args cg_instance = ChunkedGraph(**cg_info) row_ids, cross_edge_columns_d = _read_chunk(cg_instance, layer_id, chunk_coord) - + cross_edge_dict = defaultdict(dict) for row_id in row_ids: if row_id in cross_edge_columns_d: @@ -90,7 +99,7 @@ def _process_chunk_thread(args): cross_edges_key = column_keys.Connectivity.CrossChunkEdge[l] if cross_edges_key in cell_family: cross_edge_dict[row_id][l] = cell_family[cross_edges_key][0].value - + return row_ids, cross_edge_dict @@ -101,7 +110,7 @@ def _read_chunk(cg_instance, layer_id, chunk_coord): for l in range(layer_id - 1, cg_instance.n_layers) ] range_read = cg_instance.range_read_chunk(layer_id - 1, x, y, z, columns=columns) - + # Deserialize row keys and store child with highest id for comparison row_ids = np.fromiter(range_read.keys(), dtype=np.uint64) segment_ids = np.array([cg_instance.get_segment_id(r_id) for r_id in row_ids]) @@ -134,29 +143,47 @@ def _read_chunk(cg_instance, layer_id, chunk_coord): def _resolve_cross_chunk_edges(n_threads, layer_id, node_ids, cross_edge_dict) -> None: cross_edge_dict = defaultdict(dict, cross_edge_dict) - def _resolve_helper_1(node_ids): - atomic_partner_id_dict = {} - atomic_child_id_dict_pairs = [] - for node_id in node_ids: - if int(layer_id - 1) in cross_edge_dict[node_id]: - atomic_cross_edges = cross_edge_dict[node_id][layer_id - 1] - if len(atomic_cross_edges) > 0: - atomic_partner_id_dict[node_id] = 
atomic_cross_edges[:, 1] - new_pairs = zip( - atomic_cross_edges[:, 0], [node_id] * len(atomic_cross_edges) - ) - atomic_child_id_dict_pairs.extend(new_pairs) - return atomic_partner_id_dict, atomic_child_id_dict_pairs + start = time.time() + atomic_partner_id_dict, atomic_child_id_dict_pairs = _get_atomic_partners( + n_threads, layer_id, node_ids, cross_edge_dict + ) + print(f"_get_atomic_partners: {time.time()-start}") + atomic_child_id_dict = defaultdict(np.uint64, dict(atomic_child_id_dict_pairs)) + + start = time.time() + cross_edges = _get_cross_edges(atomic_partner_id_dict, atomic_child_id_dict) + print(f"_get_cross_edges: {time.time()-start}") + return cross_edges + +def _get_atomic_partners_thread(args): + layer_id, node_ids, cross_edge_dict = args + atomic_partner_id_dict = {} + atomic_child_id_dict_pairs = [] + for node_id in node_ids: + if int(layer_id - 1) in cross_edge_dict[node_id]: + atomic_cross_edges = cross_edge_dict[node_id][layer_id - 1] + if len(atomic_cross_edges) > 0: + atomic_partner_id_dict[node_id] = atomic_cross_edges[:, 1] + new_pairs = zip( + atomic_cross_edges[:, 0], [node_id] * len(atomic_cross_edges) + ) + atomic_child_id_dict_pairs.extend(new_pairs) + return atomic_partner_id_dict, atomic_child_id_dict_pairs + + +def _get_atomic_partners(n_threads, layer_id, node_ids, cross_edge_dict): n_jobs = n_threads * 3 chunk_size = len(node_ids) // n_jobs - multi_args = list(chunks(node_ids, chunk_size)) + multi_args = [] + + for node_ids_chunk in chunks(node_ids, chunk_size): + cross_edge_dict_part = {key: cross_edge_dict[key] for key in node_ids_chunk} + multi_args.append((layer_id, node_ids_chunk, cross_edge_dict_part)) if not len(multi_args): return - atomic_info = mu.multithread_func( - _resolve_helper_1, multi_args, n_threads=n_threads * 3 - ) + atomic_info = mu.multithread_func(_get_atomic_partners_thread, multi_args) atomic_partner_id_dict = {} atomic_child_id_dict_pairs = [] @@ -164,29 +191,22 @@ def _resolve_helper_1(node_ids): 
atomic_partner_id_dict = {**atomic_partner_id_dict, **partner_d} atomic_child_id_dict_pairs.extend(pairs) - atomic_child_id_dict = defaultdict(np.uint64, dict(atomic_child_id_dict_pairs)) - - def _resolve_helper_2(atomic_partner_id_dict_keys): - for k in atomic_partner_id_dict_keys: - this_atomic_partner_ids = atomic_partner_id_dict[k] - partners = { - atomic_child_id_dict[atomic_cross_id] - for atomic_cross_id in this_atomic_partner_ids - if atomic_child_id_dict[atomic_cross_id] != 0 - } - if len(partners) > 0: - partners = np.array(list(partners), dtype=np.uint64)[:, None] - this_ids = np.array([k] * len(partners), dtype=np.uint64)[:, None] - return np.concatenate([this_ids, partners], axis=1) - return [] + return atomic_partner_id_dict, atomic_child_id_dict_pairs - n_jobs = n_threads * 3 - chunk_size = len(node_ids) // n_jobs - multi_args = list(chunks(list(atomic_partner_id_dict), chunk_size)) - if not len(multi_args): - return - edge_ids = mu.multithread_func(_resolve_helper_2, multi_args, n_threads=n_threads) +def _get_cross_edges(atomic_partner_id_dict, atomic_child_id_dict): + edge_ids = [] + for k in atomic_partner_id_dict: + this_atomic_partner_ids = atomic_partner_id_dict[k] + partners = { + atomic_child_id_dict[atomic_cross_id] + for atomic_cross_id in this_atomic_partner_ids + if atomic_child_id_dict[atomic_cross_id] != 0 + } + if len(partners) > 0: + partners = np.array(list(partners), dtype=np.uint64)[:, None] + this_ids = np.array([k] * len(partners), dtype=np.uint64)[:, None] + edge_ids.append(np.concatenate([this_ids, partners], axis=1)) return np.concatenate(edge_ids) From 523637adf12194c84403d2fc8068596f4de815f0 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 11 Oct 2019 19:42:19 +0000 Subject: [PATCH 0272/1097] wip: read cross edges from layer 2 --- pychunkedgraph/backend/__init__.py | 54 ++++++++++++++++++++ pychunkedgraph/backend/chunkedgraph_utils.py | 22 +++++++- 2 files changed, 74 insertions(+), 2 deletions(-) diff --git 
a/pychunkedgraph/backend/__init__.py b/pychunkedgraph/backend/__init__.py index e69de29bb..be941a50c 100644 --- a/pychunkedgraph/backend/__init__.py +++ b/pychunkedgraph/backend/__init__.py @@ -0,0 +1,54 @@ +from typing import Sequence, Dict + +import numpy as np +from cloudvolume import CloudVolume + + +from .chunkedgraph_utils import get_voxels_boundary +from .chunkedgraph_utils import get_chunks_boundary +from .definitions.config import DataSource +from .definitions.config import GraphConfig +from .definitions.config import BigTableConfig +from .chunkedgraph_utils import log_n + + +class GraphMeta: + def __init__( + self, + data_source: DataSource, + graph_config: GraphConfig, + bigtable_config: BigTableConfig, + ): + self._data_source = data_source + self._graph_config = graph_config + self._bigtable_config = bigtable_config + + self._ws_cv = CloudVolume(data_source.watershed) + self._layer_bounds_d = None + self._layer_count = None + + @property + def layer_count(self) -> int: + if self._layer_count: + return self._layer_count + bbox = np.array(self._ws_cv.bounds.to_list()).reshape(2, 3) + n_chunks = ((bbox[1] - bbox[0]) / self._graph_config.chunk_size).astype(np.int) + n_layers = int(np.ceil(log_n(np.max(n_chunks), self._graph_config.fanout))) + 2 + return n_layers + + @property + def layer_chunk_bounds(self) -> Dict: + if self._layer_bounds_d: + return self._layer_bounds_d + + voxels_boundary = get_voxels_boundary(self._ws_cv) + chunks_boundary = get_chunks_boundary( + voxels_boundary, self._graph_config.chunk_size + ) + + layer_bounds_d = {} + for layer in range(2, self.layer_count): + layer_bounds = chunks_boundary / (2 ** (layer - 2)) + layer_bounds_d[layer] = np.ceil(layer_bounds).astype(np.int) + self._layer_bounds_d = layer_bounds_d + return self._layer_bounds_d diff --git a/pychunkedgraph/backend/chunkedgraph_utils.py b/pychunkedgraph/backend/chunkedgraph_utils.py index c6f3d1d61..0b212af34 100644 --- a/pychunkedgraph/backend/chunkedgraph_utils.py +++ 
b/pychunkedgraph/backend/chunkedgraph_utils.py @@ -1,10 +1,13 @@ import datetime -from typing import Dict, Iterable, Optional, Union +from typing import Dict +from typing import Iterable +from typing import Optional +from typing import Union +from typing import Sequence import numpy as np import pandas as pd import pytz - from google.cloud import bigtable from google.cloud.bigtable.row_filters import ( TimestampRange, @@ -14,6 +17,8 @@ RowFilterUnion, RowFilter, ) +from cloudvolume import CloudVolume + from .utils import column_keys from .utils import serializers @@ -246,3 +251,16 @@ def compute_chunk_id( layer << layer_offset | x << x_offset | y << y_offset | z << z_offset ) + +def get_voxels_boundary(cv: CloudVolume) -> Sequence[int]: + """returns number of voxels in each dimension""" + cv_bounds = np.array(cv.bounds.to_list()).reshape(2, -1).T + voxel_counts = cv_bounds.copy() + voxel_counts -= cv_bounds[:, 0:1] + voxel_counts = voxel_counts[:, 1] + return voxel_counts + + +def get_chunks_boundary(voxel_boundary, chunk_size): + return np.ceil((voxel_boundary / chunk_size)).astype(np.int) + From bc3672454fbdf63a28533581eb9e740a1e1e8fd0 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 11 Oct 2019 20:06:27 +0000 Subject: [PATCH 0273/1097] wip: read cross edges from layer 2 --- pychunkedgraph/backend/__init__.py | 4 +- pychunkedgraph/ingest/ingestionmanager.py | 61 ++--------------------- 2 files changed, 7 insertions(+), 58 deletions(-) diff --git a/pychunkedgraph/backend/__init__.py b/pychunkedgraph/backend/__init__.py index be941a50c..db47f7858 100644 --- a/pychunkedgraph/backend/__init__.py +++ b/pychunkedgraph/backend/__init__.py @@ -1,4 +1,5 @@ -from typing import Sequence, Dict +from typing import Sequence +from typing import Dict import numpy as np from cloudvolume import CloudVolume @@ -38,6 +39,7 @@ def layer_count(self) -> int: @property def layer_chunk_bounds(self) -> Dict: + """number of chunks in each dimension in each layer""" if 
self._layer_bounds_d: return self._layer_bounds_d diff --git a/pychunkedgraph/ingest/ingestionmanager.py b/pychunkedgraph/ingest/ingestionmanager.py index 2575c298c..51b05cbb6 100644 --- a/pychunkedgraph/ingest/ingestionmanager.py +++ b/pychunkedgraph/ingest/ingestionmanager.py @@ -11,6 +11,7 @@ from ..utils.redis import keys as r_keys from ..utils.redis import get_redis_connection from ..utils.redis import get_rq_queue +from ..backend import GraphMeta from ..backend.chunkedgraph_utils import compute_bitmasks from ..backend.chunkedgraph import ChunkedGraph from ..backend.definitions.config import DataSource @@ -33,6 +34,7 @@ def __init__( self._bigtable_config = bigtable_config self._cg = None + self._graph_meta = GraphMeta(data_source, graph_config, bigtable_config) self._ws_cv = CloudVolume(data_source.watershed) self._n_layers = None self._chunk_coords = None @@ -49,16 +51,8 @@ def config(self): return self._config @property - def data_source(self): - return self._data_source - - @property - def graph_config(self): - return self._graph_config - - @property - def bigtable_config(self): - return self._bigtable_config + def graph_meta(self): + return self._graph_meta @property def cg(self): @@ -70,53 +64,6 @@ def cg(self): ) return self._cg - @property - def bounds(self): - if not self._bounds is None: - return self._bounds - cv_bounds = np.array(self._ws_cv.bounds.to_list()).reshape(2, -1).T - self._bounds = cv_bounds.copy() - self._bounds -= cv_bounds[:, 0:1] - return self._bounds - - @property - def chunk_id_bounds(self): - return np.ceil((self.bounds / self._graph_config.chunk_size[:, None])).astype( - np.int - ) - - @property - def layer_chunk_bounds(self) -> Dict: - if self._layer_bounds_d: - return self._layer_bounds_d - layer_bounds_d = {} - for layer in range(2, self.n_layers): - layer_bounds = self.chunk_id_bounds / (2 ** (layer - 2)) - layer_bounds_d[layer] = np.ceil(layer_bounds).astype(np.int) - self._layer_bounds_d = layer_bounds_d - return 
self._layer_bounds_d - - @property - def chunk_coord_gen(self): - return itertools.product(*[range(*r) for r in self.chunk_id_bounds]) - - @property - def chunk_coords(self): - if not self._chunk_coords is None: - return self._chunk_coords - self._chunk_coords = np.array(list(self.chunk_coord_gen), dtype=np.int) - return self._chunk_coords - - @property - def n_layers(self): - if not self._n_layers: - self._n_layers = get_layer_count( - self._ws_cv, - self._graph_config.chunk_size, - fan_out=self._graph_config.fanout, - ) - return self._n_layers - @property def redis(self): if self._redis: From 0390ec432cd92128aeb8e7dc43602e8664dd69f4 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 11 Oct 2019 20:18:41 +0000 Subject: [PATCH 0274/1097] wip: read cross edges from layer 2 --- pychunkedgraph/backend/__init__.py | 12 +++++++ pychunkedgraph/ingest/ingestionmanager.py | 38 +++++++---------------- 2 files changed, 24 insertions(+), 26 deletions(-) diff --git a/pychunkedgraph/backend/__init__.py b/pychunkedgraph/backend/__init__.py index db47f7858..ad0314f39 100644 --- a/pychunkedgraph/backend/__init__.py +++ b/pychunkedgraph/backend/__init__.py @@ -28,6 +28,18 @@ def __init__( self._layer_bounds_d = None self._layer_count = None + @property + def data_source(self): + return self._data_source + + @property + def graph_config(self): + return self._graph_config + + @property + def bigtable_config(self): + return self._bigtable_config + @property def layer_count(self) -> int: if self._layer_count: diff --git a/pychunkedgraph/ingest/ingestionmanager.py b/pychunkedgraph/ingest/ingestionmanager.py index 51b05cbb6..2a06a8a67 100644 --- a/pychunkedgraph/ingest/ingestionmanager.py +++ b/pychunkedgraph/ingest/ingestionmanager.py @@ -20,22 +20,13 @@ class IngestionManager(object): - def __init__( - self, - config: IngestConfig, - data_source: DataSource, - graph_config: GraphConfig, - bigtable_config: BigTableConfig, - ): + def __init__(self, config: IngestConfig, 
graph_meta: GraphMeta): self._config = config - self._data_source = data_source - self._graph_config = graph_config - self._bigtable_config = bigtable_config self._cg = None - self._graph_meta = GraphMeta(data_source, graph_config, bigtable_config) - self._ws_cv = CloudVolume(data_source.watershed) + self._graph_meta = graph_meta + self._ws_cv = CloudVolume(graph_meta.data_source.watershed) self._n_layers = None self._chunk_coords = None self._layer_bounds_d = None @@ -58,9 +49,9 @@ def graph_meta(self): def cg(self): if self._cg is None: self._cg = ChunkedGraph( - self._graph_config.graph_id, - self._bigtable_config.project_id, - self._bigtable_config.instance_id, + self._graph_meta.graph_config.graph_id, + self._graph_meta.bigtable_config.project_id, + self._graph_meta.bigtable_config.instance_id, ) return self._cg @@ -76,7 +67,7 @@ def redis(self): @property def edge_dtype(self): - if self._data_source.data_version == 4: + if self._graph_meta.data_source.data_version == 4: dtype = [ ("sv1", np.uint64), ("sv2", np.uint64), @@ -87,7 +78,7 @@ def edge_dtype(self): ("aff_z", np.float32), ("area_z", np.uint64), ] - elif self._data_source.data_version == 3: + elif self._graph_meta.data_source.data_version == 3: dtype = [ ("sv1", np.uint64), ("sv2", np.uint64), @@ -98,7 +89,7 @@ def edge_dtype(self): ("aff_z", np.float64), ("area_z", np.uint64), ] - elif self._data_source.data_version == 2: + elif self._graph_meta.data_source.data_version == 2: dtype = [ ("sv1", np.uint64), ("sv2", np.uint64), @@ -120,12 +111,7 @@ def get_task_queue(self, q_name): return self._task_queues[q_name] def get_serialized_info(self, pickled=False): - info = { - "config": self._config, - "data_source": self._data_source, - "graph_config": self._graph_config, - "bigtable_config": self._bigtable_config, - } + info = {"config": self._config, "graph_meta": self._graph_meta} if pickled: return pickle.dumps(info) return info @@ -134,8 +120,8 @@ def is_out_of_bounds(self, chunk_coordinate): if not 
self._bitmasks: self._bitmasks = compute_bitmasks( self._n_layers, - self._graph_config.fanout, - s_bits_atomic_layer=self._graph_config.s_bits_atomic_layer, + self._graph_meta.graph_config.fanout, + s_bits_atomic_layer=self._graph_meta.graph_config.s_bits_atomic_layer, ) return np.any(chunk_coordinate < 0) or np.any( chunk_coordinate > 2 ** self._bitmasks[1] From b9033337fb5391df76b8ec2393217c1afc103b8d Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 11 Oct 2019 22:49:14 +0000 Subject: [PATCH 0275/1097] renames and refactors --- pychunkedgraph/backend/__init__.py | 4 +-- pychunkedgraph/backend/chunkedgraph_edits.py | 2 +- pychunkedgraph/backend/chunkedgraph_utils.py | 18 +++++++++++ pychunkedgraph/backend/utils/general.py | 21 ------------- pychunkedgraph/ingest/ingestionmanager.py | 30 +++++++++---------- .../ingest/initialization/abstract_layers.py | 4 +-- pychunkedgraph/utils/general.py | 2 +- 7 files changed, 39 insertions(+), 42 deletions(-) delete mode 100644 pychunkedgraph/backend/utils/general.py diff --git a/pychunkedgraph/backend/__init__.py b/pychunkedgraph/backend/__init__.py index ad0314f39..2a7477ce8 100644 --- a/pychunkedgraph/backend/__init__.py +++ b/pychunkedgraph/backend/__init__.py @@ -13,7 +13,7 @@ from .chunkedgraph_utils import log_n -class GraphMeta: +class ChunkedGraphMeta: def __init__( self, data_source: DataSource, @@ -51,7 +51,7 @@ def layer_count(self) -> int: @property def layer_chunk_bounds(self) -> Dict: - """number of chunks in each dimension in each layer""" + """number of chunks in each dimension in each layer {layer: count}""" if self._layer_bounds_d: return self._layer_bounds_d diff --git a/pychunkedgraph/backend/chunkedgraph_edits.py b/pychunkedgraph/backend/chunkedgraph_edits.py index b6f9ae384..cffd44041 100644 --- a/pychunkedgraph/backend/chunkedgraph_edits.py +++ b/pychunkedgraph/backend/chunkedgraph_edits.py @@ -10,8 +10,8 @@ from . 
import flatgraph_utils from .chunkedgraph_utils import combine_cross_chunk_edge_dicts +from .chunkedgraph_utils import get_bounding_box from .utils import column_keys, serializers -from .utils.general import get_bounding_box from .utils.edge_utils import filter_fake_edges from .utils.edge_utils import map_edges_to_chunks from .utils.edge_utils import get_linking_edges diff --git a/pychunkedgraph/backend/chunkedgraph_utils.py b/pychunkedgraph/backend/chunkedgraph_utils.py index 0b212af34..17e6fd512 100644 --- a/pychunkedgraph/backend/chunkedgraph_utils.py +++ b/pychunkedgraph/backend/chunkedgraph_utils.py @@ -227,6 +227,24 @@ def get_valid_timestamp(timestamp): return get_google_compatible_time_stamp(timestamp, round_up=False) +def get_bounding_box( + source_coords: Sequence[Sequence[int]], + sink_coords: Sequence[Sequence[int]], + bb_offset: Tuple[int, int, int] = (120, 120, 12), +): + if not source_coords: + return None + bb_offset = np.array(list(bb_offset)) + source_coords = np.array(source_coords) + sink_coords = np.array(sink_coords) + + coords = np.concatenate([source_coords, sink_coords]) + bounding_box = [np.min(coords, axis=0), np.max(coords, axis=0)] + bounding_box[0] -= bb_offset + bounding_box[1] += bb_offset + return bounding_box + + def compute_chunk_id( layer: int, x: int, diff --git a/pychunkedgraph/backend/utils/general.py b/pychunkedgraph/backend/utils/general.py deleted file mode 100644 index ab8ee03b5..000000000 --- a/pychunkedgraph/backend/utils/general.py +++ /dev/null @@ -1,21 +0,0 @@ -from typing import Sequence, Tuple - -import numpy as np - - -def get_bounding_box( - source_coords: Sequence[Sequence[int]], - sink_coords: Sequence[Sequence[int]], - bb_offset: Tuple[int, int, int] = (120, 120, 12), -): - if not source_coords: - return None - bb_offset = np.array(list(bb_offset)) - source_coords = np.array(source_coords) - sink_coords = np.array(sink_coords) - - coords = np.concatenate([source_coords, sink_coords]) - bounding_box = 
[np.min(coords, axis=0), np.max(coords, axis=0)] - bounding_box[0] -= bb_offset - bounding_box[1] += bb_offset - return bounding_box diff --git a/pychunkedgraph/ingest/ingestionmanager.py b/pychunkedgraph/ingest/ingestionmanager.py index 2a06a8a67..b3819225b 100644 --- a/pychunkedgraph/ingest/ingestionmanager.py +++ b/pychunkedgraph/ingest/ingestionmanager.py @@ -11,7 +11,7 @@ from ..utils.redis import keys as r_keys from ..utils.redis import get_redis_connection from ..utils.redis import get_rq_queue -from ..backend import GraphMeta +from ..backend import ChunkedGraphMeta from ..backend.chunkedgraph_utils import compute_bitmasks from ..backend.chunkedgraph import ChunkedGraph from ..backend.definitions.config import DataSource @@ -20,13 +20,13 @@ class IngestionManager(object): - def __init__(self, config: IngestConfig, graph_meta: GraphMeta): + def __init__(self, config: IngestConfig, chunkedgraph_meta: ChunkedGraphMeta): self._config = config self._cg = None - self._graph_meta = graph_meta - self._ws_cv = CloudVolume(graph_meta.data_source.watershed) + self._chunkedgraph_meta = chunkedgraph_meta + self._ws_cv = CloudVolume(chunkedgraph_meta.data_source.watershed) self._n_layers = None self._chunk_coords = None self._layer_bounds_d = None @@ -42,16 +42,16 @@ def config(self): return self._config @property - def graph_meta(self): - return self._graph_meta + def chunkedgraph_meta(self): + return self._chunkedgraph_meta @property def cg(self): if self._cg is None: self._cg = ChunkedGraph( - self._graph_meta.graph_config.graph_id, - self._graph_meta.bigtable_config.project_id, - self._graph_meta.bigtable_config.instance_id, + self._chunkedgraph_meta.graph_config.graph_id, + self._chunkedgraph_meta.bigtable_config.project_id, + self._chunkedgraph_meta.bigtable_config.instance_id, ) return self._cg @@ -67,7 +67,7 @@ def redis(self): @property def edge_dtype(self): - if self._graph_meta.data_source.data_version == 4: + if self._chunkedgraph_meta.data_source.data_version 
== 4: dtype = [ ("sv1", np.uint64), ("sv2", np.uint64), @@ -78,7 +78,7 @@ def edge_dtype(self): ("aff_z", np.float32), ("area_z", np.uint64), ] - elif self._graph_meta.data_source.data_version == 3: + elif self._chunkedgraph_meta.data_source.data_version == 3: dtype = [ ("sv1", np.uint64), ("sv2", np.uint64), @@ -89,7 +89,7 @@ def edge_dtype(self): ("aff_z", np.float64), ("area_z", np.uint64), ] - elif self._graph_meta.data_source.data_version == 2: + elif self._chunkedgraph_meta.data_source.data_version == 2: dtype = [ ("sv1", np.uint64), ("sv2", np.uint64), @@ -111,7 +111,7 @@ def get_task_queue(self, q_name): return self._task_queues[q_name] def get_serialized_info(self, pickled=False): - info = {"config": self._config, "graph_meta": self._graph_meta} + info = {"config": self._config, "chunkedgraph_meta": self._chunkedgraph_meta} if pickled: return pickle.dumps(info) return info @@ -120,8 +120,8 @@ def is_out_of_bounds(self, chunk_coordinate): if not self._bitmasks: self._bitmasks = compute_bitmasks( self._n_layers, - self._graph_meta.graph_config.fanout, - s_bits_atomic_layer=self._graph_meta.graph_config.s_bits_atomic_layer, + self._chunkedgraph_meta.graph_config.fanout, + s_bits_atomic_layer=self._chunkedgraph_meta.graph_config.s_bits_atomic_layer, ) return np.any(chunk_coordinate < 0) or np.any( chunk_coordinate > 2 ** self._bitmasks[1] diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 5319be130..c9ae0753b 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -10,7 +10,7 @@ import numpy as np from multiwrapper import multiprocessing_utils as mu -from ...utils.general import chunks +from ...utils.general import chunked from ...backend import flatgraph_utils from ...backend.chunkedgraph import ChunkedGraph from ...backend.chunkedgraph_utils import get_valid_timestamp @@ -177,7 +177,7 @@ def 
_get_atomic_partners(n_threads, layer_id, node_ids, cross_edge_dict): chunk_size = len(node_ids) // n_jobs multi_args = [] - for node_ids_chunk in chunks(node_ids, chunk_size): + for node_ids_chunk in chunked(node_ids, chunk_size): cross_edge_dict_part = {key: cross_edge_dict[key] for key in node_ids_chunk} multi_args.append((layer_id, node_ids_chunk, cross_edge_dict_part)) if not len(multi_args): diff --git a/pychunkedgraph/utils/general.py b/pychunkedgraph/utils/general.py index 682f7b9cb..890be43b8 100644 --- a/pychunkedgraph/utils/general.py +++ b/pychunkedgraph/utils/general.py @@ -22,7 +22,7 @@ def reverse_dictionary(dictionary): return {k: v for k, v in zip(vals, keys)} -def chunks(l: Sequence, n: int): +def chunked(l: Sequence, n: int): """Yield successive n-sized chunks from l.""" for i in range(0, len(l), n): yield l[i:i + n] From 3a16285362a490df5a67577bc40b9412fcb37050 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Sat, 12 Oct 2019 00:13:52 +0000 Subject: [PATCH 0276/1097] wip --- pychunkedgraph/backend/chunkedgraph_utils.py | 20 +++++++++++++++++++ .../ingest/initialization/helpers.py | 20 +++++++++++++++++++ 2 files changed, 40 insertions(+) create mode 100644 pychunkedgraph/ingest/initialization/helpers.py diff --git a/pychunkedgraph/backend/chunkedgraph_utils.py b/pychunkedgraph/backend/chunkedgraph_utils.py index 17e6fd512..7b1e0694f 100644 --- a/pychunkedgraph/backend/chunkedgraph_utils.py +++ b/pychunkedgraph/backend/chunkedgraph_utils.py @@ -4,6 +4,8 @@ from typing import Optional from typing import Union from typing import Sequence +from typing import Tuple +from itertools import product import numpy as np import pandas as pd @@ -19,6 +21,7 @@ ) from cloudvolume import CloudVolume +from . 
import ChunkedGraphMeta from .utils import column_keys from .utils import serializers @@ -245,6 +248,23 @@ def get_bounding_box( return bounding_box +def get_children_chunk_coords( + chunkedgraph_meta: ChunkedGraphMeta, layer: int, chunk_coords: Sequence[int] +) -> np.ndarray: + chunk_coords = np.array(chunk_coords, dtype=int) + children_layer = layer - 1 + layer_boundaries = chunkedgraph_meta.layer_chunk_bounds[children_layer] + children_coords = [] + + for dcoord in product(*[range(chunkedgraph_meta.graph_config.fanout)] * 3): + dcoord = np.array(dcoord, dtype=int) + child_coords = chunk_coords * chunkedgraph_meta.graph_config.fanout + dcoord + check_bounds = np.less(child_coords, layer_boundaries) + if np.all(check_bounds): + children_coords.append(child_coords) + return children_coords + + def compute_chunk_id( layer: int, x: int, diff --git a/pychunkedgraph/ingest/initialization/helpers.py b/pychunkedgraph/ingest/initialization/helpers.py new file mode 100644 index 000000000..1c7455166 --- /dev/null +++ b/pychunkedgraph/ingest/initialization/helpers.py @@ -0,0 +1,20 @@ +from typing import Sequence + +import numpy as np + +from ...backend import ChunkedGraphMeta +from ...backend.chunkedgraph_utils import get_children_chunk_coords + + +def get_touching_atomic_chunks( + chunkedgraph_meta: ChunkedGraphMeta, layer: int, chunk_coords: Sequence[int] +): + """get atomic chunks along touching faces of children chunks of a parent chunk""" + + parent_atomic_chunk_count = chunkedgraph_meta.graph_config.fanout ** (layer - 2) + + layer2_chunk_boundaries = chunkedgraph_meta.layer_chunk_bounds[2] + children_layer = layer - 1 + + + From b7f1c8bc0ee6a9f06e61355e74a954e2b5879aca Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Sat, 12 Oct 2019 19:18:51 +0000 Subject: [PATCH 0277/1097] get relevant atomic chunk coords --- .../ingest/initialization/helpers.py | 43 ++++++++++++++++--- 1 file changed, 36 insertions(+), 7 deletions(-) diff --git 
a/pychunkedgraph/ingest/initialization/helpers.py b/pychunkedgraph/ingest/initialization/helpers.py index 1c7455166..bede2a73d 100644 --- a/pychunkedgraph/ingest/initialization/helpers.py +++ b/pychunkedgraph/ingest/initialization/helpers.py @@ -10,11 +10,40 @@ def get_touching_atomic_chunks( chunkedgraph_meta: ChunkedGraphMeta, layer: int, chunk_coords: Sequence[int] ): """get atomic chunks along touching faces of children chunks of a parent chunk""" - - parent_atomic_chunk_count = chunkedgraph_meta.graph_config.fanout ** (layer - 2) - - layer2_chunk_boundaries = chunkedgraph_meta.layer_chunk_bounds[2] - children_layer = layer - 1 - - + chunk_coords = np.array(chunk_coords, dtype=int) + touching_atomic_chunks = set() + + atomic_chunk_count = chunkedgraph_meta.graph_config.fanout ** (layer - 2) + layer2_chunk_bounds = chunkedgraph_meta.layer_chunk_bounds[2] + + chunk_offset = chunk_coords * atomic_chunk_count + mid = (atomic_chunk_count // 2) - 1 + + # relevant chunks along touching planes at center + for axis_1 in range(atomic_chunk_count): + for axis_2 in range(atomic_chunk_count): + # x-y plane + chunk_1 = chunk_offset + np.array((axis_1, axis_2, mid)) + chunk_2 = chunk_offset + np.array((axis_1, axis_2, mid + 1)) + touching_atomic_chunks.add(chunk_1) + touching_atomic_chunks.add(chunk_2) + + # x-z plane + chunk_1 = chunk_offset + np.array((axis_1, mid, axis_2)) + chunk_2 = chunk_offset + np.array((axis_1, mid + 1, axis_2)) + touching_atomic_chunks.add(chunk_1) + touching_atomic_chunks.add(chunk_2) + + # y-z plane + chunk_1 = chunk_offset + np.array((mid, axis_1, axis_2)) + chunk_2 = chunk_offset + np.array((mid + 1, axis_1, axis_2)) + touching_atomic_chunks.add(chunk_1) + touching_atomic_chunks.add(chunk_2) + + result = [] + for coords in touching_atomic_chunks: + if np.all(np.less(coords, layer2_chunk_bounds)): + result.append(coords) + + return np.array(result, dtype=int) From ae71661472dbc4a3c208ad951c399360cca33247 Mon Sep 17 00:00:00 2001 From: Akhilesh 
Halageri Date: Sat, 12 Oct 2019 23:48:48 +0000 Subject: [PATCH 0278/1097] wip: read relevant atomic chunk cross edges --- .../ingest/initialization/abstract_layers.py | 154 ++++++------------ .../ingest/initialization/helpers.py | 39 +++-- 2 files changed, 68 insertions(+), 125 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index c9ae0753b..1b8528575 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -10,6 +10,7 @@ import numpy as np from multiwrapper import multiprocessing_utils as mu +from .helpers import get_touching_atomic_chunks from ...utils.general import chunked from ...backend import flatgraph_utils from ...backend.chunkedgraph import ChunkedGraph @@ -30,7 +31,7 @@ def add_layer( parent_chunk_id = cg_instance.get_chunk_id(layer=layer_id, x=x, y=y, z=z) start = time.time() - children_ids, cross_edge_dict = _read_children_chunks( + children_ids = _read_children_chunks( n_threads, cg_instance, layer_id, children_coords ) print(f"_read_children_chunks: {time.time()-start}") @@ -71,61 +72,49 @@ def add_layer( def _read_children_chunks(n_threads, cg_instance, layer_id, children_coords): cg_info = cg_instance.get_serialized_info() del cg_info["credentials"] + multi_args = [] for child_coord in children_coords: - multi_args.append((cg_info, layer_id, child_coord)) - chunk_info = mu.multithread_func( - _process_chunk_thread, multi_args, n_threads=min(n_threads, len(multi_args)) + multi_args.append((cg_info, layer_id-1, child_coord)) + children_ids = mu.multithread_func( + _read_chunk_thread, multi_args, n_threads=len(multi_args) ) + return np.concatenate(children_ids) - children_ids = [] - cross_edge_dict = {} - for ids, cross_edge_d in chunk_info: - children_ids.append(ids) - cross_edge_dict = {**cross_edge_dict, **cross_edge_d} - return np.concatenate(children_ids), cross_edge_dict +def 
_get_cross_edges(cg_instance, layer_id, chunk_coord): + layer2_chunks = get_touching_atomic_chunks(cg_instance.meta, layer_id, chunk_coord) -def _process_chunk_thread(args): - cg_info, layer_id, chunk_coord = args - cg_instance = ChunkedGraph(**cg_info) - row_ids, cross_edge_columns_d = _read_chunk(cg_instance, layer_id, chunk_coord) + cg_info = cg_instance.get_serialized_info() + del cg_info["credentials"] + + multi_args = [] + for layer2_chunk in layer2_chunks: + multi_args.append((cg_info, 2, layer2_chunk)) + children_ids = mu.multithread_func( + _read_chunk_thread, multi_args, n_threads=len(multi_args) + ) + return np.concatenate(children_ids) - cross_edge_dict = defaultdict(dict) - for row_id in row_ids: - if row_id in cross_edge_columns_d: - cell_family = cross_edge_columns_d[row_id] - for l in range(layer_id - 1, cg_instance.n_layers): - cross_edges_key = column_keys.Connectivity.CrossChunkEdge[l] - if cross_edges_key in cell_family: - cross_edge_dict[row_id][l] = cell_family[cross_edges_key][0].value - return row_ids, cross_edge_dict +def _read_chunk_thread(args): + cg_info, layer_id, chunk_coord = args + cg_instance = ChunkedGraph(**cg_info) + return _read_chunk(cg_instance, layer_id, chunk_coord) def _read_chunk(cg_instance, layer_id, chunk_coord): x, y, z = chunk_coord - columns = [column_keys.Hierarchy.Child] + [ - column_keys.Connectivity.CrossChunkEdge[l] - for l in range(layer_id - 1, cg_instance.n_layers) - ] - range_read = cg_instance.range_read_chunk(layer_id - 1, x, y, z, columns=columns) + range_read = cg_instance.range_read_chunk( + layer_id, x, y, z, columns=column_keys.Hierarchy.Child + ) # Deserialize row keys and store child with highest id for comparison row_ids = np.fromiter(range_read.keys(), dtype=np.uint64) segment_ids = np.array([cg_instance.get_segment_id(r_id) for r_id in row_ids]) - cross_edge_columns_d = {} max_child_ids = [] - for row_id, row_data in range_read.items(): - cross_edge_columns = { - k: v - for (k, v) in 
row_data.items() - if k.family_id == cg_instance.cross_edge_family_id - } - if cross_edge_columns: - cross_edge_columns_d[row_id] = cross_edge_columns - node_child_ids = row_data[column_keys.Hierarchy.Child][0].value - max_child_ids.append(np.max(node_child_ids)) + for row_data in range_read.values(): + max_child_ids.append(np.max(row_data[0].value)) sorting = np.argsort(segment_ids)[::-1] row_ids = row_ids[sorting] @@ -137,78 +126,33 @@ def _read_chunk(cg_instance, layer_id, chunk_coord): max_child_ids_occ_so_far[i_row] = counter[max_child_ids[i_row]] counter[max_child_ids[i_row]] += 1 row_ids = row_ids[max_child_ids_occ_so_far == 0] - return row_ids, cross_edge_columns_d - + return row_ids -def _resolve_cross_chunk_edges(n_threads, layer_id, node_ids, cross_edge_dict) -> None: - cross_edge_dict = defaultdict(dict, cross_edge_dict) - start = time.time() - atomic_partner_id_dict, atomic_child_id_dict_pairs = _get_atomic_partners( - n_threads, layer_id, node_ids, cross_edge_dict +def _read_atomic_chunk_cross_edges(cg_instance, chunk_coord, cross_edge_layer): + x, y, z = chunk_coord + range_read = cg_instance.range_read_chunk( + 2, x, y, z, columns=column_keys.Connectivity.CrossChunkEdge[cross_edge_layer] ) - print(f"_get_atomic_partners: {time.time()-start}") - atomic_child_id_dict = defaultdict(np.uint64, dict(atomic_child_id_dict_pairs)) - - start = time.time() - cross_edges = _get_cross_edges(atomic_partner_id_dict, atomic_child_id_dict) - print(f"_get_cross_edges: {time.time()-start}") - return cross_edges - - -def _get_atomic_partners_thread(args): - layer_id, node_ids, cross_edge_dict = args - atomic_partner_id_dict = {} - atomic_child_id_dict_pairs = [] - for node_id in node_ids: - if int(layer_id - 1) in cross_edge_dict[node_id]: - atomic_cross_edges = cross_edge_dict[node_id][layer_id - 1] - if len(atomic_cross_edges) > 0: - atomic_partner_id_dict[node_id] = atomic_cross_edges[:, 1] - new_pairs = zip( - atomic_cross_edges[:, 0], [node_id] * 
len(atomic_cross_edges) - ) - atomic_child_id_dict_pairs.extend(new_pairs) - return atomic_partner_id_dict, atomic_child_id_dict_pairs + # Deserialize row keys and store child with highest id for comparison + row_ids = np.fromiter(range_read.keys(), dtype=np.uint64) + segment_ids = np.array([cg_instance.get_segment_id(r_id) for r_id in row_ids]) + max_child_ids = [] + for row_data in range_read.values(): + max_child_ids.append(np.max(row_data[0].value)) -def _get_atomic_partners(n_threads, layer_id, node_ids, cross_edge_dict): - n_jobs = n_threads * 3 - chunk_size = len(node_ids) // n_jobs - multi_args = [] - - for node_ids_chunk in chunked(node_ids, chunk_size): - cross_edge_dict_part = {key: cross_edge_dict[key] for key in node_ids_chunk} - multi_args.append((layer_id, node_ids_chunk, cross_edge_dict_part)) - if not len(multi_args): - return - - atomic_info = mu.multithread_func(_get_atomic_partners_thread, multi_args) - - atomic_partner_id_dict = {} - atomic_child_id_dict_pairs = [] - for partner_d, pairs in atomic_info: - atomic_partner_id_dict = {**atomic_partner_id_dict, **partner_d} - atomic_child_id_dict_pairs.extend(pairs) - - return atomic_partner_id_dict, atomic_child_id_dict_pairs - - -def _get_cross_edges(atomic_partner_id_dict, atomic_child_id_dict): - edge_ids = [] - for k in atomic_partner_id_dict: - this_atomic_partner_ids = atomic_partner_id_dict[k] - partners = { - atomic_child_id_dict[atomic_cross_id] - for atomic_cross_id in this_atomic_partner_ids - if atomic_child_id_dict[atomic_cross_id] != 0 - } - if len(partners) > 0: - partners = np.array(list(partners), dtype=np.uint64)[:, None] - this_ids = np.array([k] * len(partners), dtype=np.uint64)[:, None] - edge_ids.append(np.concatenate([this_ids, partners], axis=1)) - return np.concatenate(edge_ids) + sorting = np.argsort(segment_ids)[::-1] + row_ids = row_ids[sorting] + max_child_ids = np.array(max_child_ids, dtype=np.uint64)[sorting] + counter = defaultdict(int) + max_child_ids_occ_so_far = 
np.zeros(len(max_child_ids), dtype=np.int) + for i_row in range(len(max_child_ids)): + max_child_ids_occ_so_far[i_row] = counter[max_child_ids[i_row]] + counter[max_child_ids[i_row]] += 1 + row_ids = row_ids[max_child_ids_occ_so_far == 0] + return row_ids def _write_out_connected_components( cg_instance, layer_id, parent_chunk_id, ccs, cross_edge_dict, graph_ids, time_stamp diff --git a/pychunkedgraph/ingest/initialization/helpers.py b/pychunkedgraph/ingest/initialization/helpers.py index bede2a73d..32bc5e82e 100644 --- a/pychunkedgraph/ingest/initialization/helpers.py +++ b/pychunkedgraph/ingest/initialization/helpers.py @@ -1,5 +1,5 @@ from typing import Sequence - +from itertools import product import numpy as np from ...backend import ChunkedGraphMeta @@ -20,25 +20,24 @@ def get_touching_atomic_chunks( mid = (atomic_chunk_count // 2) - 1 # relevant chunks along touching planes at center - for axis_1 in range(atomic_chunk_count): - for axis_2 in range(atomic_chunk_count): - # x-y plane - chunk_1 = chunk_offset + np.array((axis_1, axis_2, mid)) - chunk_2 = chunk_offset + np.array((axis_1, axis_2, mid + 1)) - touching_atomic_chunks.add(chunk_1) - touching_atomic_chunks.add(chunk_2) - - # x-z plane - chunk_1 = chunk_offset + np.array((axis_1, mid, axis_2)) - chunk_2 = chunk_offset + np.array((axis_1, mid + 1, axis_2)) - touching_atomic_chunks.add(chunk_1) - touching_atomic_chunks.add(chunk_2) - - # y-z plane - chunk_1 = chunk_offset + np.array((mid, axis_1, axis_2)) - chunk_2 = chunk_offset + np.array((mid + 1, axis_1, axis_2)) - touching_atomic_chunks.add(chunk_1) - touching_atomic_chunks.add(chunk_2) + for axis_1, axis_2 in product(*[range(atomic_chunk_count)] * 2): + # x-y plane + chunk_1 = chunk_offset + np.array((axis_1, axis_2, mid)) + chunk_2 = chunk_offset + np.array((axis_1, axis_2, mid + 1)) + touching_atomic_chunks.add(chunk_1) + touching_atomic_chunks.add(chunk_2) + + # x-z plane + chunk_1 = chunk_offset + np.array((axis_1, mid, axis_2)) + chunk_2 = 
chunk_offset + np.array((axis_1, mid + 1, axis_2)) + touching_atomic_chunks.add(chunk_1) + touching_atomic_chunks.add(chunk_2) + + # y-z plane + chunk_1 = chunk_offset + np.array((mid, axis_1, axis_2)) + chunk_2 = chunk_offset + np.array((mid + 1, axis_1, axis_2)) + touching_atomic_chunks.add(chunk_1) + touching_atomic_chunks.add(chunk_2) result = [] for coords in touching_atomic_chunks: From 930df8b2704f580c7ad54d76500b397933ba2d98 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Sun, 13 Oct 2019 22:38:41 +0000 Subject: [PATCH 0279/1097] wip: higher layer ingest speed up --- .../ingest/initialization/abstract_layers.py | 22 +++++-------------- 1 file changed, 5 insertions(+), 17 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 1b8528575..4a11faa01 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -135,24 +135,12 @@ def _read_atomic_chunk_cross_edges(cg_instance, chunk_coord, cross_edge_layer): 2, x, y, z, columns=column_keys.Connectivity.CrossChunkEdge[cross_edge_layer] ) - # Deserialize row keys and store child with highest id for comparison - row_ids = np.fromiter(range_read.keys(), dtype=np.uint64) - segment_ids = np.array([cg_instance.get_segment_id(r_id) for r_id in row_ids]) - max_child_ids = [] - for row_data in range_read.values(): - max_child_ids.append(np.max(row_data[0].value)) + cross_edges = [r[0].value for r in range_read.values()] + cross_edges = np.concatenate(cross_edges) - sorting = np.argsort(segment_ids)[::-1] - row_ids = row_ids[sorting] - max_child_ids = np.array(max_child_ids, dtype=np.uint64)[sorting] - - counter = defaultdict(int) - max_child_ids_occ_so_far = np.zeros(len(max_child_ids), dtype=np.int) - for i_row in range(len(max_child_ids)): - max_child_ids_occ_so_far[i_row] = counter[max_child_ids[i_row]] - counter[max_child_ids[i_row]] += 1 - row_ids = 
row_ids[max_child_ids_occ_so_far == 0] - return row_ids + sv_ids1 = cross_edges[:,0] + sv_ids2 = cross_edges[:,1] + return def _write_out_connected_components( cg_instance, layer_id, parent_chunk_id, ccs, cross_edge_dict, graph_ids, time_stamp From 10db4bb3b3b06828e0c46899cdad0c7f95ad93bc Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 14 Oct 2019 15:34:57 +0000 Subject: [PATCH 0280/1097] wip --- .../ingest/initialization/abstract_layers.py | 125 +++++++++++------- .../ingest/initialization/helpers.py | 23 ++-- 2 files changed, 93 insertions(+), 55 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 4a11faa01..1a5bb903b 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -28,7 +28,7 @@ def add_layer( n_threads: int = 32, ) -> None: x, y, z = parent_coords - parent_chunk_id = cg_instance.get_chunk_id(layer=layer_id, x=x, y=y, z=z) + # parent_chunk_id = cg_instance.get_chunk_id(layer=layer_id, x=x, y=y, z=z) start = time.time() children_ids = _read_children_chunks( @@ -36,12 +36,9 @@ def add_layer( ) print(f"_read_children_chunks: {time.time()-start}") - # cross_edge_dict, children_ids = _process_chunks(cg_instance, layer_id, children_coords) start = time.time() - edge_ids = _resolve_cross_chunk_edges( - n_threads, layer_id, children_ids, cross_edge_dict - ) - print(f"_resolve_cross_chunk_edges: {time.time()-start}") + edge_ids = _get_cross_edges(n_threads, layer_id, parent_coords) + print(f"_get_cross_edges: {time.time()-start}") print(len(children_ids), len(edge_ids)) # Extract connected components @@ -50,54 +47,39 @@ def add_layer( add_edge_ids = np.vstack([add_node_ids, add_node_ids]).T edge_ids = np.concatenate([edge_ids, add_edge_ids]) - graph, _, _, graph_ids = flatgraph_utils.build_gt_graph( - edge_ids, make_directed=True - ) - - ccs = 
flatgraph_utils.connected_components(graph) - start = time.time() - _write_out_connected_components( - cg_instance, - layer_id, - parent_chunk_id, - ccs, - cross_edge_dict, - graph_ids, - time_stamp, - ) - print(f"_write_out_connected_components: {time.time()-start}") + # graph, _, _, graph_ids = flatgraph_utils.build_gt_graph( + # edge_ids, make_directed=True + # ) + + # ccs = flatgraph_utils.connected_components(graph) + # start = time.time() + # _write_out_connected_components( + # cg_instance, + # layer_id, + # parent_chunk_id, + # ccs, + # cross_edge_dict, + # graph_ids, + # time_stamp, + # ) + # print(f"_write_out_connected_components: {time.time()-start}") return f"{layer_id}_{'_'.join(map(str, (x, y, z)))}" def _read_children_chunks(n_threads, cg_instance, layer_id, children_coords): cg_info = cg_instance.get_serialized_info() del cg_info["credentials"] - + multi_args = [] for child_coord in children_coords: - multi_args.append((cg_info, layer_id-1, child_coord)) + multi_args.append((cg_info, layer_id - 1, child_coord)) children_ids = mu.multithread_func( - _read_chunk_thread, multi_args, n_threads=len(multi_args) + _read_chunk_helper, multi_args, n_threads=len(multi_args) ) return np.concatenate(children_ids) -def _get_cross_edges(cg_instance, layer_id, chunk_coord): - layer2_chunks = get_touching_atomic_chunks(cg_instance.meta, layer_id, chunk_coord) - - cg_info = cg_instance.get_serialized_info() - del cg_info["credentials"] - - multi_args = [] - for layer2_chunk in layer2_chunks: - multi_args.append((cg_info, 2, layer2_chunk)) - children_ids = mu.multithread_func( - _read_chunk_thread, multi_args, n_threads=len(multi_args) - ) - return np.concatenate(children_ids) - - -def _read_chunk_thread(args): +def _read_chunk_helper(args): cg_info, layer_id, chunk_coord = args cg_instance = ChunkedGraph(**cg_info) return _read_chunk(cg_instance, layer_id, chunk_coord) @@ -129,18 +111,69 @@ def _read_chunk(cg_instance, layer_id, chunk_coord): return row_ids +def 
_get_cross_edges(cg_instance, layer_id, chunk_coord): + start = time.time() + layer2_chunks = get_touching_atomic_chunks( + cg_instance.meta, layer_id, chunk_coord, include_both=False + ) + print(f"get_touching_atomic_chunks: {time.time()-start}") + + print(f"touching chunks count (1 side): {len(layer2_chunks)}") + + cg_info = cg_instance.get_serialized_info() + del cg_info["credentials"] + + start = time.time() + multi_args = [] + for layer2_chunk in layer2_chunks: + multi_args.append((cg_info, layer2_chunk, layer_id)) + cross_edges = mu.multithread_func( + _read_atomic_chunk_cross_edges_helper, multi_args, n_threads=len(multi_args) + ) + print(f"_read_atomic_chunk_cross_edges: {time.time()-start}") + + return np.concatenate(cross_edges) + + +def _read_atomic_chunk_cross_edges_helper(args): + cg_info, layer2_chunk, cross_edge_layer = args + cg_instance = ChunkedGraph(**cg_info) + return _read_atomic_chunk_cross_edges(cg_instance, layer2_chunk, cross_edge_layer) + + def _read_atomic_chunk_cross_edges(cg_instance, chunk_coord, cross_edge_layer): x, y, z = chunk_coord range_read = cg_instance.range_read_chunk( 2, x, y, z, columns=column_keys.Connectivity.CrossChunkEdge[cross_edge_layer] ) - cross_edges = [r[0].value for r in range_read.values()] - cross_edges = np.concatenate(cross_edges) + parent_neighboring_chunk_supervoxels_d = defaultdict(list) + for l2_id, row_data in range_read.items(): + edges = row_data[0].value + parent_neighboring_chunk_supervoxels_d[l2_id] = edges[:, 1] + + l2_ids = list(parent_neighboring_chunk_supervoxels_d.keys()) + segment_ids = cg_instance.get_roots(l2_ids, stop_layer=cross_edge_layer) + + cross_edges = [] + for i, l2_id in enumerate(parent_neighboring_chunk_supervoxels_d): + segment_id = segment_ids[i] + neighboring_supervoxels = parent_neighboring_chunk_supervoxels_d[l2_id] + neighboring_segment_ids = cg_instance.get_roots( + neighboring_supervoxels, stop_layer=cross_edge_layer + ) + + edges = np.vstack( + [ + np.array([segment_id] 
* len(neighboring_supervoxels)), + neighboring_segment_ids, + ] + ).T + cross_edges.append(edges) + + cross_edges = np.unique(np.concatenate(cross_edges), axis=0) + return cross_edges - sv_ids1 = cross_edges[:,0] - sv_ids2 = cross_edges[:,1] - return def _write_out_connected_components( cg_instance, layer_id, parent_chunk_id, ccs, cross_edge_dict, graph_ids, time_stamp diff --git a/pychunkedgraph/ingest/initialization/helpers.py b/pychunkedgraph/ingest/initialization/helpers.py index 32bc5e82e..2bb3fe198 100644 --- a/pychunkedgraph/ingest/initialization/helpers.py +++ b/pychunkedgraph/ingest/initialization/helpers.py @@ -7,7 +7,10 @@ def get_touching_atomic_chunks( - chunkedgraph_meta: ChunkedGraphMeta, layer: int, chunk_coords: Sequence[int] + chunkedgraph_meta: ChunkedGraphMeta, + layer: int, + chunk_coords: Sequence[int], + include_both=True, ): """get atomic chunks along touching faces of children chunks of a parent chunk""" chunk_coords = np.array(chunk_coords, dtype=int) @@ -23,21 +26,23 @@ def get_touching_atomic_chunks( for axis_1, axis_2 in product(*[range(atomic_chunk_count)] * 2): # x-y plane chunk_1 = chunk_offset + np.array((axis_1, axis_2, mid)) - chunk_2 = chunk_offset + np.array((axis_1, axis_2, mid + 1)) touching_atomic_chunks.add(chunk_1) - touching_atomic_chunks.add(chunk_2) - # x-z plane chunk_1 = chunk_offset + np.array((axis_1, mid, axis_2)) - chunk_2 = chunk_offset + np.array((axis_1, mid + 1, axis_2)) touching_atomic_chunks.add(chunk_1) - touching_atomic_chunks.add(chunk_2) - # y-z plane chunk_1 = chunk_offset + np.array((mid, axis_1, axis_2)) - chunk_2 = chunk_offset + np.array((mid + 1, axis_1, axis_2)) touching_atomic_chunks.add(chunk_1) - touching_atomic_chunks.add(chunk_2) + + if include_both: + chunk_2 = chunk_offset + np.array((axis_1, axis_2, mid + 1)) + touching_atomic_chunks.add(chunk_2) + + chunk_2 = chunk_offset + np.array((axis_1, mid + 1, axis_2)) + touching_atomic_chunks.add(chunk_2) + + chunk_2 = chunk_offset + np.array((mid 
+ 1, axis_1, axis_2)) + touching_atomic_chunks.add(chunk_2) result = [] for coords in touching_atomic_chunks: From 6cda58896366c79a8e708fd3515079e9eed410ab Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 14 Oct 2019 17:48:55 +0000 Subject: [PATCH 0281/1097] wip --- pychunkedgraph/backend/chunkedgraph.py | 5 +++ pychunkedgraph/backend/chunkedgraph_utils.py | 31 +++++++++---------- pychunkedgraph/ingest/ingestionmanager.py | 1 + .../ingest/initialization/abstract_layers.py | 8 ++--- .../ingest/initialization/helpers.py | 1 - 5 files changed, 24 insertions(+), 22 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 160dd7758..6c7c147fd 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -33,6 +33,7 @@ from google.cloud.bigtable.column_family import MaxVersionsGCRule from . import ( + ChunkedGraphMeta, chunkedgraph_exceptions as cg_exceptions, chunkedgraph_edits as cg_edits, cutting, @@ -97,6 +98,7 @@ def __init__( dataset_info: Optional[object] = None, is_new: bool = False, logger: Optional[logging.Logger] = None, + meta: Optional[ChunkedGraphMeta] = None ) -> None: if logger is None: @@ -187,6 +189,8 @@ def __init__( self._get_chunk_layer_vec = np.vectorize(self.get_chunk_layer) self._get_chunk_id_vec = np.vectorize(self.get_chunk_id) + self.meta = meta + @property def client(self) -> bigtable.Client: return self._client @@ -380,6 +384,7 @@ def get_serialized_info(self): "table_id": self.table_id, "instance_id": self.instance_id, "project_id": self.project_id, + "meta": self.meta, } try: info["credentials"] = self.client.credentials diff --git a/pychunkedgraph/backend/chunkedgraph_utils.py b/pychunkedgraph/backend/chunkedgraph_utils.py index 7b1e0694f..fe4bede01 100644 --- a/pychunkedgraph/backend/chunkedgraph_utils.py +++ b/pychunkedgraph/backend/chunkedgraph_utils.py @@ -21,7 +21,6 @@ ) from cloudvolume import CloudVolume -from . 
import ChunkedGraphMeta from .utils import column_keys from .utils import serializers @@ -248,21 +247,21 @@ def get_bounding_box( return bounding_box -def get_children_chunk_coords( - chunkedgraph_meta: ChunkedGraphMeta, layer: int, chunk_coords: Sequence[int] -) -> np.ndarray: - chunk_coords = np.array(chunk_coords, dtype=int) - children_layer = layer - 1 - layer_boundaries = chunkedgraph_meta.layer_chunk_bounds[children_layer] - children_coords = [] - - for dcoord in product(*[range(chunkedgraph_meta.graph_config.fanout)] * 3): - dcoord = np.array(dcoord, dtype=int) - child_coords = chunk_coords * chunkedgraph_meta.graph_config.fanout + dcoord - check_bounds = np.less(child_coords, layer_boundaries) - if np.all(check_bounds): - children_coords.append(child_coords) - return children_coords +# def get_children_chunk_coords( +# chunkedgraph_meta: ChunkedGraphMeta, layer: int, chunk_coords: Sequence[int] +# ) -> np.ndarray: +# chunk_coords = np.array(chunk_coords, dtype=int) +# children_layer = layer - 1 +# layer_boundaries = chunkedgraph_meta.layer_chunk_bounds[children_layer] +# children_coords = [] + +# for dcoord in product(*[range(chunkedgraph_meta.graph_config.fanout)] * 3): +# dcoord = np.array(dcoord, dtype=int) +# child_coords = chunk_coords * chunkedgraph_meta.graph_config.fanout + dcoord +# check_bounds = np.less(child_coords, layer_boundaries) +# if np.all(check_bounds): +# children_coords.append(child_coords) +# return children_coords def compute_chunk_id( diff --git a/pychunkedgraph/ingest/ingestionmanager.py b/pychunkedgraph/ingest/ingestionmanager.py index b3819225b..00c92c443 100644 --- a/pychunkedgraph/ingest/ingestionmanager.py +++ b/pychunkedgraph/ingest/ingestionmanager.py @@ -52,6 +52,7 @@ def cg(self): self._chunkedgraph_meta.graph_config.graph_id, self._chunkedgraph_meta.bigtable_config.project_id, self._chunkedgraph_meta.bigtable_config.instance_id, + meta=self._chunkedgraph_meta, ) return self._cg diff --git 
a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 1a5bb903b..94e67b4d5 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -31,13 +31,11 @@ def add_layer( # parent_chunk_id = cg_instance.get_chunk_id(layer=layer_id, x=x, y=y, z=z) start = time.time() - children_ids = _read_children_chunks( - n_threads, cg_instance, layer_id, children_coords - ) + children_ids = _read_children_chunks(cg_instance, layer_id, children_coords) print(f"_read_children_chunks: {time.time()-start}") start = time.time() - edge_ids = _get_cross_edges(n_threads, layer_id, parent_coords) + edge_ids = _get_cross_edges(cg_instance, layer_id, parent_coords) print(f"_get_cross_edges: {time.time()-start}") print(len(children_ids), len(edge_ids)) @@ -66,7 +64,7 @@ def add_layer( return f"{layer_id}_{'_'.join(map(str, (x, y, z)))}" -def _read_children_chunks(n_threads, cg_instance, layer_id, children_coords): +def _read_children_chunks(cg_instance, layer_id, children_coords): cg_info = cg_instance.get_serialized_info() del cg_info["credentials"] diff --git a/pychunkedgraph/ingest/initialization/helpers.py b/pychunkedgraph/ingest/initialization/helpers.py index 2bb3fe198..b488c5051 100644 --- a/pychunkedgraph/ingest/initialization/helpers.py +++ b/pychunkedgraph/ingest/initialization/helpers.py @@ -3,7 +3,6 @@ import numpy as np from ...backend import ChunkedGraphMeta -from ...backend.chunkedgraph_utils import get_children_chunk_coords def get_touching_atomic_chunks( From 467e2b1614032ac0e5d145418cc6d92f41d7d455 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 14 Oct 2019 21:19:58 +0000 Subject: [PATCH 0282/1097] wip --- .../ingest/initialization/abstract_layers.py | 25 +++++++++++-------- .../ingest/initialization/helpers.py | 16 ++++++------ 2 files changed, 23 insertions(+), 18 deletions(-) diff --git 
a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 94e67b4d5..bcd407fb0 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -32,7 +32,7 @@ def add_layer( start = time.time() children_ids = _read_children_chunks(cg_instance, layer_id, children_coords) - print(f"_read_children_chunks: {time.time()-start}") + print(f"_read_children_chunks: {time.time()-start}, count {len(children_ids)}") start = time.time() edge_ids = _get_cross_edges(cg_instance, layer_id, parent_coords) @@ -71,9 +71,7 @@ def _read_children_chunks(cg_instance, layer_id, children_coords): multi_args = [] for child_coord in children_coords: multi_args.append((cg_info, layer_id - 1, child_coord)) - children_ids = mu.multithread_func( - _read_chunk_helper, multi_args, n_threads=len(multi_args) - ) + children_ids = mu.multithread_func(_read_chunk_helper, multi_args, n_threads=1) return np.concatenate(children_ids) @@ -124,19 +122,25 @@ def _get_cross_edges(cg_instance, layer_id, chunk_coord): start = time.time() multi_args = [] for layer2_chunk in layer2_chunks: - multi_args.append((cg_info, layer2_chunk, layer_id)) + multi_args.append((cg_info, layer2_chunk, layer_id - 1)) cross_edges = mu.multithread_func( - _read_atomic_chunk_cross_edges_helper, multi_args, n_threads=len(multi_args) + _read_atomic_chunk_cross_edges_helper, multi_args, n_threads=4 ) print(f"_read_atomic_chunk_cross_edges: {time.time()-start}") - return np.concatenate(cross_edges) + return np.unique(np.concatenate(cross_edges), axis=0) def _read_atomic_chunk_cross_edges_helper(args): cg_info, layer2_chunk, cross_edge_layer = args cg_instance = ChunkedGraph(**cg_info) - return _read_atomic_chunk_cross_edges(cg_instance, layer2_chunk, cross_edge_layer) + + start = time.time() + cross_edges = _read_atomic_chunk_cross_edges( + cg_instance, layer2_chunk, cross_edge_layer + ) + print(f"single 
atomic chunk: {time.time()-start}, edges {len(cross_edges)}") + return cross_edges def _read_atomic_chunk_cross_edges(cg_instance, chunk_coord, cross_edge_layer): @@ -169,8 +173,9 @@ def _read_atomic_chunk_cross_edges(cg_instance, chunk_coord, cross_edge_layer): ).T cross_edges.append(edges) - cross_edges = np.unique(np.concatenate(cross_edges), axis=0) - return cross_edges + if cross_edges: + return np.unique(np.concatenate(cross_edges), axis=0) + return [] def _write_out_connected_components( diff --git a/pychunkedgraph/ingest/initialization/helpers.py b/pychunkedgraph/ingest/initialization/helpers.py index b488c5051..6474ce9b8 100644 --- a/pychunkedgraph/ingest/initialization/helpers.py +++ b/pychunkedgraph/ingest/initialization/helpers.py @@ -13,7 +13,7 @@ def get_touching_atomic_chunks( ): """get atomic chunks along touching faces of children chunks of a parent chunk""" chunk_coords = np.array(chunk_coords, dtype=int) - touching_atomic_chunks = set() + touching_atomic_chunks = [] atomic_chunk_count = chunkedgraph_meta.graph_config.fanout ** (layer - 2) layer2_chunk_bounds = chunkedgraph_meta.layer_chunk_bounds[2] @@ -25,28 +25,28 @@ def get_touching_atomic_chunks( for axis_1, axis_2 in product(*[range(atomic_chunk_count)] * 2): # x-y plane chunk_1 = chunk_offset + np.array((axis_1, axis_2, mid)) - touching_atomic_chunks.add(chunk_1) + touching_atomic_chunks.append(chunk_1) # x-z plane chunk_1 = chunk_offset + np.array((axis_1, mid, axis_2)) - touching_atomic_chunks.add(chunk_1) + touching_atomic_chunks.append(chunk_1) # y-z plane chunk_1 = chunk_offset + np.array((mid, axis_1, axis_2)) - touching_atomic_chunks.add(chunk_1) + touching_atomic_chunks.append(chunk_1) if include_both: chunk_2 = chunk_offset + np.array((axis_1, axis_2, mid + 1)) - touching_atomic_chunks.add(chunk_2) + touching_atomic_chunks.append(chunk_2) chunk_2 = chunk_offset + np.array((axis_1, mid + 1, axis_2)) - touching_atomic_chunks.add(chunk_2) + touching_atomic_chunks.append(chunk_2) 
chunk_2 = chunk_offset + np.array((mid + 1, axis_1, axis_2)) - touching_atomic_chunks.add(chunk_2) + touching_atomic_chunks.append(chunk_2) result = [] for coords in touching_atomic_chunks: if np.all(np.less(coords, layer2_chunk_bounds)): result.append(coords) - return np.array(result, dtype=int) + return np.unique(np.array(result, dtype=int), axis=0) From 75093af0fd974686fefde65be7ed73043f056585 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 15 Oct 2019 15:57:44 +0000 Subject: [PATCH 0283/1097] wip --- pychunkedgraph/backend/chunkedgraph.py | 11 +- .../ingest/initialization/abstract_layers.py | 153 +++++++++--------- 2 files changed, 83 insertions(+), 81 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 6c7c147fd..41285212b 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -376,7 +376,7 @@ def is_in_bounds(self, coordinate: Sequence[int]): else: return True - def get_serialized_info(self): + def get_serialized_info(self, credentials=True): """ Rerturns dictionary that can be used to load this ChunkedGraph :return: dict """ @@ -386,10 +386,11 @@ def get_serialized_info(self): "project_id": self.project_id, "meta": self.meta, } - try: - info["credentials"] = self.client.credentials - except: - info["credentials"] = self.client._credentials + if credentials: + try: + info["credentials"] = self.client.credentials + except: + info["credentials"] = self.client._credentials return info def adjust_vol_coordinates_to_cv( diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index bcd407fb0..e1cc282d5 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -4,6 +4,7 @@ import time import datetime +import multiprocessing as mp from collections import defaultdict from typing import Optional, Sequence @@ 
-28,7 +29,6 @@ def add_layer( n_threads: int = 32, ) -> None: x, y, z = parent_coords - # parent_chunk_id = cg_instance.get_chunk_id(layer=layer_id, x=x, y=y, z=z) start = time.time() children_ids = _read_children_chunks(cg_instance, layer_id, children_coords) @@ -45,43 +45,52 @@ def add_layer( add_edge_ids = np.vstack([add_node_ids, add_node_ids]).T edge_ids = np.concatenate([edge_ids, add_edge_ids]) - # graph, _, _, graph_ids = flatgraph_utils.build_gt_graph( - # edge_ids, make_directed=True - # ) - - # ccs = flatgraph_utils.connected_components(graph) - # start = time.time() - # _write_out_connected_components( - # cg_instance, - # layer_id, - # parent_chunk_id, - # ccs, - # cross_edge_dict, - # graph_ids, - # time_stamp, - # ) - # print(f"_write_out_connected_components: {time.time()-start}") + graph, _, _, graph_ids = flatgraph_utils.build_gt_graph( + edge_ids, make_directed=True + ) + + ccs = flatgraph_utils.connected_components(graph) + start = time.time() + _write_connected_components( + cg_instance, + layer_id, + cg_instance.get_chunk_id(layer=layer_id, x=x, y=y, z=z), + ccs, + graph_ids, + time_stamp, + ) + print(f"_write_connected_components: {time.time()-start}") return f"{layer_id}_{'_'.join(map(str, (x, y, z)))}" def _read_children_chunks(cg_instance, layer_id, children_coords): - cg_info = cg_instance.get_serialized_info() - del cg_info["credentials"] - - multi_args = [] - for child_coord in children_coords: - multi_args.append((cg_info, layer_id - 1, child_coord)) - children_ids = mu.multithread_func(_read_chunk_helper, multi_args, n_threads=1) - return np.concatenate(children_ids) + with mp.Manager() as manager: + children_ids_shared = manager.list() + multi_args = [] + for child_coord in children_coords: + multi_args.append( + ( + children_ids_shared, + cg_instance.get_serialized_info(credentials=False), + layer_id - 1, + child_coord, + ) + ) + mu.multiprocess_func( + _read_chunk_helper, + multi_args, + n_threads=min(len(multi_args, 
mp.cpu_count())), + ) + return np.concatenate(children_ids_shared) def _read_chunk_helper(args): - cg_info, layer_id, chunk_coord = args + children_ids_shared, cg_info, layer_id, chunk_coord = args cg_instance = ChunkedGraph(**cg_info) - return _read_chunk(cg_instance, layer_id, chunk_coord) + _read_chunk(children_ids_shared, cg_instance, layer_id, chunk_coord) -def _read_chunk(cg_instance, layer_id, chunk_coord): +def _read_chunk(children_ids_shared, cg_instance, layer_id, chunk_coord): x, y, z = chunk_coord range_read = cg_instance.range_read_chunk( layer_id, x, y, z, columns=column_keys.Hierarchy.Child @@ -103,8 +112,7 @@ def _read_chunk(cg_instance, layer_id, chunk_coord): for i_row in range(len(max_child_ids)): max_child_ids_occ_so_far[i_row] = counter[max_child_ids[i_row]] counter[max_child_ids[i_row]] += 1 - row_ids = row_ids[max_child_ids_occ_so_far == 0] - return row_ids + children_ids_shared.append(row_ids[max_child_ids_occ_so_far == 0]) def _get_cross_edges(cg_instance, layer_id, chunk_coord): @@ -113,11 +121,9 @@ def _get_cross_edges(cg_instance, layer_id, chunk_coord): cg_instance.meta, layer_id, chunk_coord, include_both=False ) print(f"get_touching_atomic_chunks: {time.time()-start}") - print(f"touching chunks count (1 side): {len(layer2_chunks)}") - cg_info = cg_instance.get_serialized_info() - del cg_info["credentials"] + cg_info = cg_instance.get_serialized_info(credentials=False) start = time.time() multi_args = [] @@ -178,74 +184,70 @@ def _read_atomic_chunk_cross_edges(cg_instance, chunk_coord, cross_edge_layer): return [] -def _write_out_connected_components( - cg_instance, layer_id, parent_chunk_id, ccs, cross_edge_dict, graph_ids, time_stamp +def _write_connected_components( + cg_instance, layer_id, parent_chunk_id, ccs, graph_ids, time_stamp ) -> None: + chunked_ccs = chunked(ccs, len(ccs) // mp.cpu_count()) + cg_info = cg_instance.get_serialized_info(credentials=False) + mp_graph_ids = mp.Array("i", graph_ids) + multi_args = [] + + for ccs 
in chunked_ccs: + multi_args.append( + (cg_info, layer_id, parent_chunk_id, ccs, mp_graph_ids, time_stamp) + ) + + mu.multiprocess_func( + _write_components_helper, + multi_args, + n_threads=min(len(multi_args, mp.cpu_count())), + ) + + +def _write_components_helper(args): + cg_info, layer_id, parent_chunk_id, ccs, mp_graph_ids, time_stamp = args + graph_ids = np.frombuffer(mp_graph_ids.get_obj()) + _write_components( + ChunkedGraph(**cg_info), layer_id, parent_chunk_id, ccs, graph_ids, time_stamp + ) + + +def _write_components( + cg_instance, layer_id, parent_chunk_id, ccs, graph_ids, time_stamp +): time_stamp = get_valid_timestamp(time_stamp) - parent_layer_ids = range(layer_id, cg_instance.n_layers + 1) - cc_connections = {l: [] for l in parent_layer_ids} + cc_connections = {l: [] for l in (layer_id, cg_instance.n_layers)} for i_cc, cc in enumerate(ccs): node_ids = graph_ids[cc] - parent_cross_edges = defaultdict(list) - - # Collect row info for nodes that are in this chunk - for node_id in node_ids: - if node_id in cross_edge_dict: - # Extract edges relevant to this node - for l in range(layer_id, cg_instance.n_layers): - if ( - l in cross_edge_dict[node_id] - and len(cross_edge_dict[node_id][l]) > 0 - ): - parent_cross_edges[l].append(cross_edge_dict[node_id][l]) - if cg_instance.use_skip_connections and len(node_ids) == 1: - for l in parent_layer_ids: - if l == cg_instance.n_layers or len(parent_cross_edges[l]) > 0: - cc_connections[l].append([node_ids, parent_cross_edges]) - break + cc_connections[cg_instance.n_layers].append([node_ids]) else: - cc_connections[layer_id].append([node_ids, parent_cross_edges]) + cc_connections[layer_id].append([node_ids]) - # Write out cc info rows = [] parent_chunk_id_dict = cg_instance.get_parent_chunk_id_dict(parent_chunk_id) # Iterate through layers - for parent_layer_id in parent_layer_ids: - if len(cc_connections[parent_layer_id]) == 0: - continue - + for parent_layer_id in (layer_id, cg_instance.n_layers): 
parent_chunk_id = parent_chunk_id_dict[parent_layer_id] reserved_parent_ids = cg_instance.get_unique_node_id_range( parent_chunk_id, step=len(cc_connections[parent_layer_id]) ) - for i_cc, cc_info in enumerate(cc_connections[parent_layer_id]): - node_ids, parent_cross_edges = cc_info - + for i_cc, node_ids in enumerate(cc_connections[parent_layer_id]): parent_id = reserved_parent_ids[i_cc] - val_dict = {column_keys.Hierarchy.Parent: parent_id} - for node_id in node_ids: rows.append( cg_instance.mutate_row( serializers.serialize_uint64(node_id), - val_dict, + {column_keys.Hierarchy.Parent: parent_id}, time_stamp=time_stamp, ) ) - val_dict = {column_keys.Hierarchy.Child: node_ids} - for l in range(parent_layer_id, cg_instance.n_layers): - if l in parent_cross_edges and len(parent_cross_edges[l]) > 0: - val_dict[ - column_keys.Connectivity.CrossChunkEdge[l] - ] = np.concatenate(parent_cross_edges[l]) - rows.append( cg_instance.mutate_row( serializers.serialize_uint64(parent_id), - val_dict, + {column_keys.Hierarchy.Child: node_ids}, time_stamp=time_stamp, ) ) @@ -253,6 +255,5 @@ def _write_out_connected_components( if len(rows) > 100000: cg_instance.bulk_write(rows) rows = [] + cg_instance.bulk_write(rows) - if len(rows) > 0: - cg_instance.bulk_write(rows) From 3384e86f1a1f91b447837fe22ce142edda7884b4 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 15 Oct 2019 17:12:11 +0000 Subject: [PATCH 0284/1097] wip --- pychunkedgraph/backend/__init__.py | 4 +- pychunkedgraph/backend/chunkedgraph_utils.py | 18 +-------- pychunkedgraph/ingest/cli.py | 9 +++-- pychunkedgraph/ingest/ingestionmanager.py | 3 +- pychunkedgraph/ingest/ran_ingestion_v2.py | 32 ++++++++------- test_add_layer.py | 42 -------------------- 6 files changed, 26 insertions(+), 82 deletions(-) delete mode 100644 test_add_layer.py diff --git a/pychunkedgraph/backend/__init__.py b/pychunkedgraph/backend/__init__.py index 2a7477ce8..4b12f893a 100644 --- a/pychunkedgraph/backend/__init__.py +++ 
b/pychunkedgraph/backend/__init__.py @@ -46,8 +46,8 @@ def layer_count(self) -> int: return self._layer_count bbox = np.array(self._ws_cv.bounds.to_list()).reshape(2, 3) n_chunks = ((bbox[1] - bbox[0]) / self._graph_config.chunk_size).astype(np.int) - n_layers = int(np.ceil(log_n(np.max(n_chunks), self._graph_config.fanout))) + 2 - return n_layers + self._layer_count = int(np.ceil(log_n(np.max(n_chunks), self._graph_config.fanout))) + 2 + return self._layer_count @property def layer_chunk_bounds(self) -> Dict: diff --git a/pychunkedgraph/backend/chunkedgraph_utils.py b/pychunkedgraph/backend/chunkedgraph_utils.py index fe4bede01..99e7ad642 100644 --- a/pychunkedgraph/backend/chunkedgraph_utils.py +++ b/pychunkedgraph/backend/chunkedgraph_utils.py @@ -247,23 +247,6 @@ def get_bounding_box( return bounding_box -# def get_children_chunk_coords( -# chunkedgraph_meta: ChunkedGraphMeta, layer: int, chunk_coords: Sequence[int] -# ) -> np.ndarray: -# chunk_coords = np.array(chunk_coords, dtype=int) -# children_layer = layer - 1 -# layer_boundaries = chunkedgraph_meta.layer_chunk_bounds[children_layer] -# children_coords = [] - -# for dcoord in product(*[range(chunkedgraph_meta.graph_config.fanout)] * 3): -# dcoord = np.array(dcoord, dtype=int) -# child_coords = chunk_coords * chunkedgraph_meta.graph_config.fanout + dcoord -# check_bounds = np.less(child_coords, layer_boundaries) -# if np.all(check_bounds): -# children_coords.append(child_coords) -# return children_coords - - def compute_chunk_id( layer: int, x: int, @@ -299,5 +282,6 @@ def get_voxels_boundary(cv: CloudVolume) -> Sequence[int]: def get_chunks_boundary(voxel_boundary, chunk_size): + """returns number of chunks in each dimension""" return np.ceil((voxel_boundary / chunk_size)).astype(np.int) diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index c4c83c774..16303fe53 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -18,6 +18,7 @@ from .ran_ingestion_v2 
import enqueue_atomic_tasks from ..utils.redis import get_redis_connection from ..utils.redis import keys as r_keys +from ..backend import ChunkedGraphMeta from ..backend.definitions.config import DataSource from ..backend.definitions.config import GraphConfig from ..backend.definitions.config import BigTableConfig @@ -69,9 +70,9 @@ def ingest_graph( s_bits_atomic_layer=10, ) bigtable_config = BigTableConfig() - imanager = IngestionManager( - ingest_config, data_source, graph_config, bigtable_config - ) + + meta = ChunkedGraphMeta(data_source, graph_config, bigtable_config) + imanager = IngestionManager(ingest_config, meta) imanager.redis.flushdb() if ingest_config.build_graph: @@ -90,7 +91,7 @@ def ingest_graph( def ingest_status(): redis = get_redis_connection() imanager = IngestionManager.from_pickle(redis.get(r_keys.INGESTION_MANAGER)) - for layer in range(2, imanager.n_layers): + for layer in range(2, imanager.chunkedgraph_meta.layer_count): layer_count = redis.hlen(f"{layer}c") print(f"{layer}\t: {layer_count}") diff --git a/pychunkedgraph/ingest/ingestionmanager.py b/pychunkedgraph/ingest/ingestionmanager.py index 00c92c443..9427eb0b8 100644 --- a/pychunkedgraph/ingest/ingestionmanager.py +++ b/pychunkedgraph/ingest/ingestionmanager.py @@ -27,7 +27,6 @@ def __init__(self, config: IngestConfig, chunkedgraph_meta: ChunkedGraphMeta): self._cg = None self._chunkedgraph_meta = chunkedgraph_meta self._ws_cv = CloudVolume(chunkedgraph_meta.data_source.watershed) - self._n_layers = None self._chunk_coords = None self._layer_bounds_d = None @@ -120,7 +119,7 @@ def get_serialized_info(self, pickled=False): def is_out_of_bounds(self, chunk_coordinate): if not self._bitmasks: self._bitmasks = compute_bitmasks( - self._n_layers, + self.chunkedgraph_meta.layer_count, self._chunkedgraph_meta.graph_config.fanout, s_bits_atomic_layer=self._chunkedgraph_meta.graph_config.s_bits_atomic_layer, ) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py 
b/pychunkedgraph/ingest/ran_ingestion_v2.py index e14aeb42b..935c624ae 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -22,6 +22,7 @@ from .ingestionmanager import IngestionManager from .initialization.atomic_layer import add_atomic_edges from .initialization.abstract_layers import add_layer +from ..backend import ChunkedGraphMeta from ..utils.redis import keys as r_keys from ..io.edges import get_chunk_edges from ..io.edges import put_chunk_edges @@ -32,23 +33,21 @@ from ..backend.definitions.edges import Edges, CX_CHUNK from ..backend.definitions.edges import TYPES as EDGE_TYPES - chunk_id_str = lambda layer, coords: f"{layer}_{'_'.join(map(str, coords))}" def _get_children_coords( - imanager: IngestionManager, layer: int, parent_coords: Sequence[int] + cg_meta: ChunkedGraphMeta, layer: int, chunk_coords ) -> np.ndarray: - """ - :param: layer - layer of children chunks - """ - layer_bounds = imanager.layer_chunk_bounds[layer] + chunk_coords = np.array(chunk_coords, dtype=int) + children_layer = layer - 1 + layer_boundaries = cg_meta.layer_chunk_bounds[children_layer] children_coords = [] - parent_coords = np.array(parent_coords, dtype=int) - for dcoord in product(*[range(imanager.graph_config.fanout)] * 3): + + for dcoord in product(*[range(cg_meta.graph_config.fanout)] * 3): dcoord = np.array(dcoord, dtype=int) - child_coords = parent_coords * imanager.graph_config.fanout + dcoord - check_bounds = np.less(child_coords, layer_bounds[:, 1]) + child_coords = chunk_coords * cg_meta.graph_config.fanout + dcoord + check_bounds = np.less(child_coords, layer_boundaries) if np.all(check_bounds): children_coords.append(child_coords) return children_coords @@ -67,13 +66,16 @@ def _post_task_completion(imanager: IngestionManager, layer: int, coords: np.nda parent_coords = np.array(coords, int) // imanager.graph_config.fanout parent_chunk_str = "_".join(map(str, parent_coords)) if not imanager.redis.hget(parent_layer, 
parent_chunk_str): - children_count = len(_get_children_coords(imanager, layer, parent_coords)) + children_count = len( + _get_children_coords( + imanager.chunkedgraph_meta, parent_layer, parent_coords + ) + ) imanager.redis.hset(parent_layer, parent_chunk_str, children_count) imanager.redis.hincrby(parent_layer, parent_chunk_str, -1) children_left = int( imanager.redis.hget(parent_layer, parent_chunk_str).decode("utf-8") ) - return if children_left == 0: parents_queue = imanager.get_task_queue(imanager.config.parents_q_name) @@ -82,16 +84,16 @@ def _post_task_completion(imanager: IngestionManager, layer: int, coords: np.nda job_id=chunk_id_str(parent_layer, parent_coords), job_timeout="59m", result_ttl=0, - at_front=True, args=( imanager.get_serialized_info(), parent_layer, parent_coords, - _get_children_coords(imanager, layer, parent_coords), + _get_children_coords( + imanager.chunkedgraph_meta, parent_layer, parent_coords + ), ), ) imanager.redis.hdel(parent_layer, parent_chunk_str) - # put in completed (c) hash imanager.redis.hset(f"{parent_layer}q", parent_chunk_str, "") diff --git a/test_add_layer.py b/test_add_layer.py deleted file mode 100644 index 7f47f5c44..000000000 --- a/test_add_layer.py +++ /dev/null @@ -1,42 +0,0 @@ -import numpy as np - -from pychunkedgraph.ingest import IngestConfig -from pychunkedgraph.ingest.ingestionmanager import IngestionManager -from pychunkedgraph.backend.definitions.config import DataSource -from pychunkedgraph.backend.definitions.config import GraphConfig -from pychunkedgraph.backend.definitions.config import BigTableConfig - -from pychunkedgraph.backend.chunkedgraph import ChunkedGraph -from pychunkedgraph.ingest.initialization.abstract_layers import add_layer -from pychunkedgraph.ingest.ran_ingestion_v2 import enqueue_atomic_tasks -from pychunkedgraph.ingest.ran_ingestion_v2 import _get_children_coords - -processed = True -graph_id = "akhilesh-minnie65" - -ingest_config = IngestConfig(build_graph=True) -data_source = 
DataSource( - agglomeration="gs://ranl-scratch/minnie65_0/agg", - watershed="gs://microns-seunglab/minnie65/ws_minnie65_0", - edges="gs://chunkedgraph/minnie65_0/edges", - components="gs://chunkedgraph/minnie65_0/components", - use_raw_edges=not processed, - use_raw_components=not processed, - data_version=2, -) -graph_config = GraphConfig( - graph_id=graph_id, - chunk_size=np.array([256, 256, 512], dtype=int), - fanout=2, - s_bits_atomic_layer=10, -) -bigtable_config = BigTableConfig() -imanager = IngestionManager(ingest_config, data_source, graph_config, bigtable_config) - -cg = ChunkedGraph("akhilesh-minnie65") -layer = 7 -parent_coords = [10, 6, 0] -children_coords = _get_children_coords(imanager, layer-1, parent_coords) - -print(len(children_coords)) -add_layer(cg, layer, parent_coords, children_coords) From 0a61177d445add4d7ab7b6062e1171297c6339fb Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 15 Oct 2019 17:30:21 +0000 Subject: [PATCH 0285/1097] wip --- pychunkedgraph/backend/__init__.py | 64 ++++++++++++++++++++++- pychunkedgraph/ingest/ingestionmanager.py | 47 ----------------- pychunkedgraph/ingest/ran_ingestion_v2.py | 17 +++--- 3 files changed, 70 insertions(+), 58 deletions(-) diff --git a/pychunkedgraph/backend/__init__.py b/pychunkedgraph/backend/__init__.py index 4b12f893a..dc71f2967 100644 --- a/pychunkedgraph/backend/__init__.py +++ b/pychunkedgraph/backend/__init__.py @@ -1,5 +1,6 @@ from typing import Sequence from typing import Dict +from typing import List import numpy as np from cloudvolume import CloudVolume @@ -7,6 +8,7 @@ from .chunkedgraph_utils import get_voxels_boundary from .chunkedgraph_utils import get_chunks_boundary +from .chunkedgraph_utils import compute_bitmasks from .definitions.config import DataSource from .definitions.config import GraphConfig from .definitions.config import BigTableConfig @@ -28,6 +30,8 @@ def __init__( self._layer_bounds_d = None self._layer_count = None + self._bitmasks = None + @property def 
data_source(self): return self._data_source @@ -46,12 +50,14 @@ def layer_count(self) -> int: return self._layer_count bbox = np.array(self._ws_cv.bounds.to_list()).reshape(2, 3) n_chunks = ((bbox[1] - bbox[0]) / self._graph_config.chunk_size).astype(np.int) - self._layer_count = int(np.ceil(log_n(np.max(n_chunks), self._graph_config.fanout))) + 2 + self._layer_count = ( + int(np.ceil(log_n(np.max(n_chunks), self._graph_config.fanout))) + 2 + ) return self._layer_count @property def layer_chunk_bounds(self) -> Dict: - """number of chunks in each dimension in each layer {layer: count}""" + """number of chunks in each dimension in each layer {layer: [x,y,z]}""" if self._layer_bounds_d: return self._layer_bounds_d @@ -66,3 +72,57 @@ def layer_chunk_bounds(self) -> Dict: layer_bounds_d[layer] = np.ceil(layer_bounds).astype(np.int) self._layer_bounds_d = layer_bounds_d return self._layer_bounds_d + + @property + def layer_chunk_counts(self) -> List: + """number of chunks in each layer""" + counts = [] + for layer in range(2, self.layer_count): + counts.append(np.prod(self.layer_chunk_bounds[layer])) + return counts + + @property + def edge_dtype(self): + if self.data_source.data_version == 4: + dtype = [ + ("sv1", np.uint64), + ("sv2", np.uint64), + ("aff_x", np.float32), + ("area_x", np.uint64), + ("aff_y", np.float32), + ("area_y", np.uint64), + ("aff_z", np.float32), + ("area_z", np.uint64), + ] + elif self.data_source.data_version == 3: + dtype = [ + ("sv1", np.uint64), + ("sv2", np.uint64), + ("aff_x", np.float64), + ("area_x", np.uint64), + ("aff_y", np.float64), + ("area_y", np.uint64), + ("aff_z", np.float64), + ("area_z", np.uint64), + ] + elif self.data_source.data_version == 2: + dtype = [ + ("sv1", np.uint64), + ("sv2", np.uint64), + ("aff", np.float32), + ("area", np.uint64), + ] + else: + raise Exception() + return dtype + + def is_out_of_bounds(self, chunk_coordinate): + if not self._bitmasks: + self._bitmasks = compute_bitmasks( + self.layer_count, + 
self.graph_config.fanout, + s_bits_atomic_layer=self.graph_config.s_bits_atomic_layer, + ) + return np.any(chunk_coordinate < 0) or np.any( + chunk_coordinate > 2 ** self._bitmasks[1] + ) diff --git a/pychunkedgraph/ingest/ingestionmanager.py b/pychunkedgraph/ingest/ingestionmanager.py index 9427eb0b8..88b57771e 100644 --- a/pychunkedgraph/ingest/ingestionmanager.py +++ b/pychunkedgraph/ingest/ingestionmanager.py @@ -12,7 +12,6 @@ from ..utils.redis import get_redis_connection from ..utils.redis import get_rq_queue from ..backend import ChunkedGraphMeta -from ..backend.chunkedgraph_utils import compute_bitmasks from ..backend.chunkedgraph import ChunkedGraph from ..backend.definitions.config import DataSource from ..backend.definitions.config import GraphConfig @@ -65,41 +64,6 @@ def redis(self): ) return self._redis - @property - def edge_dtype(self): - if self._chunkedgraph_meta.data_source.data_version == 4: - dtype = [ - ("sv1", np.uint64), - ("sv2", np.uint64), - ("aff_x", np.float32), - ("area_x", np.uint64), - ("aff_y", np.float32), - ("area_y", np.uint64), - ("aff_z", np.float32), - ("area_z", np.uint64), - ] - elif self._chunkedgraph_meta.data_source.data_version == 3: - dtype = [ - ("sv1", np.uint64), - ("sv2", np.uint64), - ("aff_x", np.float64), - ("area_x", np.uint64), - ("aff_y", np.float64), - ("area_y", np.uint64), - ("aff_z", np.float64), - ("area_z", np.uint64), - ] - elif self._chunkedgraph_meta.data_source.data_version == 2: - dtype = [ - ("sv1", np.uint64), - ("sv2", np.uint64), - ("aff", np.float32), - ("area", np.uint64), - ] - else: - raise Exception() - return dtype - @classmethod def from_pickle(cls, serialized_info): return cls(**pickle.loads(serialized_info)) @@ -116,14 +80,3 @@ def get_serialized_info(self, pickled=False): return pickle.dumps(info) return info - def is_out_of_bounds(self, chunk_coordinate): - if not self._bitmasks: - self._bitmasks = compute_bitmasks( - self.chunkedgraph_meta.layer_count, - 
self._chunkedgraph_meta.graph_config.fanout, - s_bits_atomic_layer=self._chunkedgraph_meta.graph_config.s_bits_atomic_layer, - ) - return np.any(chunk_coordinate < 0) or np.any( - chunk_coordinate > 2 ** self._bitmasks[1] - ) - diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 935c624ae..cb6bfef3d 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -106,7 +106,8 @@ def _create_parent_chunk(im_info, layer, parent_coords, child_chunk_coords): def enqueue_atomic_tasks( imanager: IngestionManager, batch_size: int = 50000, interval: float = 300.0 ): - chunk_coords = list(imanager.chunk_coord_gen) + atomic_chunk_bounds = imanager.chunkedgraph_meta.layer_chunk_bounds[2] + chunk_coords = list(product(*[range(*r) for r in atomic_chunk_bounds])) np.random.shuffle(chunk_coords) # test chunks @@ -238,7 +239,7 @@ def _get_cont_chunk_coords(imanager, chunk_coord_a, chunk_coord_b): continue c_chunk_coord = chunk_coord_l + np.array([dx, dy, dz]) - if imanager.is_out_of_bounds(c_chunk_coord): + if imanager.chunkedgraph_meta.is_out_of_bounds(c_chunk_coord): continue c_chunk_coords.append(c_chunk_coord) return c_chunk_coords @@ -263,7 +264,7 @@ def _collect_edge_data(imanager, chunk_coord): swap = defaultdict(list) x, y, z = chunk_coord for _x, _y, _z in product([x - 1, x], [y - 1, y], [z - 1, z]): - if imanager.is_out_of_bounds(np.array([_x, _y, _z])): + if imanager.chunkedgraph_meta.is_out_of_bounds(np.array([_x, _y, _z])): continue filename = f"in_chunk_0_{_x}_{_y}_{_z}_{chunk_id}.data" filenames["in"].append(filename) @@ -276,7 +277,7 @@ def _collect_edge_data(imanager, chunk_coord): x, y, z = adjacent_chunk_coord adjacent_chunk_id = compute_chunk_id(layer=1, x=x, y=y, z=z) - if imanager.is_out_of_bounds(adjacent_chunk_coord): + if imanager.chunkedgraph_meta.is_out_of_bounds(adjacent_chunk_coord): continue c_chunk_coords = _get_cont_chunk_coords( imanager, chunk_coord, 
adjacent_chunk_coord @@ -307,14 +308,12 @@ def _collect_edge_data(imanager, chunk_coord): if file["error"] or file["content"] is None: continue + edge_dtype = imanager.chunkedgraph_meta.edge_dtype if swap[file["filename"]]: - this_dtype = [ - imanager.edge_dtype[1], - imanager.edge_dtype[0], - ] + imanager.edge_dtype[2:] + this_dtype = [edge_dtype[1], edge_dtype[0]] + edge_dtype[2:] content = np.frombuffer(file["content"], dtype=this_dtype) else: - content = np.frombuffer(file["content"], dtype=imanager.edge_dtype) + content = np.frombuffer(file["content"], dtype=edge_dtype) data.append(content) read_counter[k] += 1 From 68cf4f569f7b501fcfde47b10e1d08f512f79b66 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 15 Oct 2019 18:04:24 +0000 Subject: [PATCH 0286/1097] fix properties moved to meta --- pychunkedgraph/ingest/cli.py | 12 ++--- pychunkedgraph/ingest/ran_ingestion_v2.py | 57 +++++++++++++---------- 2 files changed, 38 insertions(+), 31 deletions(-) diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index 16303fe53..9a24ec75d 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -55,17 +55,17 @@ def ingest_graph( ): ingest_config = IngestConfig(build_graph=True) data_source = DataSource( - agglomeration="gs://ranl-scratch/minnie65_0/agg", - watershed="gs://microns-seunglab/minnie65/ws_minnie65_0", - edges="gs://chunkedgraph/minnie65_0/edges", - components="gs://chunkedgraph/minnie65_0/components", + agglomeration="gs://ranl/scratch/pinky100_ca_com/agg", + watershed="gs://neuroglancer/pinky100_v0/ws/pinky100_ca_com", + edges="gs://chunkedgraph/pinky100/edges", + components="gs://chunkedgraph/pinky100/components", use_raw_edges=not processed, use_raw_components=not processed, - data_version=2, + data_version=4, ) graph_config = GraphConfig( graph_id=graph_id, - chunk_size=np.array([256, 256, 512], dtype=int), + chunk_size=np.array([512, 512, 128], dtype=int), fanout=2, s_bits_atomic_layer=10, ) diff --git 
a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index cb6bfef3d..c2642c667 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -60,10 +60,12 @@ def _post_task_completion(imanager: IngestionManager, layer: int, coords: np.nda imanager.redis.hset(f"{layer}c", chunk_str, "") parent_layer = layer + 1 - if parent_layer > imanager.n_layers: + if parent_layer > imanager.chunkedgraph_meta.layer_count: return - parent_coords = np.array(coords, int) // imanager.graph_config.fanout + parent_coords = ( + np.array(coords, int) // imanager.chunkedgraph_meta.graph_config.fanout + ) parent_chunk_str = "_".join(map(str, parent_coords)) if not imanager.redis.hget(parent_layer, parent_chunk_str): children_count = len( @@ -107,22 +109,21 @@ def enqueue_atomic_tasks( imanager: IngestionManager, batch_size: int = 50000, interval: float = 300.0 ): atomic_chunk_bounds = imanager.chunkedgraph_meta.layer_chunk_bounds[2] - chunk_coords = list(product(*[range(*r) for r in atomic_chunk_bounds])) + chunk_coords = list(product(*[range(r) for r in atomic_chunk_bounds])) np.random.shuffle(chunk_coords) # test chunks - # chunk_coords = [ - # [0, 0, 0], - # [0, 0, 1], - # [0, 1, 0], - # [0, 1, 1], - # [1, 0, 0], - # [1, 0, 1], - # [1, 1, 0], - # [1, 1, 1], - # ] - - print(f"Chunk count: {len(chunk_coords)}") + chunk_coords = [ + [0, 0, 0], + [0, 0, 1], + [0, 1, 0], + [0, 1, 1], + [1, 0, 0], + [1, 0, 1], + [1, 1, 0], + [1, 1, 1], + ] + for chunk_coord in chunk_coords: atomic_queue = imanager.get_task_queue(imanager.config.atomic_q_name) # for optimal use of redis memory wait if queue limit is reached @@ -160,13 +161,15 @@ def _get_chunk_data(imanager, coord) -> Tuple[Dict, Dict]: """ chunk_edges = ( _read_raw_edge_data(imanager, coord) - if imanager.data_source.use_raw_edges - else get_chunk_edges(imanager.data_source.edges, [coord]) + if imanager.chunkedgraph_meta.data_source.use_raw_edges + else 
get_chunk_edges(imanager.chunkedgraph_meta.data_source.edges, [coord]) ) mapping = ( _read_raw_agglomeration_data(imanager, coord) - if imanager.data_source.use_raw_components - else get_chunk_components(imanager.data_source.components, coord) + if imanager.chunkedgraph_meta.data_source.use_raw_components + else get_chunk_components( + imanager.chunkedgraph_meta.data_source.components, coord + ) ) return chunk_edges, mapping @@ -195,7 +198,9 @@ def _read_raw_edge_data(imanager, coord) -> Dict: no_edges = no_edges and not sv_ids1.size if no_edges: return chunk_edges - put_chunk_edges(imanager.data_source.edges, coord, chunk_edges, 17) + put_chunk_edges( + imanager.chunkedgraph_meta.data_source.edges, coord, chunk_edges, 17 + ) return chunk_edges @@ -255,7 +260,7 @@ def _collect_edge_data(imanager, chunk_coord): :return: dict of np.ndarrays """ subfolder = "chunked_rg" - base_path = f"{imanager.data_source.agglomeration}/{subfolder}/" + base_path = f"{imanager.chunkedgraph_meta.data_source.agglomeration}/{subfolder}/" chunk_coord = np.array(chunk_coord) x, y, z = chunk_coord chunk_id = compute_chunk_id(layer=1, x=x, y=y, z=z) @@ -348,13 +353,13 @@ def _read_raw_agglomeration_data(imanager, chunk_coord: np.ndarray): Collects agglomeration information & builds connected component mapping """ subfolder = "remap" - base_path = f"{imanager.data_source.agglomeration}/{subfolder}/" + base_path = f"{imanager.chunkedgraph_meta.data_source.agglomeration}/{subfolder}/" chunk_coord = np.array(chunk_coord) x, y, z = chunk_coord chunk_id = compute_chunk_id(layer=1, x=x, y=y, z=z) filenames = [] - for mip_level in range(0, int(imanager.n_layers - 1)): + for mip_level in range(0, int(imanager.chunkedgraph_meta.layer_count - 1)): x, y, z = np.array(chunk_coord / 2 ** mip_level, dtype=np.int) filenames.append(f"done_{mip_level}_{x}_{y}_{z}_{chunk_id}.data.zst") @@ -366,7 +371,7 @@ def _read_raw_agglomeration_data(imanager, chunk_coord: np.ndarray): x, y, z = adjacent_chunk_coord 
adjacent_chunk_id = compute_chunk_id(layer=1, x=x, y=y, z=z) - for mip_level in range(0, int(imanager.n_layers - 1)): + for mip_level in range(0, int(imanager.chunkedgraph_meta.layer_count - 1)): x, y, z = np.array(adjacent_chunk_coord / 2 ** mip_level, dtype=np.int) filenames.append( f"done_{mip_level}_{x}_{y}_{z}_{adjacent_chunk_id}.data.zst" @@ -382,7 +387,9 @@ def _read_raw_agglomeration_data(imanager, chunk_coord: np.ndarray): mapping.update(dict(zip(cc, [i_cc] * len(cc)))) if mapping: - put_chunk_components(imanager.data_source.components, components, chunk_coord) + put_chunk_components( + imanager.chunkedgraph_meta.data_source.components, components, chunk_coord + ) return mapping From 98b081a4bf81155a71fbdb7d154addd828ecb1a1 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 15 Oct 2019 19:56:15 +0000 Subject: [PATCH 0287/1097] wip --- pychunkedgraph/ingest/cli.py | 18 ++++------------- pychunkedgraph/ingest/ingestion_utils.py | 7 ++++--- .../ingest/initialization/abstract_layers.py | 14 +++++++++---- pychunkedgraph/ingest/ran_ingestion_v2.py | 20 +++++++++---------- 4 files changed, 28 insertions(+), 31 deletions(-) diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index 9a24ec75d..a55574b7b 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -91,22 +91,12 @@ def ingest_graph( def ingest_status(): redis = get_redis_connection() imanager = IngestionManager.from_pickle(redis.get(r_keys.INGESTION_MANAGER)) - for layer in range(2, imanager.chunkedgraph_meta.layer_count): + for layer in range(2, imanager.chunkedgraph_meta.layer_count + 1): layer_count = redis.hlen(f"{layer}c") print(f"{layer}\t: {layer_count}") + + print(imanager.chunkedgraph_meta.layer_chunk_bounds) def init_ingest_cmds(app): - app.cli.add_command(ingest_cli) - - -# for layer_id in range(2, 13): -# print(layer_id) -# child_chunk_coords = im.chunk_coords // 2 ** (layer_id - 3) -# child_chunk_coords = child_chunk_coords.astype(np.int) 
-# child_chunk_coords = np.unique(child_chunk_coords, axis=0) - -# parent_chunk_coords = child_chunk_coords // 2 -# parent_chunk_coords = parent_chunk_coords.astype(np.int) -# parent_chunk_coords = np.unique(parent_chunk_coords, axis=0) -# print(len(child_chunk_coords), len(parent_chunk_coords)) + app.cli.add_command(ingest_cli) \ No newline at end of file diff --git a/pychunkedgraph/ingest/ingestion_utils.py b/pychunkedgraph/ingest/ingestion_utils.py index 016efe7dd..1e2a45d10 100644 --- a/pychunkedgraph/ingest/ingestion_utils.py +++ b/pychunkedgraph/ingest/ingestion_utils.py @@ -92,9 +92,10 @@ def initialize_chunkedgraph( def postprocess_edge_data(im, edge_dict): - if im.data_version == 2: + data_version = im.chunkedgraph_meta.data_source.data_version + if data_version == 2: return edge_dict - elif im.data_version in [3, 4]: + elif data_version in [3, 4]: new_edge_dict = {} for k in edge_dict: areas = ( @@ -117,4 +118,4 @@ def postprocess_edge_data(im, edge_dict): return new_edge_dict else: - raise Exception(f"Unknown data_version: {im.data_version}") + raise Exception(f"Unknown data_version: {data_version}") diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index e1cc282d5..0b6823151 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -43,7 +43,7 @@ def add_layer( isolated_node_mask = ~np.in1d(children_ids, np.unique(edge_ids)) add_node_ids = children_ids[isolated_node_mask].squeeze() add_edge_ids = np.vstack([add_node_ids, add_node_ids]).T - edge_ids = np.concatenate([edge_ids, add_edge_ids]) + edge_ids.extend(add_edge_ids) graph, _, _, graph_ids = flatgraph_utils.build_gt_graph( edge_ids, make_directed=True @@ -79,7 +79,7 @@ def _read_children_chunks(cg_instance, layer_id, children_coords): mu.multiprocess_func( _read_chunk_helper, multi_args, - n_threads=min(len(multi_args, mp.cpu_count())), + 
n_threads=min(len(multi_args), mp.cpu_count()), ) return np.concatenate(children_ids_shared) @@ -134,7 +134,10 @@ def _get_cross_edges(cg_instance, layer_id, chunk_coord): ) print(f"_read_atomic_chunk_cross_edges: {time.time()-start}") - return np.unique(np.concatenate(cross_edges), axis=0) + cross_edges = np.concatenate(cross_edges) + if len(cross_edges): + cross_edges = np.unique(cross_edges, axis=0) + return list(cross_edges) def _read_atomic_chunk_cross_edges_helper(args): @@ -150,6 +153,7 @@ def _read_atomic_chunk_cross_edges_helper(args): def _read_atomic_chunk_cross_edges(cg_instance, chunk_coord, cross_edge_layer): + print(cross_edge_layer, chunk_coord) x, y, z = chunk_coord range_read = cg_instance.range_read_chunk( 2, x, y, z, columns=column_keys.Connectivity.CrossChunkEdge[cross_edge_layer] @@ -187,6 +191,8 @@ def _read_atomic_chunk_cross_edges(cg_instance, chunk_coord, cross_edge_layer): def _write_connected_components( cg_instance, layer_id, parent_chunk_id, ccs, graph_ids, time_stamp ) -> None: + if not ccs: + return chunked_ccs = chunked(ccs, len(ccs) // mp.cpu_count()) cg_info = cg_instance.get_serialized_info(credentials=False) mp_graph_ids = mp.Array("i", graph_ids) @@ -200,7 +206,7 @@ def _write_connected_components( mu.multiprocess_func( _write_components_helper, multi_args, - n_threads=min(len(multi_args, mp.cpu_count())), + n_threads=min(len(multi_args), mp.cpu_count()), ) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index c2642c667..2fc3a9034 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -113,16 +113,16 @@ def enqueue_atomic_tasks( np.random.shuffle(chunk_coords) # test chunks - chunk_coords = [ - [0, 0, 0], - [0, 0, 1], - [0, 1, 0], - [0, 1, 1], - [1, 0, 0], - [1, 0, 1], - [1, 1, 0], - [1, 1, 1], - ] + # chunk_coords = [ + # [20, 20, 10], + # [20, 20, 11], + # [20, 21, 10], + # [20, 21, 11], + # [21, 20, 10], + # [21, 20, 11], + 
# [21, 21, 10], + # [21, 21, 11], + # ] for chunk_coord in chunk_coords: atomic_queue = imanager.get_task_queue(imanager.config.atomic_q_name) From cae70611c458f90060671eb9fb998afd04439d54 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 16 Oct 2019 03:12:51 +0000 Subject: [PATCH 0288/1097] wip: fix include all in chunk ids --- .devcontainer/devcontainer.json | 5 ++- pychunkedgraph/backend/utils/serializers.py | 1 + .../ingest/initialization/abstract_layers.py | 45 ++++++++++--------- .../ingest/initialization/atomic_layer.py | 8 +++- pychunkedgraph/ingest/ran_ingestion_v2.py | 44 ++++++++++++------ 5 files changed, 64 insertions(+), 39 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index ae04de5cd..e43e086d3 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -21,11 +21,12 @@ // Uncomment the next line to use a non-root user. See https://aka.ms/vscode-remote/containers/non-root-user. // "-u", "1000" "-w", "${env:HOME}/projects/sl-pychunkedgraph", - "-v", "${env:HOME}/secrets:/root/.cloudvolume/secrets", + "-v", "C:/Users/akhil/secrets:/root/.cloudvolume/secrets", "-e", "FLASK_APP=run_dev.py", "-e", "APP_SETTINGS=pychunkedgraph.app.config.DockerDevelopmentConfig", "-e", "REDIS_PASSWORD=dev", - "-e", "LC_ALL=C.UTF-8" + "-e", "LC_ALL=C.UTF-8", + "-e", "OPPAI=BOOBS" ], // Uncomment the next line if you want to publish any ports. 
diff --git a/pychunkedgraph/backend/utils/serializers.py b/pychunkedgraph/backend/utils/serializers.py index 4d0ca2153..1fd2587f4 100644 --- a/pychunkedgraph/backend/utils/serializers.py +++ b/pychunkedgraph/backend/utils/serializers.py @@ -88,6 +88,7 @@ def pad_node_id(node_id: np.uint64) -> str: :param node_id: int :return: str """ + print(type(node_id)) return "%.20d" % node_id diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 0b6823151..fae99f643 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -6,7 +6,9 @@ import datetime import multiprocessing as mp from collections import defaultdict +from collections import abc from typing import Optional, Sequence +from operator import itemgetter import numpy as np from multiwrapper import multiprocessing_utils as mu @@ -37,7 +39,7 @@ def add_layer( start = time.time() edge_ids = _get_cross_edges(cg_instance, layer_id, parent_coords) print(f"_get_cross_edges: {time.time()-start}") - print(len(children_ids), len(edge_ids)) + # print(len(children_ids), len(edge_ids)) # Extract connected components isolated_node_mask = ~np.in1d(children_ids, np.unique(edge_ids)) @@ -120,8 +122,8 @@ def _get_cross_edges(cg_instance, layer_id, chunk_coord): layer2_chunks = get_touching_atomic_chunks( cg_instance.meta, layer_id, chunk_coord, include_both=False ) - print(f"get_touching_atomic_chunks: {time.time()-start}") - print(f"touching chunks count (1 side): {len(layer2_chunks)}") + # print(f"get_touching_atomic_chunks: {time.time()-start}") + # print(f"touching chunks count (1 side): {len(layer2_chunks)}") cg_info = cg_instance.get_serialized_info(credentials=False) @@ -132,7 +134,7 @@ def _get_cross_edges(cg_instance, layer_id, chunk_coord): cross_edges = mu.multithread_func( _read_atomic_chunk_cross_edges_helper, multi_args, n_threads=4 ) - 
print(f"_read_atomic_chunk_cross_edges: {time.time()-start}") + # print(f"_read_atomic_chunk_cross_edges: {time.time()-start}") cross_edges = np.concatenate(cross_edges) if len(cross_edges): @@ -148,12 +150,11 @@ def _read_atomic_chunk_cross_edges_helper(args): cross_edges = _read_atomic_chunk_cross_edges( cg_instance, layer2_chunk, cross_edge_layer ) - print(f"single atomic chunk: {time.time()-start}, edges {len(cross_edges)}") + # print(f"single atomic chunk: {time.time()-start}, edges {len(cross_edges)}") return cross_edges def _read_atomic_chunk_cross_edges(cg_instance, chunk_coord, cross_edge_layer): - print(cross_edge_layer, chunk_coord) x, y, z = chunk_coord range_read = cg_instance.range_read_chunk( 2, x, y, z, columns=column_keys.Connectivity.CrossChunkEdge[cross_edge_layer] @@ -195,24 +196,23 @@ def _write_connected_components( return chunked_ccs = chunked(ccs, len(ccs) // mp.cpu_count()) cg_info = cg_instance.get_serialized_info(credentials=False) - mp_graph_ids = mp.Array("i", graph_ids) multi_args = [] - for ccs in chunked_ccs: - multi_args.append( - (cg_info, layer_id, parent_chunk_id, ccs, mp_graph_ids, time_stamp) + with mp.Manager() as manager: + graph_ids_shared = manager.list(graph_ids) + for ccs in chunked_ccs: + multi_args.append( + (cg_info, layer_id, parent_chunk_id, ccs, graph_ids_shared, time_stamp) + ) + mu.multiprocess_func( + _write_components_helper, + multi_args, + n_threads=min(len(multi_args), mp.cpu_count()), ) - mu.multiprocess_func( - _write_components_helper, - multi_args, - n_threads=min(len(multi_args), mp.cpu_count()), - ) - def _write_components_helper(args): - cg_info, layer_id, parent_chunk_id, ccs, mp_graph_ids, time_stamp = args - graph_ids = np.frombuffer(mp_graph_ids.get_obj()) + cg_info, layer_id, parent_chunk_id, ccs, graph_ids, time_stamp = args _write_components( ChunkedGraph(**cg_info), layer_id, parent_chunk_id, ccs, graph_ids, time_stamp ) @@ -223,10 +223,11 @@ def _write_components( ): time_stamp = 
get_valid_timestamp(time_stamp) cc_connections = {l: [] for l in (layer_id, cg_instance.n_layers)} - for i_cc, cc in enumerate(ccs): - node_ids = graph_ids[cc] - if cg_instance.use_skip_connections and len(node_ids) == 1: - cc_connections[cg_instance.n_layers].append([node_ids]) + for cc in ccs: + node_ids = itemgetter(*cc)(graph_ids) + print(node_ids) + if cg_instance.use_skip_connections and not isinstance(node_ids, abc.Container): + cc_connections[cg_instance.n_layers].append([[node_ids]]) else: cc_connections[layer_id].append([node_ids]) diff --git a/pychunkedgraph/ingest/initialization/atomic_layer.py b/pychunkedgraph/ingest/initialization/atomic_layer.py index 78a962fb8..c25c9693a 100644 --- a/pychunkedgraph/ingest/initialization/atomic_layer.py +++ b/pychunkedgraph/ingest/initialization/atomic_layer.py @@ -10,7 +10,13 @@ from ...backend.chunkedgraph import ChunkedGraph from ...backend.utils import basetypes, serializers, column_keys -from ...backend.definitions.edges import Edges, IN_CHUNK, BT_CHUNK, CX_CHUNK, TYPES as EDGE_TYPES +from ...backend.definitions.edges import Edges +from ...backend.definitions.edges import ( + IN_CHUNK, + BT_CHUNK, + CX_CHUNK, + TYPES as EDGE_TYPES, +) from ...backend.chunkedgraph_utils import compute_indices_pandas, get_valid_timestamp from ...backend.flatgraph_utils import build_gt_graph, connected_components diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index 2fc3a9034..b4c4c1eb8 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -30,8 +30,13 @@ from ..io.components import put_chunk_components from ..backend.utils import basetypes from ..backend.chunkedgraph_utils import compute_chunk_id -from ..backend.definitions.edges import Edges, CX_CHUNK -from ..backend.definitions.edges import TYPES as EDGE_TYPES +from ..backend.definitions.edges import Edges +from ..backend.definitions.edges import ( + IN_CHUNK, + BT_CHUNK, + 
CX_CHUNK, + TYPES as EDGE_TYPES, +) chunk_id_str = lambda layer, coords: f"{layer}_{'_'.join(map(str, coords))}" @@ -113,16 +118,16 @@ def enqueue_atomic_tasks( np.random.shuffle(chunk_coords) # test chunks - # chunk_coords = [ - # [20, 20, 10], - # [20, 20, 11], - # [20, 21, 10], - # [20, 21, 11], - # [21, 20, 10], - # [21, 20, 11], - # [21, 21, 10], - # [21, 21, 11], - # ] + chunk_coords = [ + [20, 20, 10], + [20, 20, 11], + [20, 21, 10], + [20, 21, 11], + [21, 20, 10], + [21, 20, 11], + [21, 21, 10], + [21, 21, 11], + ] for chunk_coord in chunk_coords: atomic_queue = imanager.get_task_queue(imanager.config.atomic_q_name) @@ -207,9 +212,15 @@ def _read_raw_edge_data(imanager, coord) -> Dict: def _get_active_edges(imanager, coord, edges_d, mapping): active_edges_flag_d, isolated_ids = _define_active_edges(edges_d, mapping) chunk_edges_active = {} + pseudo_isolated_ids = [isolated_ids] for edge_type in EDGE_TYPES: edges = edges_d[edge_type] - active = active_edges_flag_d[edge_type] + + active = ( + np.ones(len(edges), dtype=bool) + if edge_type == CX_CHUNK + else active_edges_flag_d[edge_type] + ) sv_ids1 = edges.node_ids1[active] sv_ids2 = edges.node_ids2[active] @@ -218,7 +229,12 @@ def _get_active_edges(imanager, coord, edges_d, mapping): chunk_edges_active[edge_type] = Edges( sv_ids1, sv_ids2, affinities=affinities, areas=areas ) - return chunk_edges_active, isolated_ids + # assume all ids within the chunk are isolated + # to make sure all end up in connected components + pseudo_isolated_ids.append(edges.node_ids1) + if edge_type == IN_CHUNK: + pseudo_isolated_ids.append(edges.node_ids2) + return chunk_edges_active, np.concatenate(pseudo_isolated_ids) def _get_cont_chunk_coords(imanager, chunk_coord_a, chunk_coord_b): From 7406a7e02cb373811e34ad6e70689f6798559599 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 16 Oct 2019 14:30:22 +0000 Subject: [PATCH 0289/1097] fix: include all in chunk node ids --- pychunkedgraph/backend/utils/serializers.py | 1 
- .../ingest/initialization/abstract_layers.py | 47 +++++++++---------- 2 files changed, 22 insertions(+), 26 deletions(-) diff --git a/pychunkedgraph/backend/utils/serializers.py b/pychunkedgraph/backend/utils/serializers.py index 1fd2587f4..4d0ca2153 100644 --- a/pychunkedgraph/backend/utils/serializers.py +++ b/pychunkedgraph/backend/utils/serializers.py @@ -88,7 +88,6 @@ def pad_node_id(node_id: np.uint64) -> str: :param node_id: int :return: str """ - print(type(node_id)) return "%.20d" % node_id diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index fae99f643..5cae5f4c9 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -7,8 +7,8 @@ import multiprocessing as mp from collections import defaultdict from collections import abc -from typing import Optional, Sequence -from operator import itemgetter +from typing import Optional +from typing import Sequence import numpy as np from multiwrapper import multiprocessing_utils as mu @@ -194,42 +194,39 @@ def _write_connected_components( ) -> None: if not ccs: return - chunked_ccs = chunked(ccs, len(ccs) // mp.cpu_count()) + + ccs_with_node_ids = [] + for cc in ccs: + ccs_with_node_ids.append(graph_ids[cc]) + + chunked_ccs = chunked(ccs_with_node_ids, len(ccs_with_node_ids) // mp.cpu_count()) cg_info = cg_instance.get_serialized_info(credentials=False) multi_args = [] - with mp.Manager() as manager: - graph_ids_shared = manager.list(graph_ids) - for ccs in chunked_ccs: - multi_args.append( - (cg_info, layer_id, parent_chunk_id, ccs, graph_ids_shared, time_stamp) - ) - mu.multiprocess_func( - _write_components_helper, - multi_args, - n_threads=min(len(multi_args), mp.cpu_count()), - ) + for ccs in chunked_ccs: + multi_args.append((cg_info, layer_id, parent_chunk_id, ccs, time_stamp)) + mu.multiprocess_func( + _write_components_helper, + multi_args, + 
n_threads=min(len(multi_args), mp.cpu_count()), + ) def _write_components_helper(args): - cg_info, layer_id, parent_chunk_id, ccs, graph_ids, time_stamp = args + cg_info, layer_id, parent_chunk_id, ccs, time_stamp = args _write_components( - ChunkedGraph(**cg_info), layer_id, parent_chunk_id, ccs, graph_ids, time_stamp + ChunkedGraph(**cg_info), layer_id, parent_chunk_id, ccs, time_stamp ) -def _write_components( - cg_instance, layer_id, parent_chunk_id, ccs, graph_ids, time_stamp -): +def _write_components(cg_instance, layer_id, parent_chunk_id, ccs, time_stamp): time_stamp = get_valid_timestamp(time_stamp) cc_connections = {l: [] for l in (layer_id, cg_instance.n_layers)} - for cc in ccs: - node_ids = itemgetter(*cc)(graph_ids) - print(node_ids) - if cg_instance.use_skip_connections and not isinstance(node_ids, abc.Container): - cc_connections[cg_instance.n_layers].append([[node_ids]]) + for node_ids in ccs: + if cg_instance.use_skip_connections and len(node_ids) == 1: + cc_connections[cg_instance.n_layers].append(node_ids) else: - cc_connections[layer_id].append([node_ids]) + cc_connections[layer_id].append(node_ids) rows = [] parent_chunk_id_dict = cg_instance.get_parent_chunk_id_dict(parent_chunk_id) From 2f5105fdb6a1bd27a255abb16402e2d7fef54d87 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 16 Oct 2019 17:11:20 +0000 Subject: [PATCH 0290/1097] fix: chunked avoid step size 0 --- pychunkedgraph/ingest/initialization/abstract_layers.py | 7 +++++-- pychunkedgraph/utils/general.py | 5 ++++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 5cae5f4c9..9b1e07878 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -186,7 +186,7 @@ def _read_atomic_chunk_cross_edges(cg_instance, chunk_coord, cross_edge_layer): if cross_edges: return 
np.unique(np.concatenate(cross_edges), axis=0) - return [] + return np.empty([0, 2], dtype=np.uint64) def _write_connected_components( @@ -199,7 +199,10 @@ def _write_connected_components( for cc in ccs: ccs_with_node_ids.append(graph_ids[cc]) - chunked_ccs = chunked(ccs_with_node_ids, len(ccs_with_node_ids) // mp.cpu_count()) + job_size = len(ccs_with_node_ids) // mp.cpu_count() + if not job_size: + job_size = 1 + chunked_ccs = chunked(ccs_with_node_ids, job_size) cg_info = cg_instance.get_serialized_info(credentials=False) multi_args = [] diff --git a/pychunkedgraph/utils/general.py b/pychunkedgraph/utils/general.py index 890be43b8..8f5479fba 100644 --- a/pychunkedgraph/utils/general.py +++ b/pychunkedgraph/utils/general.py @@ -6,6 +6,7 @@ import numpy as np + def reverse_dictionary(dictionary): """ given a dictionary - {key1 : [item1, item2 ...], key2 : [ite3, item4 ...]} @@ -24,5 +25,7 @@ def reverse_dictionary(dictionary): def chunked(l: Sequence, n: int): """Yield successive n-sized chunks from l.""" + if n < 1: + n = len(l) for i in range(0, len(l), n): - yield l[i:i + n] + yield l[i : i + n] From e42ca31f8ba0783c238288ed56b8da8048915845 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 16 Oct 2019 17:11:54 +0000 Subject: [PATCH 0291/1097] fix: chunked avoid step size 0 --- pychunkedgraph/ingest/initialization/abstract_layers.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 9b1e07878..124096875 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -199,10 +199,7 @@ def _write_connected_components( for cc in ccs: ccs_with_node_ids.append(graph_ids[cc]) - job_size = len(ccs_with_node_ids) // mp.cpu_count() - if not job_size: - job_size = 1 - chunked_ccs = chunked(ccs_with_node_ids, job_size) + chunked_ccs = 
chunked(ccs_with_node_ids, len(ccs_with_node_ids) // mp.cpu_count()) cg_info = cg_instance.get_serialized_info(credentials=False) multi_args = [] From fffaf6697bfaeabfd598c35181c9b97db198aa06 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 16 Oct 2019 22:40:44 +0000 Subject: [PATCH 0292/1097] fix: numpy concatenate dtype change empty array --- pychunkedgraph/backend/chunkedgraph.py | 4 ++-- pychunkedgraph/backend/definitions/edges.py | 10 ++++++---- pychunkedgraph/backend/flatgraph_utils.py | 1 - pychunkedgraph/ingest/cli.py | 4 ++-- .../ingest/initialization/abstract_layers.py | 13 +++++++------ .../ingest/initialization/atomic_layer.py | 6 +++--- pychunkedgraph/ingest/ran_ingestion_v2.py | 19 ++++++++++--------- rq_workers/{atomic_worker.py => atomic.py} | 0 rq_workers/{parent_worker.py => parent.py} | 0 9 files changed, 30 insertions(+), 27 deletions(-) rename rq_workers/{atomic_worker.py => atomic.py} (100%) rename rq_workers/{parent_worker.py => parent.py} (100%) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 41285212b..51ac5eaf6 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -98,7 +98,7 @@ def __init__( dataset_info: Optional[object] = None, is_new: bool = False, logger: Optional[logging.Logger] = None, - meta: Optional[ChunkedGraphMeta] = None + meta: Optional[ChunkedGraphMeta] = None, ) -> None: if logger is None: @@ -1696,7 +1696,7 @@ def get_roots( :return: np.uint64 """ time_stamp = get_valid_timestamp(time_stamp) - parent_ids = np.array(node_ids) + parent_ids = np.array(node_ids, dtype=basetypes.NODE_ID) if stop_layer is not None: stop_layer = min(self.n_layers, stop_layer) else: diff --git a/pychunkedgraph/backend/definitions/edges.py b/pychunkedgraph/backend/definitions/edges.py index 6fc3e2ca2..3b7049122 100644 --- a/pychunkedgraph/backend/definitions/edges.py +++ b/pychunkedgraph/backend/definitions/edges.py @@ -6,6 +6,8 @@ import 
numpy as np +from ..utils import basetypes + IN_CHUNK = "in" BT_CHUNK = "between" CX_CHUNK = "cross" @@ -25,19 +27,19 @@ def __init__( areas: Optional[np.ndarray] = None, ): assert node_ids1.size == node_ids2.size - self.node_ids1 = node_ids1 - self.node_ids2 = node_ids2 + self.node_ids1 = np.array(node_ids1, dtype=basetypes.NODE_ID) + self.node_ids2 = np.array(node_ids2, dtype=basetypes.NODE_ID) self._as_pairs = None self.affinities = np.ones(len(self.node_ids1)) * DEFAULT_AFFINITY if affinities is not None: assert node_ids1.size == affinities.size - self.affinities = affinities + self.affinities = np.array(affinities, dtype=basetypes.EDGE_AFFINITY) self.areas = np.ones(len(self.node_ids1)) * DEFAULT_AREA if areas is not None: assert node_ids1.size == areas.size - self.areas = affinities + self.areas = np.array(areas, dtype=basetypes.EDGE_AREA) def __add__(self, other): """add two Edges instances""" diff --git a/pychunkedgraph/backend/flatgraph_utils.py b/pychunkedgraph/backend/flatgraph_utils.py index 2832db5ef..ecb576b61 100644 --- a/pychunkedgraph/backend/flatgraph_utils.py +++ b/pychunkedgraph/backend/flatgraph_utils.py @@ -37,7 +37,6 @@ def build_gt_graph(edges, weights=None, is_directed=True, make_directed=False, cap = weighted_graph.new_edge_property("float", vals=weights) else: cap = None - return weighted_graph, cap, edges, unique_ids diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index a55574b7b..d53e7bb85 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -57,8 +57,8 @@ def ingest_graph( data_source = DataSource( agglomeration="gs://ranl/scratch/pinky100_ca_com/agg", watershed="gs://neuroglancer/pinky100_v0/ws/pinky100_ca_com", - edges="gs://chunkedgraph/pinky100/edges", - components="gs://chunkedgraph/pinky100/components", + edges="gs://akhilesh-pcg/pinky100-test/edges", + components="gs://akhilesh-pcg/pinky100-test/components", use_raw_edges=not processed, use_raw_components=not processed, 
data_version=4, diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 124096875..bce52c926 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -9,6 +9,7 @@ from collections import abc from typing import Optional from typing import Sequence +from typing import List import numpy as np from multiwrapper import multiprocessing_utils as mu @@ -16,6 +17,7 @@ from .helpers import get_touching_atomic_chunks from ...utils.general import chunked from ...backend import flatgraph_utils +from ...backend.utils import basetypes from ...backend.chunkedgraph import ChunkedGraph from ...backend.chunkedgraph_utils import get_valid_timestamp from ...backend.utils import serializers, column_keys @@ -28,7 +30,6 @@ def add_layer( children_coords: Sequence[Sequence[int]], *, time_stamp: Optional[datetime.datetime] = None, - n_threads: int = 32, ) -> None: x, y, z = parent_coords @@ -117,7 +118,7 @@ def _read_chunk(children_ids_shared, cg_instance, layer_id, chunk_coord): children_ids_shared.append(row_ids[max_child_ids_occ_so_far == 0]) -def _get_cross_edges(cg_instance, layer_id, chunk_coord): +def _get_cross_edges(cg_instance, layer_id, chunk_coord) -> List: start = time.time() layer2_chunks = get_touching_atomic_chunks( cg_instance.meta, layer_id, chunk_coord, include_both=False @@ -170,15 +171,15 @@ def _read_atomic_chunk_cross_edges(cg_instance, chunk_coord, cross_edge_layer): cross_edges = [] for i, l2_id in enumerate(parent_neighboring_chunk_supervoxels_d): - segment_id = segment_ids[i] - neighboring_supervoxels = parent_neighboring_chunk_supervoxels_d[l2_id] + seg_id = segment_ids[i] + neighboring_svs = parent_neighboring_chunk_supervoxels_d[l2_id] neighboring_segment_ids = cg_instance.get_roots( - neighboring_supervoxels, stop_layer=cross_edge_layer + neighboring_svs, stop_layer=cross_edge_layer ) edges = np.vstack( [ - 
np.array([segment_id] * len(neighboring_supervoxels)), + np.array([seg_id] * len(neighboring_svs), dtype=basetypes.NODE_ID), neighboring_segment_ids, ] ).T diff --git a/pychunkedgraph/ingest/initialization/atomic_layer.py b/pychunkedgraph/ingest/initialization/atomic_layer.py index c25c9693a..1807d131f 100644 --- a/pychunkedgraph/ingest/initialization/atomic_layer.py +++ b/pychunkedgraph/ingest/initialization/atomic_layer.py @@ -35,6 +35,7 @@ def add_atomic_edges( chunk_ids = cg_instance.get_chunk_ids_from_node_ids(chunk_node_ids) assert len(np.unique(chunk_ids)) == 1 + # print(np.argwhere(chunk_edge_ids == 73957825679720457)) graph, _, _, unique_ids = build_gt_graph(chunk_edge_ids, make_directed=True) ccs = connected_components(graph) @@ -79,9 +80,7 @@ def _get_chunk_nodes_and_edges(chunk_edges_d: dict, isolated_ids: Sequence[int]) edge_ids.append(edges.get_pairs()) chunk_node_ids = np.unique(np.concatenate(node_ids)) - chunk_edge_ids = np.concatenate(edge_ids) - - return (chunk_node_ids, chunk_edge_ids) + return (chunk_node_ids, np.concatenate(edge_ids)) def _get_remapping(chunk_edges_d: dict): @@ -111,6 +110,7 @@ def _process_component( ): rows = [] chunk_out_edges = [] # out = between + cross + # print(node_ids) for node_id in node_ids: _edges = _get_outgoing_edges(node_id, chunk_edges_d, sparse_indices, remapping) chunk_out_edges.append(_edges) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index b4c4c1eb8..a7d2bfb93 100644 --- a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -119,14 +119,14 @@ def enqueue_atomic_tasks( # test chunks chunk_coords = [ - [20, 20, 10], - [20, 20, 11], - [20, 21, 10], - [20, 21, 11], - [21, 20, 10], - [21, 20, 11], - [21, 21, 10], - [21, 21, 11], + [26, 4, 10], + [26, 4, 11], + [26, 5, 10], + [26, 5, 11], + [27, 4, 10], + [27, 4, 11], + [27, 5, 10], + [27, 5, 11], ] for chunk_coord in chunk_coords: @@ -234,7 +234,8 @@ def 
_get_active_edges(imanager, coord, edges_d, mapping): pseudo_isolated_ids.append(edges.node_ids1) if edge_type == IN_CHUNK: pseudo_isolated_ids.append(edges.node_ids2) - return chunk_edges_active, np.concatenate(pseudo_isolated_ids) + + return chunk_edges_active, np.unique(np.concatenate(pseudo_isolated_ids)) def _get_cont_chunk_coords(imanager, chunk_coord_a, chunk_coord_b): diff --git a/rq_workers/atomic_worker.py b/rq_workers/atomic.py similarity index 100% rename from rq_workers/atomic_worker.py rename to rq_workers/atomic.py diff --git a/rq_workers/parent_worker.py b/rq_workers/parent.py similarity index 100% rename from rq_workers/parent_worker.py rename to rq_workers/parent.py From 2db8c6f9b390b8837a4799b59b4f77bbe0e56d70 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 16 Oct 2019 23:39:50 +0000 Subject: [PATCH 0293/1097] change status output --- pychunkedgraph/ingest/cli.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pychunkedgraph/ingest/cli.py b/pychunkedgraph/ingest/cli.py index d53e7bb85..d548c8b2e 100644 --- a/pychunkedgraph/ingest/cli.py +++ b/pychunkedgraph/ingest/cli.py @@ -95,7 +95,7 @@ def ingest_status(): layer_count = redis.hlen(f"{layer}c") print(f"{layer}\t: {layer_count}") - print(imanager.chunkedgraph_meta.layer_chunk_bounds) + print(imanager.chunkedgraph_meta.layer_chunk_counts) def init_ingest_cmds(app): From c5a2f63e2fd0f96d73a31e369395b42d5f9fc714 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Wed, 16 Oct 2019 23:47:11 +0000 Subject: [PATCH 0294/1097] improve failed command --- pychunkedgraph/app/rq_cli.py | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/pychunkedgraph/app/rq_cli.py b/pychunkedgraph/app/rq_cli.py index 75237148e..674101129 100644 --- a/pychunkedgraph/app/rq_cli.py +++ b/pychunkedgraph/app/rq_cli.py @@ -42,25 +42,24 @@ def get_status(queues, show_busy): print(f"Jobs failed \t: {q.failed_job_registry.count}\n") 
-@rq_cli.command("failed_ids") +@rq_cli.command("failed") @click.argument("queue", type=str) -def failed_jobs(queue): - q = Queue(queue, connection=connection) - ids = q.failed_job_registry.get_job_ids() - print("\n".join(ids)) - - -@rq_cli.command("failed_info") -@click.argument("queue", type=str) -@click.argument("id", type=str) -def failed_job_info(queue, id): - j = Job.fetch(id, connection=connection) - print("KWARGS") - print(j.kwargs) - print("\nARGS") - print(j.args) - print("\nEXCEPTION") - print(j.exc_info) +@click.argument("job_ids", nargs=-1) +def failed_jobs(queue, job_ids): + if job_ids: + for job_id in job_ids: + j = Job.fetch(job_id, connection=connection) + print(f"JOB ID {job_id}") + print("KWARGS") + print(j.kwargs) + print("\nARGS") + print(j.args) + print("\nEXCEPTION") + print(j.exc_info) + else: + q = Queue(queue, connection=connection) + ids = q.failed_job_registry.get_job_ids() + print("\n".join(ids)) @rq_cli.command("empty") From e559e0f1d192d7d54601b9356812c8c26943d7cc Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 17 Oct 2019 17:54:42 +0000 Subject: [PATCH 0295/1097] fix: handle empty atomic chunk array --- pychunkedgraph/ingest/initialization/abstract_layers.py | 2 ++ pychunkedgraph/ingest/initialization/helpers.py | 6 ++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index bce52c926..8f47a27a6 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -123,6 +123,8 @@ def _get_cross_edges(cg_instance, layer_id, chunk_coord) -> List: layer2_chunks = get_touching_atomic_chunks( cg_instance.meta, layer_id, chunk_coord, include_both=False ) + if not layer2_chunks: + return [] # print(f"get_touching_atomic_chunks: {time.time()-start}") # print(f"touching chunks count (1 side): {len(layer2_chunks)}") diff --git 
a/pychunkedgraph/ingest/initialization/helpers.py b/pychunkedgraph/ingest/initialization/helpers.py index 6474ce9b8..fcba7d9e0 100644 --- a/pychunkedgraph/ingest/initialization/helpers.py +++ b/pychunkedgraph/ingest/initialization/helpers.py @@ -15,6 +15,7 @@ def get_touching_atomic_chunks( chunk_coords = np.array(chunk_coords, dtype=int) touching_atomic_chunks = [] + # atomic chunk count along one dimension atomic_chunk_count = chunkedgraph_meta.graph_config.fanout ** (layer - 2) layer2_chunk_bounds = chunkedgraph_meta.layer_chunk_bounds[2] @@ -47,6 +48,7 @@ def get_touching_atomic_chunks( for coords in touching_atomic_chunks: if np.all(np.less(coords, layer2_chunk_bounds)): result.append(coords) - - return np.unique(np.array(result, dtype=int), axis=0) + if result: + return np.unique(np.array(result, dtype=int), axis=0) + return [] From 632786e3dfcf28ab1d584c81067333903d0029a3 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 17 Oct 2019 18:52:43 +0000 Subject: [PATCH 0296/1097] wip: get bounding atomic chunk croods --- .../ingest/initialization/helpers.py | 48 ++++++++++++++++++- 1 file changed, 46 insertions(+), 2 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/helpers.py b/pychunkedgraph/ingest/initialization/helpers.py index fcba7d9e0..d91670db2 100644 --- a/pychunkedgraph/ingest/initialization/helpers.py +++ b/pychunkedgraph/ingest/initialization/helpers.py @@ -1,4 +1,5 @@ from typing import Sequence +from typing import List from itertools import product import numpy as np @@ -10,8 +11,8 @@ def get_touching_atomic_chunks( layer: int, chunk_coords: Sequence[int], include_both=True, -): - """get atomic chunks along touching faces of children chunks of a parent chunk""" +) -> List: + """get atomic chunk coordinates along touching faces of children chunks of a parent chunk""" chunk_coords = np.array(chunk_coords, dtype=int) touching_atomic_chunks = [] @@ -52,3 +53,46 @@ def get_touching_atomic_chunks( return np.unique(np.array(result, 
dtype=int), axis=0) return [] + +def get_bounding_atomic_chunks( + chunkedgraph_meta: ChunkedGraphMeta, layer: int, chunk_coords: Sequence[int] +) -> List: + """get atomic chunk coordinates along the boundary of the chunk""" + chunk_coords = np.array(chunk_coords, dtype=int) + atomic_chunks = [] + + # atomic chunk count along one dimension + atomic_chunk_count = chunkedgraph_meta.graph_config.fanout ** (layer - 2) + layer2_chunk_bounds = chunkedgraph_meta.layer_chunk_bounds[2] + + chunk_offset = chunk_coords * atomic_chunk_count + + # relevant chunks along touching planes at center + for axis_1, axis_2 in product(*[range(atomic_chunk_count)] * 2): + # x-y plane + chunk_1 = chunk_offset + np.array((axis_1, axis_2, mid)) + touching_atomic_chunks.append(chunk_1) + # x-z plane + chunk_1 = chunk_offset + np.array((axis_1, mid, axis_2)) + touching_atomic_chunks.append(chunk_1) + # y-z plane + chunk_1 = chunk_offset + np.array((mid, axis_1, axis_2)) + touching_atomic_chunks.append(chunk_1) + + if include_both: + chunk_2 = chunk_offset + np.array((axis_1, axis_2, mid + 1)) + touching_atomic_chunks.append(chunk_2) + + chunk_2 = chunk_offset + np.array((axis_1, mid + 1, axis_2)) + touching_atomic_chunks.append(chunk_2) + + chunk_2 = chunk_offset + np.array((mid + 1, axis_1, axis_2)) + touching_atomic_chunks.append(chunk_2) + + result = [] + for coords in touching_atomic_chunks: + if np.all(np.less(coords, layer2_chunk_bounds)): + result.append(coords) + if result: + return np.unique(np.array(result, dtype=int), axis=0) + return [] From 1f82f0af45348260bdd693f3716a365e0dc6f8f3 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Thu, 17 Oct 2019 23:26:58 +0000 Subject: [PATCH 0297/1097] bounding atomic chunks --- pychunkedgraph/backend/chunkedgraph.py | 2 +- .../ingest/initialization/helpers.py | 34 +++++++------------ 2 files changed, 13 insertions(+), 23 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 
51ac5eaf6..63ea8a405 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -1705,7 +1705,7 @@ def get_roots( node_mask = np.ones(len(node_ids), dtype=np.bool) node_mask[self.get_chunk_layers(node_ids) >= stop_layer] = False for _ in range(n_tries): - parent_ids = np.array(node_ids) + parent_ids = np.array(node_ids, dtype=basetypes.NODE_ID) for _ in range(int(stop_layer + 1)): temp_parent_ids = self.get_parents( parent_ids[node_mask], time_stamp=time_stamp diff --git a/pychunkedgraph/ingest/initialization/helpers.py b/pychunkedgraph/ingest/initialization/helpers.py index d91670db2..2819501bf 100644 --- a/pychunkedgraph/ingest/initialization/helpers.py +++ b/pychunkedgraph/ingest/initialization/helpers.py @@ -66,33 +66,23 @@ def get_bounding_atomic_chunks( layer2_chunk_bounds = chunkedgraph_meta.layer_chunk_bounds[2] chunk_offset = chunk_coords * atomic_chunk_count + x1, y1, z1 = chunk_offset + x2, y2, z2 = chunk_offset + atomic_chunk_count - # relevant chunks along touching planes at center - for axis_1, axis_2 in product(*[range(atomic_chunk_count)] * 2): - # x-y plane - chunk_1 = chunk_offset + np.array((axis_1, axis_2, mid)) - touching_atomic_chunks.append(chunk_1) - # x-z plane - chunk_1 = chunk_offset + np.array((axis_1, mid, axis_2)) - touching_atomic_chunks.append(chunk_1) - # y-z plane - chunk_1 = chunk_offset + np.array((mid, axis_1, axis_2)) - touching_atomic_chunks.append(chunk_1) + f = lambda range1, range2: product(range(*range1), range(*range2)) - if include_both: - chunk_2 = chunk_offset + np.array((axis_1, axis_2, mid + 1)) - touching_atomic_chunks.append(chunk_2) + atomic_chunks.extend([np.array([x1, d1, d2]) for d1, d2 in f((y1, y2), (z1, z2))]) + atomic_chunks.extend([np.array([x2, d1, d2]) for d1, d2 in f((y1, y2), (z1, z2))]) - chunk_2 = chunk_offset + np.array((axis_1, mid + 1, axis_2)) - touching_atomic_chunks.append(chunk_2) + atomic_chunks.extend([np.array([d1, y1, d2]) for d1, d2 in f((x1, x2), 
(z1, z2))]) + atomic_chunks.extend([np.array([d1, y2, d2]) for d1, d2 in f((x1, x2), (z1, z2))]) - chunk_2 = chunk_offset + np.array((mid + 1, axis_1, axis_2)) - touching_atomic_chunks.append(chunk_2) + atomic_chunks.extend([np.array([d1, d2, z1]) for d1, d2 in f((x1, x2), (y1, y2))]) + atomic_chunks.extend([np.array([d1, d2, z2]) for d1, d2 in f((x1, x2), (y1, y2))]) result = [] - for coords in touching_atomic_chunks: + for coords in atomic_chunks: if np.all(np.less(coords, layer2_chunk_bounds)): result.append(coords) - if result: - return np.unique(np.array(result, dtype=int), axis=0) - return [] + + return np.unique(np.array(result, dtype=int), axis=0) From 1a1d15922bcb0b618c2927b7c213ae29821ceffe Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 18 Oct 2019 15:44:13 +0000 Subject: [PATCH 0298/1097] fix: read only the latest node ids --- .../ingest/initialization/abstract_layers.py | 75 +++++++++++-------- 1 file changed, 42 insertions(+), 33 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 8f47a27a6..8b9dfd4b7 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -93,18 +93,10 @@ def _read_chunk_helper(args): _read_chunk(children_ids_shared, cg_instance, layer_id, chunk_coord) -def _read_chunk(children_ids_shared, cg_instance, layer_id, chunk_coord): - x, y, z = chunk_coord - range_read = cg_instance.range_read_chunk( - layer_id, x, y, z, columns=column_keys.Hierarchy.Child - ) - - # Deserialize row keys and store child with highest id for comparison - row_ids = np.fromiter(range_read.keys(), dtype=np.uint64) - segment_ids = np.array([cg_instance.get_segment_id(r_id) for r_id in row_ids]) +def _filter_latest_ids(row_ids, segment_ids, children_ids): max_child_ids = [] - for row_data in range_read.values(): - max_child_ids.append(np.max(row_data[0].value)) + for ids in 
children_ids: + max_child_ids.append(np.max(ids)) sorting = np.argsort(segment_ids)[::-1] row_ids = row_ids[sorting] @@ -115,7 +107,21 @@ def _read_chunk(children_ids_shared, cg_instance, layer_id, chunk_coord): for i_row in range(len(max_child_ids)): max_child_ids_occ_so_far[i_row] = counter[max_child_ids[i_row]] counter[max_child_ids[i_row]] += 1 - children_ids_shared.append(row_ids[max_child_ids_occ_so_far == 0]) + return row_ids[max_child_ids_occ_so_far == 0] + + +def _read_chunk(children_ids_shared, cg_instance, layer_id, chunk_coord): + x, y, z = chunk_coord + range_read = cg_instance.range_read_chunk( + layer_id, x, y, z, columns=column_keys.Hierarchy.Child + ) + row_ids = [] + children_ids = [] + for row_id, row_data in range_read.items(): + row_ids.append(row_id) + children_ids.append(row_data[0].value) + segment_ids = np.array([cg_instance.get_segment_id(r_id) for r_id in row_ids]) + children_ids_shared.append(_filter_latest_ids(row_ids, segment_ids, children_ids)) def _get_cross_edges(cg_instance, layer_id, chunk_coord) -> List: @@ -123,7 +129,7 @@ def _get_cross_edges(cg_instance, layer_id, chunk_coord) -> List: layer2_chunks = get_touching_atomic_chunks( cg_instance.meta, layer_id, chunk_coord, include_both=False ) - if not layer2_chunks: + if not len(layer2_chunks): return [] # print(f"get_touching_atomic_chunks: {time.time()-start}") # print(f"touching chunks count (1 side): {len(layer2_chunks)}") @@ -159,33 +165,36 @@ def _read_atomic_chunk_cross_edges_helper(args): def _read_atomic_chunk_cross_edges(cg_instance, chunk_coord, cross_edge_layer): x, y, z = chunk_coord + child_key = column_keys.Hierarchy.Child + cross_edge_key = column_keys.Connectivity.CrossChunkEdge[cross_edge_layer] range_read = cg_instance.range_read_chunk( - 2, x, y, z, columns=column_keys.Connectivity.CrossChunkEdge[cross_edge_layer] + 2, x, y, z, columns=[child_key, cross_edge_key] ) - parent_neighboring_chunk_supervoxels_d = defaultdict(list) - for l2_id, row_data in 
range_read.items(): - edges = row_data[0].value - parent_neighboring_chunk_supervoxels_d[l2_id] = edges[:, 1] + row_ids = [] + children_ids = [] + for row_id, row_data in range_read.items(): + row_ids.append(row_id) + children_ids.append(row_data[child_key][0].value) + segment_ids = np.array([cg_instance.get_segment_id(r_id) for r_id in row_ids]) + l2ids = _filter_latest_ids(row_ids, segment_ids, children_ids) - l2_ids = list(parent_neighboring_chunk_supervoxels_d.keys()) - segment_ids = cg_instance.get_roots(l2_ids, stop_layer=cross_edge_layer) + parent_neighboring_chunk_supervoxels_d = defaultdict(list) + for l2id in l2ids: + edges = range_read[l2id][cross_edge_key][0].value + parent_neighboring_chunk_supervoxels_d[l2id] = edges[:, 1] + parent_ids = cg_instance.get_roots(l2ids, stop_layer=cross_edge_layer) cross_edges = [] - for i, l2_id in enumerate(parent_neighboring_chunk_supervoxels_d): - seg_id = segment_ids[i] - neighboring_svs = parent_neighboring_chunk_supervoxels_d[l2_id] - neighboring_segment_ids = cg_instance.get_roots( - neighboring_svs, stop_layer=cross_edge_layer + for i, l2id in enumerate(parent_neighboring_chunk_supervoxels_d): + parent_id = parent_ids[i] + neighboring_parent_ids = cg_instance.get_roots( + parent_neighboring_chunk_supervoxels_d[l2id], stop_layer=cross_edge_layer ) - - edges = np.vstack( - [ - np.array([seg_id] * len(neighboring_svs), dtype=basetypes.NODE_ID), - neighboring_segment_ids, - ] - ).T - cross_edges.append(edges) + chunk_parent_ids = np.array( + [parent_id] * len(neighboring_parent_ids), dtype=basetypes.NODE_ID + ) + cross_edges.append(np.vstack([chunk_parent_ids, neighboring_parent_ids]).T) if cross_edges: return np.unique(np.concatenate(cross_edges), axis=0) From b3fa4eced042e2c74a98df14f3319de8ee5fb1fd Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 18 Oct 2019 15:59:08 +0000 Subject: [PATCH 0299/1097] wip: fix read only the latest node ids --- .../ingest/initialization/abstract_layers.py | 25 
++++++++++--------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 8b9dfd4b7..e0a7a6df2 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -93,14 +93,10 @@ def _read_chunk_helper(args): _read_chunk(children_ids_shared, cg_instance, layer_id, chunk_coord) -def _filter_latest_ids(row_ids, segment_ids, children_ids): - max_child_ids = [] - for ids in children_ids: - max_child_ids.append(np.max(ids)) - +def _filter_latest_ids(row_ids, segment_ids, max_children_ids): sorting = np.argsort(segment_ids)[::-1] row_ids = row_ids[sorting] - max_child_ids = np.array(max_child_ids, dtype=np.uint64)[sorting] + max_child_ids = np.array(max_children_ids, dtype=np.uint64)[sorting] counter = defaultdict(int) max_child_ids_occ_so_far = np.zeros(len(max_child_ids), dtype=np.int) @@ -116,12 +112,15 @@ def _read_chunk(children_ids_shared, cg_instance, layer_id, chunk_coord): layer_id, x, y, z, columns=column_keys.Hierarchy.Child ) row_ids = [] - children_ids = [] + max_children_ids = [] for row_id, row_data in range_read.items(): row_ids.append(row_id) - children_ids.append(row_data[0].value) + max_children_ids.append(np.max(row_data[0].value)) + row_ids = np.array(row_ids, dtype=basetypes.NODE_ID) segment_ids = np.array([cg_instance.get_segment_id(r_id) for r_id in row_ids]) - children_ids_shared.append(_filter_latest_ids(row_ids, segment_ids, children_ids)) + + row_ids = _filter_latest_ids(row_ids, segment_ids, max_children_ids) + children_ids_shared.append(row_ids) def _get_cross_edges(cg_instance, layer_id, chunk_coord) -> List: @@ -172,12 +171,14 @@ def _read_atomic_chunk_cross_edges(cg_instance, chunk_coord, cross_edge_layer): ) row_ids = [] - children_ids = [] + max_children_ids = [] for row_id, row_data in range_read.items(): row_ids.append(row_id) - 
children_ids.append(row_data[child_key][0].value) + max_children_ids.append(np.max(row_data[child_key][0].value)) + + row_ids = np.array(row_ids, dtype=basetypes.NODE_ID) segment_ids = np.array([cg_instance.get_segment_id(r_id) for r_id in row_ids]) - l2ids = _filter_latest_ids(row_ids, segment_ids, children_ids) + l2ids = _filter_latest_ids(row_ids, segment_ids, max_children_ids) parent_neighboring_chunk_supervoxels_d = defaultdict(list) for l2id in l2ids: From 952a6cf09d9decfd5c77a8f38e7eed76a01f26c5 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 18 Oct 2019 18:54:45 +0000 Subject: [PATCH 0300/1097] fix: read only the latest node ids --- .../ingest/initialization/abstract_layers.py | 65 ++++++++++--------- 1 file changed, 35 insertions(+), 30 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index e0a7a6df2..80d933359 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -3,6 +3,7 @@ """ import time +import math import datetime import multiprocessing as mp from collections import defaultdict @@ -35,7 +36,7 @@ def add_layer( start = time.time() children_ids = _read_children_chunks(cg_instance, layer_id, children_coords) - print(f"_read_children_chunks: {time.time()-start}, count {len(children_ids)}") + print(f"_read_children_chunks: {time.time()-start}, id count {len(children_ids)}") start = time.time() edge_ids = _get_cross_edges(cg_instance, layer_id, parent_coords) @@ -96,7 +97,7 @@ def _read_chunk_helper(args): def _filter_latest_ids(row_ids, segment_ids, max_children_ids): sorting = np.argsort(segment_ids)[::-1] row_ids = row_ids[sorting] - max_child_ids = np.array(max_children_ids, dtype=np.uint64)[sorting] + max_child_ids = np.array(max_children_ids, dtype=basetypes.NODE_ID)[sorting] counter = defaultdict(int) max_child_ids_occ_so_far = np.zeros(len(max_child_ids), 
dtype=np.int) @@ -124,42 +125,47 @@ def _read_chunk(children_ids_shared, cg_instance, layer_id, chunk_coord): def _get_cross_edges(cg_instance, layer_id, chunk_coord) -> List: - start = time.time() layer2_chunks = get_touching_atomic_chunks( cg_instance.meta, layer_id, chunk_coord, include_both=False ) if not len(layer2_chunks): return [] - # print(f"get_touching_atomic_chunks: {time.time()-start}") - # print(f"touching chunks count (1 side): {len(layer2_chunks)}") cg_info = cg_instance.get_serialized_info(credentials=False) + with mp.Manager() as manager: + edge_ids_shared = manager.list() + edge_ids_shared.append(np.empty([0, 2], dtype=basetypes.NODE_ID)) - start = time.time() - multi_args = [] - for layer2_chunk in layer2_chunks: - multi_args.append((cg_info, layer2_chunk, layer_id - 1)) - cross_edges = mu.multithread_func( - _read_atomic_chunk_cross_edges_helper, multi_args, n_threads=4 - ) - # print(f"_read_atomic_chunk_cross_edges: {time.time()-start}") + chunked_l2chunk_list = chunked( + layer2_chunks, len(layer2_chunks) // mp.cpu_count() + ) + multi_args = [] + for layer2_chunks in chunked_l2chunk_list: + multi_args.append((edge_ids_shared, cg_info, layer2_chunks, layer_id - 1)) + mu.multiprocess_func( + _read_atomic_chunk_cross_edges_helper, + multi_args, + n_threads=min(len(multi_args), mp.cpu_count()), + ) - cross_edges = np.concatenate(cross_edges) - if len(cross_edges): - cross_edges = np.unique(cross_edges, axis=0) - return list(cross_edges) + cross_edges = np.concatenate(edge_ids_shared) + if len(cross_edges): + cross_edges = np.unique(cross_edges, axis=0) + return list(cross_edges) def _read_atomic_chunk_cross_edges_helper(args): - cg_info, layer2_chunk, cross_edge_layer = args - cg_instance = ChunkedGraph(**cg_info) + edge_ids_shared, cg_info, layer2_chunks, cross_edge_layer = args + cg = ChunkedGraph(**cg_info) - start = time.time() - cross_edges = _read_atomic_chunk_cross_edges( - cg_instance, layer2_chunk, cross_edge_layer - ) - # print(f"single 
atomic chunk: {time.time()-start}, edges {len(cross_edges)}") - return cross_edges + cross_edges = [] + for layer2_chunk in layer2_chunks: + edges = _read_atomic_chunk_cross_edges(cg, layer2_chunk, cross_edge_layer) + cross_edges.append(edges) + cross_edges = np.concatenate(cross_edges) + if len(cross_edges): + cross_edges = np.unique(cross_edges, axis=0) + edge_ids_shared.append(cross_edges) def _read_atomic_chunk_cross_edges(cg_instance, chunk_coord, cross_edge_layer): @@ -182,11 +188,13 @@ def _read_atomic_chunk_cross_edges(cg_instance, chunk_coord, cross_edge_layer): parent_neighboring_chunk_supervoxels_d = defaultdict(list) for l2id in l2ids: + if not cross_edge_key in range_read[l2id]: + continue edges = range_read[l2id][cross_edge_key][0].value parent_neighboring_chunk_supervoxels_d[l2id] = edges[:, 1] parent_ids = cg_instance.get_roots(l2ids, stop_layer=cross_edge_layer) - cross_edges = [] + cross_edges = [np.empty([0, 2], dtype=basetypes.NODE_ID)] for i, l2id in enumerate(parent_neighboring_chunk_supervoxels_d): parent_id = parent_ids[i] neighboring_parent_ids = cg_instance.get_roots( @@ -196,10 +204,7 @@ def _read_atomic_chunk_cross_edges(cg_instance, chunk_coord, cross_edge_layer): [parent_id] * len(neighboring_parent_ids), dtype=basetypes.NODE_ID ) cross_edges.append(np.vstack([chunk_parent_ids, neighboring_parent_ids]).T) - - if cross_edges: - return np.unique(np.concatenate(cross_edges), axis=0) - return np.empty([0, 2], dtype=np.uint64) + return np.concatenate(cross_edges) def _write_connected_components( From 375e8bdd22b50b33ded8bd038ca0559ab3740cd5 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Sat, 19 Oct 2019 23:07:43 +0000 Subject: [PATCH 0301/1097] wip --- .../ingest/initialization/abstract_layers.py | 44 ++++++++++++------- .../ingest/initialization/helpers.py | 2 +- 2 files changed, 30 insertions(+), 16 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py 
b/pychunkedgraph/ingest/initialization/abstract_layers.py index 80d933359..f5b7f4c87 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -40,7 +40,7 @@ def add_layer( start = time.time() edge_ids = _get_cross_edges(cg_instance, layer_id, parent_coords) - print(f"_get_cross_edges: {time.time()-start}") + print(f"_get_cross_edges: {time.time()-start}, {len(edge_ids)}") # print(len(children_ids), len(edge_ids)) # Extract connected components @@ -142,6 +142,7 @@ def _get_cross_edges(cg_instance, layer_id, chunk_coord) -> List: multi_args = [] for layer2_chunks in chunked_l2chunk_list: multi_args.append((edge_ids_shared, cg_info, layer2_chunks, layer_id - 1)) + mu.multiprocess_func( _read_atomic_chunk_cross_edges_helper, multi_args, @@ -156,13 +157,28 @@ def _get_cross_edges(cg_instance, layer_id, chunk_coord) -> List: def _read_atomic_chunk_cross_edges_helper(args): edge_ids_shared, cg_info, layer2_chunks, cross_edge_layer = args - cg = ChunkedGraph(**cg_info) + cg_instance = ChunkedGraph(**cg_info) - cross_edges = [] + start = time.time() + cross_edges = [np.empty([0, 2], dtype=basetypes.NODE_ID)] for layer2_chunk in layer2_chunks: - edges = _read_atomic_chunk_cross_edges(cg, layer2_chunk, cross_edge_layer) + edges = _read_atomic_chunk_cross_edges( + cg_instance, layer2_chunk, cross_edge_layer + ) cross_edges.append(edges) cross_edges = np.concatenate(cross_edges) + print(f"reading raw edges {time.time()-start}s") + + start = time.time() + parents_1 = cg_instance.get_roots(cross_edges[:, 0], stop_layer=cross_edge_layer) + print(f"getting parents1 {time.time()-start}s") + + start = time.time() + parents_2 = cg_instance.get_roots(cross_edges[:, 1], stop_layer=cross_edge_layer) + print(f"getting parents2 {time.time()-start}s") + + cross_edges[:, 0] = parents_1 + cross_edges[:, 1] = parents_2 if len(cross_edges): cross_edges = np.unique(cross_edges, axis=0) 
edge_ids_shared.append(cross_edges) @@ -185,7 +201,10 @@ def _read_atomic_chunk_cross_edges(cg_instance, chunk_coord, cross_edge_layer): row_ids = np.array(row_ids, dtype=basetypes.NODE_ID) segment_ids = np.array([cg_instance.get_segment_id(r_id) for r_id in row_ids]) l2ids = _filter_latest_ids(row_ids, segment_ids, max_children_ids) + return _get_cross_edges_raw(range_read, l2ids, cross_edge_key) + +def _get_cross_edges_raw(range_read, l2ids, cross_edge_key): parent_neighboring_chunk_supervoxels_d = defaultdict(list) for l2id in l2ids: if not cross_edge_key in range_read[l2id]: @@ -193,18 +212,13 @@ def _read_atomic_chunk_cross_edges(cg_instance, chunk_coord, cross_edge_layer): edges = range_read[l2id][cross_edge_key][0].value parent_neighboring_chunk_supervoxels_d[l2id] = edges[:, 1] - parent_ids = cg_instance.get_roots(l2ids, stop_layer=cross_edge_layer) cross_edges = [np.empty([0, 2], dtype=basetypes.NODE_ID)] - for i, l2id in enumerate(parent_neighboring_chunk_supervoxels_d): - parent_id = parent_ids[i] - neighboring_parent_ids = cg_instance.get_roots( - parent_neighboring_chunk_supervoxels_d[l2id], stop_layer=cross_edge_layer - ) - chunk_parent_ids = np.array( - [parent_id] * len(neighboring_parent_ids), dtype=basetypes.NODE_ID - ) - cross_edges.append(np.vstack([chunk_parent_ids, neighboring_parent_ids]).T) - return np.concatenate(cross_edges) + for l2id in parent_neighboring_chunk_supervoxels_d: + nebor_svs = parent_neighboring_chunk_supervoxels_d[l2id] + chunk_parent_ids = np.array([l2id] * len(nebor_svs), dtype=basetypes.NODE_ID) + cross_edges.append(np.vstack([chunk_parent_ids, nebor_svs]).T) + cross_edges = np.concatenate(cross_edges) + return cross_edges def _write_connected_components( diff --git a/pychunkedgraph/ingest/initialization/helpers.py b/pychunkedgraph/ingest/initialization/helpers.py index 2819501bf..e0df8e7c7 100644 --- a/pychunkedgraph/ingest/initialization/helpers.py +++ b/pychunkedgraph/ingest/initialization/helpers.py @@ -57,7 +57,7 @@ 
def get_touching_atomic_chunks( def get_bounding_atomic_chunks( chunkedgraph_meta: ChunkedGraphMeta, layer: int, chunk_coords: Sequence[int] ) -> List: - """get atomic chunk coordinates along the boundary of the chunk""" + """get atomic chunk coordinates along the boundary of a chunk""" chunk_coords = np.array(chunk_coords, dtype=int) atomic_chunks = [] From 66fc537c1b75ee5b052a8a46b8ec260d50432239 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Sun, 20 Oct 2019 02:47:19 +0000 Subject: [PATCH 0302/1097] speedup get roots --- .../ingest/initialization/abstract_layers.py | 5 ++-- .../ingest/initialization/helpers.py | 30 +++++++++++++++++++ 2 files changed, 33 insertions(+), 2 deletions(-) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index f5b7f4c87..ec07335c5 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -15,6 +15,7 @@ import numpy as np from multiwrapper import multiprocessing_utils as mu +from .helpers import get_roots from .helpers import get_touching_atomic_chunks from ...utils.general import chunked from ...backend import flatgraph_utils @@ -170,11 +171,11 @@ def _read_atomic_chunk_cross_edges_helper(args): print(f"reading raw edges {time.time()-start}s") start = time.time() - parents_1 = cg_instance.get_roots(cross_edges[:, 0], stop_layer=cross_edge_layer) + parents_1 = get_roots(cg_instance, cross_edges[:, 0], layer=cross_edge_layer) print(f"getting parents1 {time.time()-start}s") start = time.time() - parents_2 = cg_instance.get_roots(cross_edges[:, 1], stop_layer=cross_edge_layer) + parents_2 = get_roots(cg_instance, cross_edges[:, 1], layer=cross_edge_layer) print(f"getting parents2 {time.time()-start}s") cross_edges[:, 0] = parents_1 diff --git a/pychunkedgraph/ingest/initialization/helpers.py b/pychunkedgraph/ingest/initialization/helpers.py index e0df8e7c7..d2ffbe642 100644 --- 
a/pychunkedgraph/ingest/initialization/helpers.py +++ b/pychunkedgraph/ingest/initialization/helpers.py @@ -4,6 +4,8 @@ import numpy as np from ...backend import ChunkedGraphMeta +from ...backend.chunkedgraph_utils import get_valid_timestamp +from ...backend.utils import basetypes def get_touching_atomic_chunks( @@ -86,3 +88,31 @@ def get_bounding_atomic_chunks( result.append(coords) return np.unique(np.array(result, dtype=int), axis=0) + + +def get_roots( + cg_instance, + node_ids: Sequence[np.uint64], + time_stamp=None, + layer: int = None, + n_tries: int = 1, +): + time_stamp = get_valid_timestamp(time_stamp) + layer = cg_instance.n_layers if not layer else min(cg_instance.n_layers, layer) + + layer_mask = np.ones(len(node_ids), dtype=np.bool) + layer_mask[cg_instance.get_chunk_layers(node_ids) >= layer] = False + parent_ids = np.array(node_ids, dtype=basetypes.NODE_ID) + for _ in range(int(layer + 1)): + filtered_ids = parent_ids[layer_mask] + unique_ids, inverse = np.unique(filtered_ids, return_inverse=True) + temp_parent_ids = cg_instance.get_parents(unique_ids, time_stamp=time_stamp) + if temp_parent_ids is None: + break + else: + parent_ids[layer_mask] = temp_parent_ids[inverse] + layer_mask[cg_instance.get_chunk_layers(parent_ids) >= layer] = False + if not np.any(cg_instance.get_chunk_layers(parent_ids) < layer): + break + + return parent_ids From 01321f1d52b74e0c72bcad2ff196b982036acf1c Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Sun, 20 Oct 2019 16:47:52 +0000 Subject: [PATCH 0303/1097] wip: refactor helper functions --- pychunkedgraph/backend/chunkedgraph.py | 33 +++-- pychunkedgraph/backend/chunks/__init__.py | 0 .../helpers.py => backend/chunks/atomic.py} | 34 +----- .../backend/connectivity/cross_edges.py | 115 ++++++++++++++++++ .../ingest/initialization/abstract_layers.py | 110 +---------------- 5 files changed, 138 insertions(+), 154 deletions(-) create mode 100644 pychunkedgraph/backend/chunks/__init__.py rename 
pychunkedgraph/{ingest/initialization/helpers.py => backend/chunks/atomic.py} (73%) create mode 100644 pychunkedgraph/backend/connectivity/cross_edges.py diff --git a/pychunkedgraph/backend/chunkedgraph.py b/pychunkedgraph/backend/chunkedgraph.py index 63ea8a405..cbd84a870 100644 --- a/pychunkedgraph/backend/chunkedgraph.py +++ b/pychunkedgraph/backend/chunkedgraph.py @@ -1696,30 +1696,25 @@ def get_roots( :return: np.uint64 """ time_stamp = get_valid_timestamp(time_stamp) - parent_ids = np.array(node_ids, dtype=basetypes.NODE_ID) - if stop_layer is not None: - stop_layer = min(self.n_layers, stop_layer) - else: - stop_layer = self.n_layers - - node_mask = np.ones(len(node_ids), dtype=np.bool) - node_mask[self.get_chunk_layers(node_ids) >= stop_layer] = False + stop_layer = self.n_layers if not stop_layer else min(self.n_layers, stop_layer) + layer_mask = np.ones(len(node_ids), dtype=np.bool) + for _ in range(n_tries): + layer_mask[self.get_chunk_layers(node_ids) >= stop_layer] = False parent_ids = np.array(node_ids, dtype=basetypes.NODE_ID) for _ in range(int(stop_layer + 1)): - temp_parent_ids = self.get_parents( - parent_ids[node_mask], time_stamp=time_stamp - ) - if temp_parent_ids is None: + filtered_ids = parent_ids[layer_mask] + unique_ids, inverse = np.unique(filtered_ids, return_inverse=True) + temp_ids = self.get_parents(unique_ids, time_stamp=time_stamp) + if temp_ids is None: break else: - parent_ids[node_mask] = temp_parent_ids - node_mask[self.get_chunk_layers(parent_ids) >= stop_layer] = False - if np.all(~node_mask): - break - - if np.all(self.get_chunk_layers(parent_ids) >= stop_layer): - break + parent_ids[layer_mask] = temp_ids[inverse] + layer_mask[self.get_chunk_layers(parent_ids) >= stop_layer] = False + if not np.any(self.get_chunk_layers(parent_ids) < stop_layer): + return parent_ids + if not np.any(self.get_chunk_layers(parent_ids) < stop_layer): + return parent_ids else: time.sleep(0.5) return parent_ids diff --git 
a/pychunkedgraph/backend/chunks/__init__.py b/pychunkedgraph/backend/chunks/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pychunkedgraph/ingest/initialization/helpers.py b/pychunkedgraph/backend/chunks/atomic.py similarity index 73% rename from pychunkedgraph/ingest/initialization/helpers.py rename to pychunkedgraph/backend/chunks/atomic.py index d2ffbe642..01c35e6d5 100644 --- a/pychunkedgraph/ingest/initialization/helpers.py +++ b/pychunkedgraph/backend/chunks/atomic.py @@ -3,9 +3,9 @@ from itertools import product import numpy as np -from ...backend import ChunkedGraphMeta -from ...backend.chunkedgraph_utils import get_valid_timestamp -from ...backend.utils import basetypes +from .. import ChunkedGraphMeta +from ..chunkedgraph_utils import get_valid_timestamp +from ..utils import basetypes def get_touching_atomic_chunks( @@ -88,31 +88,3 @@ def get_bounding_atomic_chunks( result.append(coords) return np.unique(np.array(result, dtype=int), axis=0) - - -def get_roots( - cg_instance, - node_ids: Sequence[np.uint64], - time_stamp=None, - layer: int = None, - n_tries: int = 1, -): - time_stamp = get_valid_timestamp(time_stamp) - layer = cg_instance.n_layers if not layer else min(cg_instance.n_layers, layer) - - layer_mask = np.ones(len(node_ids), dtype=np.bool) - layer_mask[cg_instance.get_chunk_layers(node_ids) >= layer] = False - parent_ids = np.array(node_ids, dtype=basetypes.NODE_ID) - for _ in range(int(layer + 1)): - filtered_ids = parent_ids[layer_mask] - unique_ids, inverse = np.unique(filtered_ids, return_inverse=True) - temp_parent_ids = cg_instance.get_parents(unique_ids, time_stamp=time_stamp) - if temp_parent_ids is None: - break - else: - parent_ids[layer_mask] = temp_parent_ids[inverse] - layer_mask[cg_instance.get_chunk_layers(parent_ids) >= layer] = False - if not np.any(cg_instance.get_chunk_layers(parent_ids) < layer): - break - - return parent_ids diff --git a/pychunkedgraph/backend/connectivity/cross_edges.py 
b/pychunkedgraph/backend/connectivity/cross_edges.py new file mode 100644 index 000000000..4a23bc73a --- /dev/null +++ b/pychunkedgraph/backend/connectivity/cross_edges.py @@ -0,0 +1,115 @@ +import time +import multiprocessing as mp +from collections import defaultdict +from typing import Optional +from typing import Sequence +from typing import List + +import numpy as np +from multiwrapper import multiprocessing_utils as mu + +from ...utils.general import chunked +from ...backend import flatgraph_utils +from ...backend.utils import basetypes +from ...backend.utils import serializers +from ...backend.utils import column_keys +from ...backend.chunkedgraph import ChunkedGraph +from ...backend.chunkedgraph_utils import get_valid_timestamp +from ...backend.chunks.atomic import get_touching_atomic_chunks + + +def get_children_chunk_cross_edges(cg_instance, layer_id, chunk_coord) -> List: + layer2_chunks = get_touching_atomic_chunks( + cg_instance.meta, layer_id, chunk_coord, include_both=False + ) + if not len(layer2_chunks): + return [] + + cg_info = cg_instance.get_serialized_info(credentials=False) + with mp.Manager() as manager: + edge_ids_shared = manager.list() + edge_ids_shared.append(np.empty([0, 2], dtype=basetypes.NODE_ID)) + + chunked_l2chunk_list = chunked( + layer2_chunks, len(layer2_chunks) // mp.cpu_count() + ) + multi_args = [] + for layer2_chunks in chunked_l2chunk_list: + multi_args.append((edge_ids_shared, cg_info, layer2_chunks, layer_id - 1)) + + mu.multiprocess_func( + _get_cross_edges_helper, + multi_args, + n_threads=min(len(multi_args), mp.cpu_count()), + ) + + cross_edges = np.concatenate(edge_ids_shared) + if len(cross_edges): + cross_edges = np.unique(cross_edges, axis=0) + return list(cross_edges) + + +def _get_cross_edges_helper(args): + edge_ids_shared, cg_info, layer2_chunks, cross_edge_layer = args + cg_instance = ChunkedGraph(**cg_info) + + start = time.time() + cross_edges = [np.empty([0, 2], dtype=basetypes.NODE_ID)] + for 
layer2_chunk in layer2_chunks: + edges = _read_atomic_chunk_cross_edges( + cg_instance, layer2_chunk, cross_edge_layer + ) + cross_edges.append(edges) + cross_edges = np.concatenate(cross_edges) + print(f"reading raw edges {time.time()-start}s") + + start = time.time() + parents_1 = cg_instance.get_roots(cross_edges[:, 0], stop_layer=cross_edge_layer) + print(f"getting parents1 {time.time()-start}s") + + start = time.time() + parents_2 = cg_instance.get_roots(cross_edges[:, 1], stop_layer=cross_edge_layer) + print(f"getting parents2 {time.time()-start}s") + + cross_edges[:, 0] = parents_1 + cross_edges[:, 1] = parents_2 + if len(cross_edges): + cross_edges = np.unique(cross_edges, axis=0) + edge_ids_shared.append(cross_edges) + + +def _read_atomic_chunk_cross_edges(cg_instance, chunk_coord, cross_edge_layer): + x, y, z = chunk_coord + child_key = column_keys.Hierarchy.Child + cross_edge_key = column_keys.Connectivity.CrossChunkEdge[cross_edge_layer] + range_read = cg_instance.range_read_chunk( + 2, x, y, z, columns=[child_key, cross_edge_key] + ) + + row_ids = [] + max_children_ids = [] + for row_id, row_data in range_read.items(): + row_ids.append(row_id) + max_children_ids.append(np.max(row_data[child_key][0].value)) + + row_ids = np.array(row_ids, dtype=basetypes.NODE_ID) + segment_ids = np.array([cg_instance.get_segment_id(r_id) for r_id in row_ids]) + l2ids = _filter_latest_ids(row_ids, segment_ids, max_children_ids) + return _extract_atomic_cross_edges(range_read, l2ids, cross_edge_key) + + +def _extract_atomic_cross_edges(range_read, l2ids, cross_edge_key): + parent_neighboring_chunk_supervoxels_d = defaultdict(list) + for l2id in l2ids: + if not cross_edge_key in range_read[l2id]: + continue + edges = range_read[l2id][cross_edge_key][0].value + parent_neighboring_chunk_supervoxels_d[l2id] = edges[:, 1] + + cross_edges = [np.empty([0, 2], dtype=basetypes.NODE_ID)] + for l2id in parent_neighboring_chunk_supervoxels_d: + nebor_svs = 
parent_neighboring_chunk_supervoxels_d[l2id] + chunk_parent_ids = np.array([l2id] * len(nebor_svs), dtype=basetypes.NODE_ID) + cross_edges.append(np.vstack([chunk_parent_ids, nebor_svs]).T) + cross_edges = np.concatenate(cross_edges) + return cross_edges diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index ec07335c5..80f6443c5 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -3,11 +3,9 @@ """ import time -import math import datetime import multiprocessing as mp from collections import defaultdict -from collections import abc from typing import Optional from typing import Sequence from typing import List @@ -15,14 +13,15 @@ import numpy as np from multiwrapper import multiprocessing_utils as mu -from .helpers import get_roots -from .helpers import get_touching_atomic_chunks from ...utils.general import chunked from ...backend import flatgraph_utils from ...backend.utils import basetypes +from ...backend.utils import serializers +from ...backend.utils import column_keys from ...backend.chunkedgraph import ChunkedGraph from ...backend.chunkedgraph_utils import get_valid_timestamp -from ...backend.utils import serializers, column_keys +from ...backend.chunks.atomic import get_touching_atomic_chunks +from ...backend.connectivity.cross_edges import get_children_chunk_cross_edges def add_layer( @@ -40,8 +39,8 @@ def add_layer( print(f"_read_children_chunks: {time.time()-start}, id count {len(children_ids)}") start = time.time() - edge_ids = _get_cross_edges(cg_instance, layer_id, parent_coords) - print(f"_get_cross_edges: {time.time()-start}, {len(edge_ids)}") + edge_ids = get_children_chunk_cross_edges(cg_instance, layer_id, parent_coords) + print(f"get_children_chunk_cross_edges: {time.time()-start}, {len(edge_ids)}") # print(len(children_ids), len(edge_ids)) # Extract connected components @@ -125,103 +124,6 
@@ def _read_chunk(children_ids_shared, cg_instance, layer_id, chunk_coord): children_ids_shared.append(row_ids) -def _get_cross_edges(cg_instance, layer_id, chunk_coord) -> List: - layer2_chunks = get_touching_atomic_chunks( - cg_instance.meta, layer_id, chunk_coord, include_both=False - ) - if not len(layer2_chunks): - return [] - - cg_info = cg_instance.get_serialized_info(credentials=False) - with mp.Manager() as manager: - edge_ids_shared = manager.list() - edge_ids_shared.append(np.empty([0, 2], dtype=basetypes.NODE_ID)) - - chunked_l2chunk_list = chunked( - layer2_chunks, len(layer2_chunks) // mp.cpu_count() - ) - multi_args = [] - for layer2_chunks in chunked_l2chunk_list: - multi_args.append((edge_ids_shared, cg_info, layer2_chunks, layer_id - 1)) - - mu.multiprocess_func( - _read_atomic_chunk_cross_edges_helper, - multi_args, - n_threads=min(len(multi_args), mp.cpu_count()), - ) - - cross_edges = np.concatenate(edge_ids_shared) - if len(cross_edges): - cross_edges = np.unique(cross_edges, axis=0) - return list(cross_edges) - - -def _read_atomic_chunk_cross_edges_helper(args): - edge_ids_shared, cg_info, layer2_chunks, cross_edge_layer = args - cg_instance = ChunkedGraph(**cg_info) - - start = time.time() - cross_edges = [np.empty([0, 2], dtype=basetypes.NODE_ID)] - for layer2_chunk in layer2_chunks: - edges = _read_atomic_chunk_cross_edges( - cg_instance, layer2_chunk, cross_edge_layer - ) - cross_edges.append(edges) - cross_edges = np.concatenate(cross_edges) - print(f"reading raw edges {time.time()-start}s") - - start = time.time() - parents_1 = get_roots(cg_instance, cross_edges[:, 0], layer=cross_edge_layer) - print(f"getting parents1 {time.time()-start}s") - - start = time.time() - parents_2 = get_roots(cg_instance, cross_edges[:, 1], layer=cross_edge_layer) - print(f"getting parents2 {time.time()-start}s") - - cross_edges[:, 0] = parents_1 - cross_edges[:, 1] = parents_2 - if len(cross_edges): - cross_edges = np.unique(cross_edges, axis=0) - 
edge_ids_shared.append(cross_edges) - - -def _read_atomic_chunk_cross_edges(cg_instance, chunk_coord, cross_edge_layer): - x, y, z = chunk_coord - child_key = column_keys.Hierarchy.Child - cross_edge_key = column_keys.Connectivity.CrossChunkEdge[cross_edge_layer] - range_read = cg_instance.range_read_chunk( - 2, x, y, z, columns=[child_key, cross_edge_key] - ) - - row_ids = [] - max_children_ids = [] - for row_id, row_data in range_read.items(): - row_ids.append(row_id) - max_children_ids.append(np.max(row_data[child_key][0].value)) - - row_ids = np.array(row_ids, dtype=basetypes.NODE_ID) - segment_ids = np.array([cg_instance.get_segment_id(r_id) for r_id in row_ids]) - l2ids = _filter_latest_ids(row_ids, segment_ids, max_children_ids) - return _get_cross_edges_raw(range_read, l2ids, cross_edge_key) - - -def _get_cross_edges_raw(range_read, l2ids, cross_edge_key): - parent_neighboring_chunk_supervoxels_d = defaultdict(list) - for l2id in l2ids: - if not cross_edge_key in range_read[l2id]: - continue - edges = range_read[l2id][cross_edge_key][0].value - parent_neighboring_chunk_supervoxels_d[l2id] = edges[:, 1] - - cross_edges = [np.empty([0, 2], dtype=basetypes.NODE_ID)] - for l2id in parent_neighboring_chunk_supervoxels_d: - nebor_svs = parent_neighboring_chunk_supervoxels_d[l2id] - chunk_parent_ids = np.array([l2id] * len(nebor_svs), dtype=basetypes.NODE_ID) - cross_edges.append(np.vstack([chunk_parent_ids, nebor_svs]).T) - cross_edges = np.concatenate(cross_edges) - return cross_edges - - def _write_connected_components( cg_instance, layer_id, parent_chunk_id, ccs, graph_ids, time_stamp ) -> None: From cb6949adcb3bd7ce8c70d473c8f2cb405637fc34 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Sun, 20 Oct 2019 17:21:14 +0000 Subject: [PATCH 0304/1097] wip: refactor helper functions --- pychunkedgraph/backend/chunkedgraph_utils.py | 14 ++++++++++++++ .../backend/connectivity/cross_edges.py | 3 ++- .../ingest/initialization/abstract_layers.py | 16 
++-------------- 3 files changed, 18 insertions(+), 15 deletions(-) diff --git a/pychunkedgraph/backend/chunkedgraph_utils.py b/pychunkedgraph/backend/chunkedgraph_utils.py index 99e7ad642..98036dc0c 100644 --- a/pychunkedgraph/backend/chunkedgraph_utils.py +++ b/pychunkedgraph/backend/chunkedgraph_utils.py @@ -6,6 +6,7 @@ from typing import Sequence from typing import Tuple from itertools import product +from collections import defaultdict import numpy as np import pandas as pd @@ -285,3 +286,16 @@ def get_chunks_boundary(voxel_boundary, chunk_size): """returns number of chunks in each dimension""" return np.ceil((voxel_boundary / chunk_size)).astype(np.int) + +def filter_failed_node_ids(row_ids, segment_ids, max_children_ids): + """filters node ids that were created by failed/in-complete jobs""" + sorting = np.argsort(segment_ids)[::-1] + row_ids = row_ids[sorting] + max_child_ids = np.array(max_children_ids)[sorting] + + counter = defaultdict(int) + max_child_ids_occ_so_far = np.zeros(len(max_child_ids), dtype=np.int) + for i_row in range(len(max_child_ids)): + max_child_ids_occ_so_far[i_row] = counter[max_child_ids[i_row]] + counter[max_child_ids[i_row]] += 1 + return row_ids[max_child_ids_occ_so_far == 0] diff --git a/pychunkedgraph/backend/connectivity/cross_edges.py b/pychunkedgraph/backend/connectivity/cross_edges.py index 4a23bc73a..708b470c3 100644 --- a/pychunkedgraph/backend/connectivity/cross_edges.py +++ b/pychunkedgraph/backend/connectivity/cross_edges.py @@ -15,6 +15,7 @@ from ...backend.utils import column_keys from ...backend.chunkedgraph import ChunkedGraph from ...backend.chunkedgraph_utils import get_valid_timestamp +from ...backend.chunkedgraph_utils import filter_failed_node_ids from ...backend.chunks.atomic import get_touching_atomic_chunks @@ -94,7 +95,7 @@ def _read_atomic_chunk_cross_edges(cg_instance, chunk_coord, cross_edge_layer): row_ids = np.array(row_ids, dtype=basetypes.NODE_ID) segment_ids = 
np.array([cg_instance.get_segment_id(r_id) for r_id in row_ids]) - l2ids = _filter_latest_ids(row_ids, segment_ids, max_children_ids) + l2ids = filter_failed_node_ids(row_ids, segment_ids, max_children_ids) return _extract_atomic_cross_edges(range_read, l2ids, cross_edge_key) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 80f6443c5..493926859 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -20,6 +20,7 @@ from ...backend.utils import column_keys from ...backend.chunkedgraph import ChunkedGraph from ...backend.chunkedgraph_utils import get_valid_timestamp +from ...backend.chunkedgraph_utils import filter_failed_node_ids from ...backend.chunks.atomic import get_touching_atomic_chunks from ...backend.connectivity.cross_edges import get_children_chunk_cross_edges @@ -94,19 +95,6 @@ def _read_chunk_helper(args): _read_chunk(children_ids_shared, cg_instance, layer_id, chunk_coord) -def _filter_latest_ids(row_ids, segment_ids, max_children_ids): - sorting = np.argsort(segment_ids)[::-1] - row_ids = row_ids[sorting] - max_child_ids = np.array(max_children_ids, dtype=basetypes.NODE_ID)[sorting] - - counter = defaultdict(int) - max_child_ids_occ_so_far = np.zeros(len(max_child_ids), dtype=np.int) - for i_row in range(len(max_child_ids)): - max_child_ids_occ_so_far[i_row] = counter[max_child_ids[i_row]] - counter[max_child_ids[i_row]] += 1 - return row_ids[max_child_ids_occ_so_far == 0] - - def _read_chunk(children_ids_shared, cg_instance, layer_id, chunk_coord): x, y, z = chunk_coord range_read = cg_instance.range_read_chunk( @@ -120,7 +108,7 @@ def _read_chunk(children_ids_shared, cg_instance, layer_id, chunk_coord): row_ids = np.array(row_ids, dtype=basetypes.NODE_ID) segment_ids = np.array([cg_instance.get_segment_id(r_id) for r_id in row_ids]) - row_ids = _filter_latest_ids(row_ids, segment_ids, 
max_children_ids) + row_ids = filter_failed_node_ids(row_ids, segment_ids, max_children_ids) children_ids_shared.append(row_ids) From 88024d3a13a03d985886a88d9394bf6c7659db4b Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Sun, 20 Oct 2019 22:54:15 +0000 Subject: [PATCH 0305/1097] get singleton component cross edge layer --- .../backend/connectivity/cross_edges.py | 139 ++++++++++++++---- .../ingest/initialization/abstract_layers.py | 70 +++++---- 2 files changed, 152 insertions(+), 57 deletions(-) diff --git a/pychunkedgraph/backend/connectivity/cross_edges.py b/pychunkedgraph/backend/connectivity/cross_edges.py index 708b470c3..cc7e83be2 100644 --- a/pychunkedgraph/backend/connectivity/cross_edges.py +++ b/pychunkedgraph/backend/connectivity/cross_edges.py @@ -4,6 +4,7 @@ from typing import Optional from typing import Sequence from typing import List +from typing import Dict import numpy as np from multiwrapper import multiprocessing_utils as mu @@ -17,9 +18,10 @@ from ...backend.chunkedgraph_utils import get_valid_timestamp from ...backend.chunkedgraph_utils import filter_failed_node_ids from ...backend.chunks.atomic import get_touching_atomic_chunks +from ...backend.chunks.atomic import get_bounding_atomic_chunks -def get_children_chunk_cross_edges(cg_instance, layer_id, chunk_coord) -> List: +def get_children_chunk_cross_edges(cg_instance, layer_id, chunk_coord) -> np.ndarray: layer2_chunks = get_touching_atomic_chunks( cg_instance.meta, layer_id, chunk_coord, include_both=False ) @@ -39,18 +41,18 @@ def get_children_chunk_cross_edges(cg_instance, layer_id, chunk_coord) -> List: multi_args.append((edge_ids_shared, cg_info, layer2_chunks, layer_id - 1)) mu.multiprocess_func( - _get_cross_edges_helper, + _get_children_chunk_cross_edges_helper, multi_args, n_threads=min(len(multi_args), mp.cpu_count()), ) cross_edges = np.concatenate(edge_ids_shared) - if len(cross_edges): - cross_edges = np.unique(cross_edges, axis=0) - return list(cross_edges) + if 
cross_edges.size: + return np.unique(cross_edges, axis=0) + return cross_edges -def _get_cross_edges_helper(args): +def _get_children_chunk_cross_edges_helper(args): edge_ids_shared, cg_info, layer2_chunks, cross_edge_layer = args cg_instance = ChunkedGraph(**cg_info) @@ -79,32 +81,17 @@ def _get_cross_edges_helper(args): edge_ids_shared.append(cross_edges) -def _read_atomic_chunk_cross_edges(cg_instance, chunk_coord, cross_edge_layer): - x, y, z = chunk_coord - child_key = column_keys.Hierarchy.Child - cross_edge_key = column_keys.Connectivity.CrossChunkEdge[cross_edge_layer] - range_read = cg_instance.range_read_chunk( - 2, x, y, z, columns=[child_key, cross_edge_key] - ) - - row_ids = [] - max_children_ids = [] - for row_id, row_data in range_read.items(): - row_ids.append(row_id) - max_children_ids.append(np.max(row_data[child_key][0].value)) +def _read_atomic_chunk_cross_edges( + cg_instance, chunk_coord, cross_edge_layer +) -> np.ndarray: + cross_edge_col = column_keys.Connectivity.CrossChunkEdge[cross_edge_layer] + range_read, l2ids = _read_atomic_chunk(cg_instance, chunk_coord, [cross_edge_layer]) - row_ids = np.array(row_ids, dtype=basetypes.NODE_ID) - segment_ids = np.array([cg_instance.get_segment_id(r_id) for r_id in row_ids]) - l2ids = filter_failed_node_ids(row_ids, segment_ids, max_children_ids) - return _extract_atomic_cross_edges(range_read, l2ids, cross_edge_key) - - -def _extract_atomic_cross_edges(range_read, l2ids, cross_edge_key): parent_neighboring_chunk_supervoxels_d = defaultdict(list) for l2id in l2ids: - if not cross_edge_key in range_read[l2id]: + if not cross_edge_col in range_read[l2id]: continue - edges = range_read[l2id][cross_edge_key][0].value + edges = range_read[l2id][cross_edge_col][0].value parent_neighboring_chunk_supervoxels_d[l2id] = edges[:, 1] cross_edges = [np.empty([0, 2], dtype=basetypes.NODE_ID)] @@ -114,3 +101,99 @@ def _extract_atomic_cross_edges(range_read, l2ids, cross_edge_key): 
cross_edges.append(np.vstack([chunk_parent_ids, nebor_svs]).T) cross_edges = np.concatenate(cross_edges) return cross_edges + + +def get_chunk_nodes_cross_edge_layer(cg_instance, layer_id, chunk_coord) -> Dict: + """ + gets nodes in a chunk that are part of cross chunk edges + return_type dict {node_id: layer} + the lowest layer (>= current layer) at which a node_id is part of a cross edge + """ + layer2_chunks = get_bounding_atomic_chunks(cg_instance.meta, layer_id, chunk_coord) + node_layer_d = defaultdict(lambda: cg_instance.n_layers) + if not len(layer2_chunks): + return node_layer_d + + cg_info = cg_instance.get_serialized_info(credentials=False) + with mp.Manager() as manager: + node_layer_tuples_shared = manager.list() + chunked_l2chunk_list = chunked( + layer2_chunks, len(layer2_chunks) // mp.cpu_count() + ) + multi_args = [] + for layer2_chunks in chunked_l2chunk_list: + multi_args.append( + (node_layer_tuples_shared, cg_info, layer2_chunks, layer_id) + ) + + mu.multiprocess_func( + _get_chunk_nodes_cross_edge_layer_helper, + multi_args, + n_threads=min(len(multi_args), mp.cpu_count()), + ) + + node_ids = [] + layers = [] + for tup in node_layer_tuples_shared: + node_ids.append(tup[0]) + layers.append(tup[1]) + + node_ids = np.concatenate(node_ids) + layers = np.concatenate(layers) + + for i, node_id in enumerate(node_ids): + node_layer_d[node_id] = min(node_layer_d[node_id], layers[i]) + return node_layer_d + + +def _get_chunk_nodes_cross_edge_layer_helper(args): + node_layer_tuples_shared, cg_info, layer2_chunks, layer_id = args + cg_instance = ChunkedGraph(**cg_info) + + start = time.time() + node_layer_d = {} + for layer2_chunk in layer2_chunks: + chunk_node_layer_d = _read_atomic_chunk_cross_edge_nodes( + cg_instance, layer2_chunk, range(layer_id, cg_instance.n_layers + 1) + ) + node_layer_d.update(**chunk_node_layer_d) + print(f"reading raw edges {time.time()-start}s") + + start = time.time() + l2ids = np.fromiter(node_layer_d.keys(), 
dtype=basetypes.NODE_ID) + parents = cg_instance.get_roots(l2ids, stop_layer=layer_id - 1) + layers = np.fromiter(node_layer_d.values(), dtype=np.int) + print(f"getting parents {time.time()-start}s") + + node_layer_tuples_shared.append((parents, layers)) + + +def _read_atomic_chunk_cross_edge_nodes(cg_instance, chunk_coord, cross_edge_layers): + node_layer_d = {} + range_read, l2ids = _read_atomic_chunk(cg_instance, chunk_coord, cross_edge_layers) + for l2id in l2ids: + node_layer_d[l2id] = cg_instance.n_layers + for layer in cross_edge_layers: + if column_keys.Connectivity.CrossChunkEdge[layer] in range_read[l2id]: + node_layer_d[l2id] = layer + break + return node_layer_d + + +def _read_atomic_chunk(cg_instance, chunk_coord, layers): + """ utility function to read atomic chunk data """ + x, y, z = chunk_coord + child_col = column_keys.Hierarchy.Child + columns = [child_col] + [column_keys.Connectivity.CrossChunkEdge[l] for l in layers] + range_read = cg_instance.range_read_chunk(2, x, y, z, columns=columns) + + row_ids = [] + max_children_ids = [] + for row_id, row_data in range_read.items(): + row_ids.append(row_id) + max_children_ids.append(np.max(row_data[child_col][0].value)) + + row_ids = np.array(row_ids, dtype=basetypes.NODE_ID) + segment_ids = np.array([cg_instance.get_segment_id(r_id) for r_id in row_ids]) + l2ids = filter_failed_node_ids(row_ids, segment_ids, max_children_ids) + return (range_read, l2ids) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 493926859..020eb7292 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -23,6 +23,7 @@ from ...backend.chunkedgraph_utils import filter_failed_node_ids from ...backend.chunks.atomic import get_touching_atomic_chunks from ...backend.connectivity.cross_edges import get_children_chunk_cross_edges +from ...backend.connectivity.cross_edges 
import get_chunk_nodes_cross_edge_layer def add_layer( @@ -33,14 +34,13 @@ def add_layer( *, time_stamp: Optional[datetime.datetime] = None, ) -> None: - x, y, z = parent_coords - start = time.time() children_ids = _read_children_chunks(cg_instance, layer_id, children_coords) print(f"_read_children_chunks: {time.time()-start}, id count {len(children_ids)}") start = time.time() edge_ids = get_children_chunk_cross_edges(cg_instance, layer_id, parent_coords) + edge_ids = edge_ids.tolist() print(f"get_children_chunk_cross_edges: {time.time()-start}, {len(edge_ids)}") # print(len(children_ids), len(edge_ids)) @@ -59,13 +59,13 @@ def add_layer( _write_connected_components( cg_instance, layer_id, - cg_instance.get_chunk_id(layer=layer_id, x=x, y=y, z=z), + parent_coords, ccs, graph_ids, - time_stamp, + get_valid_timestamp(time_stamp), ) print(f"_write_connected_components: {time.time()-start}") - return f"{layer_id}_{'_'.join(map(str, (x, y, z)))}" + return f"{layer_id}_{'_'.join(map(str, parent_coords))}" def _read_children_chunks(cg_instance, layer_id, children_coords): @@ -113,48 +113,60 @@ def _read_chunk(children_ids_shared, cg_instance, layer_id, chunk_coord): def _write_connected_components( - cg_instance, layer_id, parent_chunk_id, ccs, graph_ids, time_stamp + cg_instance, layer_id, parent_coords, ccs, graph_ids, time_stamp ) -> None: if not ccs: return - ccs_with_node_ids = [] - for cc in ccs: - ccs_with_node_ids.append(graph_ids[cc]) + node_layer_d = get_chunk_nodes_cross_edge_layer( + cg_instance, layer_id, parent_coords + ) - chunked_ccs = chunked(ccs_with_node_ids, len(ccs_with_node_ids) // mp.cpu_count()) - cg_info = cg_instance.get_serialized_info(credentials=False) - multi_args = [] + with mp.Manager() as manager: + node_layer_d_shared = manager.dict(**node_layer_d) + ccs_with_node_ids = [] + for cc in ccs: + ccs_with_node_ids.append(graph_ids[cc]) - for ccs in chunked_ccs: - multi_args.append((cg_info, layer_id, parent_chunk_id, ccs, time_stamp)) - 
mu.multiprocess_func( - _write_components_helper, - multi_args, - n_threads=min(len(multi_args), mp.cpu_count()), - ) + chunked_ccs = chunked( + ccs_with_node_ids, len(ccs_with_node_ids) // mp.cpu_count() + ) + cg_info = cg_instance.get_serialized_info(credentials=False) + multi_args = [] + for ccs in chunked_ccs: + multi_args.append( + (cg_info, layer_id, parent_coords, ccs, node_layer_d_shared, time_stamp) + ) + mu.multiprocess_func( + _write_components_helper, + multi_args, + n_threads=min(len(multi_args), mp.cpu_count()), + ) -def _write_components_helper(args): - cg_info, layer_id, parent_chunk_id, ccs, time_stamp = args - _write_components( - ChunkedGraph(**cg_info), layer_id, parent_chunk_id, ccs, time_stamp - ) +def _write_components_helper(args): + cg_info, layer_id, parent_coords, ccs, node_layer_d_shared, time_stamp = args + cg_instance = ChunkedGraph(**cg_info) -def _write_components(cg_instance, layer_id, parent_chunk_id, ccs, time_stamp): - time_stamp = get_valid_timestamp(time_stamp) - cc_connections = {l: [] for l in (layer_id, cg_instance.n_layers)} + parent_layer_ids = range(layer_id, cg_instance.n_layers + 1) + cc_connections = {l: [] for l in parent_layer_ids} for node_ids in ccs: if cg_instance.use_skip_connections and len(node_ids) == 1: - cc_connections[cg_instance.n_layers].append(node_ids) + layer = node_layer_d_shared.get(node_ids[0], cg_instance.n_layers) + cc_connections[layer].append(node_ids) else: cc_connections[layer_id].append(node_ids) rows = [] + x, y, z = parent_coords + parent_chunk_id = cg_instance.get_chunk_id(layer=layer_id, x=x, y=y, z=z) parent_chunk_id_dict = cg_instance.get_parent_chunk_id_dict(parent_chunk_id) # Iterate through layers - for parent_layer_id in (layer_id, cg_instance.n_layers): + for parent_layer_id in parent_layer_ids: + if len(cc_connections[parent_layer_id]) == 0: + continue + parent_chunk_id = parent_chunk_id_dict[parent_layer_id] reserved_parent_ids = cg_instance.get_unique_node_id_range( 
parent_chunk_id, step=len(cc_connections[parent_layer_id]) From 27565c6cf0dd91f9945de99c733033c6d01e53c5 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Sun, 20 Oct 2019 23:29:53 +0000 Subject: [PATCH 0306/1097] fix some issues --- .../backend/connectivity/cross_edges.py | 24 +++++++++---------- .../ingest/initialization/abstract_layers.py | 4 +++- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/pychunkedgraph/backend/connectivity/cross_edges.py b/pychunkedgraph/backend/connectivity/cross_edges.py index cc7e83be2..b1cbbb443 100644 --- a/pychunkedgraph/backend/connectivity/cross_edges.py +++ b/pychunkedgraph/backend/connectivity/cross_edges.py @@ -56,7 +56,7 @@ def _get_children_chunk_cross_edges_helper(args): edge_ids_shared, cg_info, layer2_chunks, cross_edge_layer = args cg_instance = ChunkedGraph(**cg_info) - start = time.time() + # start = time.time() cross_edges = [np.empty([0, 2], dtype=basetypes.NODE_ID)] for layer2_chunk in layer2_chunks: edges = _read_atomic_chunk_cross_edges( @@ -64,15 +64,15 @@ def _get_children_chunk_cross_edges_helper(args): ) cross_edges.append(edges) cross_edges = np.concatenate(cross_edges) - print(f"reading raw edges {time.time()-start}s") + # print(f"reading raw edges {time.time()-start}s") - start = time.time() + # start = time.time() parents_1 = cg_instance.get_roots(cross_edges[:, 0], stop_layer=cross_edge_layer) - print(f"getting parents1 {time.time()-start}s") + # print(f"getting parents1 {time.time()-start}s") - start = time.time() + # start = time.time() parents_2 = cg_instance.get_roots(cross_edges[:, 1], stop_layer=cross_edge_layer) - print(f"getting parents2 {time.time()-start}s") + # print(f"getting parents2 {time.time()-start}s") cross_edges[:, 0] = parents_1 cross_edges[:, 1] = parents_2 @@ -143,27 +143,27 @@ def get_chunk_nodes_cross_edge_layer(cg_instance, layer_id, chunk_coord) -> Dict for i, node_id in enumerate(node_ids): node_layer_d[node_id] = min(node_layer_d[node_id], layers[i]) - 
return node_layer_d + return {**node_layer_d} def _get_chunk_nodes_cross_edge_layer_helper(args): node_layer_tuples_shared, cg_info, layer2_chunks, layer_id = args cg_instance = ChunkedGraph(**cg_info) - start = time.time() + # start = time.time() node_layer_d = {} for layer2_chunk in layer2_chunks: chunk_node_layer_d = _read_atomic_chunk_cross_edge_nodes( cg_instance, layer2_chunk, range(layer_id, cg_instance.n_layers + 1) ) - node_layer_d.update(**chunk_node_layer_d) - print(f"reading raw edges {time.time()-start}s") + node_layer_d.update(chunk_node_layer_d) + # print(f"reading raw edges {time.time()-start}s") - start = time.time() + # start = time.time() l2ids = np.fromiter(node_layer_d.keys(), dtype=basetypes.NODE_ID) parents = cg_instance.get_roots(l2ids, stop_layer=layer_id - 1) layers = np.fromiter(node_layer_d.values(), dtype=np.int) - print(f"getting parents {time.time()-start}s") + # print(f"getting parents {time.time()-start}s") node_layer_tuples_shared.append((parents, layers)) diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index 020eb7292..fdf653aaa 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -118,12 +118,14 @@ def _write_connected_components( if not ccs: return + start = time.time() node_layer_d = get_chunk_nodes_cross_edge_layer( cg_instance, layer_id, parent_coords ) + print(f"node_layer_d: {time.time()-start}, {len(node_layer_d)}") with mp.Manager() as manager: - node_layer_d_shared = manager.dict(**node_layer_d) + node_layer_d_shared = manager.dict(node_layer_d) ccs_with_node_ids = [] for cc in ccs: ccs_with_node_ids.append(graph_ids[cc]) From e382fd84bb619b8cc711e650b976115782fbf355 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Mon, 21 Oct 2019 19:39:15 +0000 Subject: [PATCH 0307/1097] numpy dtype issues --- pychunkedgraph/backend/connectivity/cross_edges.py | 2 +- 
pychunkedgraph/backend/flatgraph_utils.py | 1 + pychunkedgraph/ingest/__init__.py | 2 +- pychunkedgraph/ingest/ingestionmanager.py | 2 ++ pychunkedgraph/ingest/initialization/abstract_layers.py | 3 ++- 5 files changed, 7 insertions(+), 3 deletions(-) diff --git a/pychunkedgraph/backend/connectivity/cross_edges.py b/pychunkedgraph/backend/connectivity/cross_edges.py index b1cbbb443..eadef81de 100644 --- a/pychunkedgraph/backend/connectivity/cross_edges.py +++ b/pychunkedgraph/backend/connectivity/cross_edges.py @@ -196,4 +196,4 @@ def _read_atomic_chunk(cg_instance, chunk_coord, layers): row_ids = np.array(row_ids, dtype=basetypes.NODE_ID) segment_ids = np.array([cg_instance.get_segment_id(r_id) for r_id in row_ids]) l2ids = filter_failed_node_ids(row_ids, segment_ids, max_children_ids) - return (range_read, l2ids) + return range_read, l2ids diff --git a/pychunkedgraph/backend/flatgraph_utils.py b/pychunkedgraph/backend/flatgraph_utils.py index ecb576b61..37dd9b0fd 100644 --- a/pychunkedgraph/backend/flatgraph_utils.py +++ b/pychunkedgraph/backend/flatgraph_utils.py @@ -14,6 +14,7 @@ def build_gt_graph(edges, weights=None, is_directed=True, make_directed=False, :param hashed: bool :return: graph, capacities """ + edges = np.array(edges, np.uint64) if weights is not None: assert len(weights) == len(edges) weights = np.array(weights) diff --git a/pychunkedgraph/ingest/__init__.py b/pychunkedgraph/ingest/__init__.py index 0e577c96b..1d726b496 100644 --- a/pychunkedgraph/ingest/__init__.py +++ b/pychunkedgraph/ingest/__init__.py @@ -12,7 +12,7 @@ "parents_q_limit", "parents_q_interval", ) -_ingestconfig_defaults = (True, REDIS_URL, "atomic", 50000, 60, "parents", 25000, 120) +_ingestconfig_defaults = (True, REDIS_URL, "atomic", 100000, 60, "parents", 25000, 120) IngestConfig = namedtuple( "IngestConfig", _ingestconfig_fields, defaults=_ingestconfig_defaults ) diff --git a/pychunkedgraph/ingest/ingestionmanager.py b/pychunkedgraph/ingest/ingestionmanager.py index 
88b57771e..d90bf1c2a 100644 --- a/pychunkedgraph/ingest/ingestionmanager.py +++ b/pychunkedgraph/ingest/ingestionmanager.py @@ -35,6 +35,8 @@ def __init__(self, config: IngestConfig, chunkedgraph_meta: ChunkedGraphMeta): self._bounds = None self._redis = None + self.redis.set(r_keys.INGESTION_MANAGER, self.get_serialized_info(pickled=True)) + @property def config(self): return self._config diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index fdf653aaa..e5ba91fa4 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -40,7 +40,6 @@ def add_layer( start = time.time() edge_ids = get_children_chunk_cross_edges(cg_instance, layer_id, parent_coords) - edge_ids = edge_ids.tolist() print(f"get_children_chunk_cross_edges: {time.time()-start}, {len(edge_ids)}") # print(len(children_ids), len(edge_ids)) @@ -48,6 +47,8 @@ def add_layer( isolated_node_mask = ~np.in1d(children_ids, np.unique(edge_ids)) add_node_ids = children_ids[isolated_node_mask].squeeze() add_edge_ids = np.vstack([add_node_ids, add_node_ids]).T + + edge_ids = list(edge_ids) edge_ids.extend(add_edge_ids) graph, _, _, graph_ids = flatgraph_utils.build_gt_graph( From 62e2392ebcfef435ffc0297d53919aefb0cf5f70 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 22 Oct 2019 01:04:02 +0000 Subject: [PATCH 0308/1097] fix: bounding atomic chunks index --- pychunkedgraph/backend/chunks/atomic.py | 12 +++++++++--- .../ingest/initialization/abstract_layers.py | 1 - 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/pychunkedgraph/backend/chunks/atomic.py b/pychunkedgraph/backend/chunks/atomic.py index 01c35e6d5..2c61051d2 100644 --- a/pychunkedgraph/backend/chunks/atomic.py +++ b/pychunkedgraph/backend/chunks/atomic.py @@ -74,13 +74,19 @@ def get_bounding_atomic_chunks( f = lambda range1, range2: product(range(*range1), range(*range2)) 
atomic_chunks.extend([np.array([x1, d1, d2]) for d1, d2 in f((y1, y2), (z1, z2))]) - atomic_chunks.extend([np.array([x2, d1, d2]) for d1, d2 in f((y1, y2), (z1, z2))]) + atomic_chunks.extend( + [np.array([x2 - 1, d1, d2]) for d1, d2 in f((y1, y2), (z1, z2))] + ) atomic_chunks.extend([np.array([d1, y1, d2]) for d1, d2 in f((x1, x2), (z1, z2))]) - atomic_chunks.extend([np.array([d1, y2, d2]) for d1, d2 in f((x1, x2), (z1, z2))]) + atomic_chunks.extend( + [np.array([d1, y2 - 1, d2]) for d1, d2 in f((x1, x2), (z1, z2))] + ) atomic_chunks.extend([np.array([d1, d2, z1]) for d1, d2 in f((x1, x2), (y1, y2))]) - atomic_chunks.extend([np.array([d1, d2, z2]) for d1, d2 in f((x1, x2), (y1, y2))]) + atomic_chunks.extend( + [np.array([d1, d2, z2 - 1]) for d1, d2 in f((x1, x2), (y1, y2))] + ) result = [] for coords in atomic_chunks: diff --git a/pychunkedgraph/ingest/initialization/abstract_layers.py b/pychunkedgraph/ingest/initialization/abstract_layers.py index e5ba91fa4..ad097a456 100644 --- a/pychunkedgraph/ingest/initialization/abstract_layers.py +++ b/pychunkedgraph/ingest/initialization/abstract_layers.py @@ -21,7 +21,6 @@ from ...backend.chunkedgraph import ChunkedGraph from ...backend.chunkedgraph_utils import get_valid_timestamp from ...backend.chunkedgraph_utils import filter_failed_node_ids -from ...backend.chunks.atomic import get_touching_atomic_chunks from ...backend.connectivity.cross_edges import get_children_chunk_cross_edges from ...backend.connectivity.cross_edges import get_chunk_nodes_cross_edge_layer From 66d9c504cee5d801b4050baa5ff3738bcb2bf064 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 22 Oct 2019 15:35:59 +0000 Subject: [PATCH 0309/1097] comment test chunks --- pychunkedgraph/ingest/ran_ingestion_v2.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/pychunkedgraph/ingest/ran_ingestion_v2.py b/pychunkedgraph/ingest/ran_ingestion_v2.py index a7d2bfb93..0596d2b95 100644 --- 
a/pychunkedgraph/ingest/ran_ingestion_v2.py +++ b/pychunkedgraph/ingest/ran_ingestion_v2.py @@ -118,16 +118,16 @@ def enqueue_atomic_tasks( np.random.shuffle(chunk_coords) # test chunks - chunk_coords = [ - [26, 4, 10], - [26, 4, 11], - [26, 5, 10], - [26, 5, 11], - [27, 4, 10], - [27, 4, 11], - [27, 5, 10], - [27, 5, 11], - ] + # chunk_coords = [ + # [26, 4, 10], + # [26, 4, 11], + # [26, 5, 10], + # [26, 5, 11], + # [27, 4, 10], + # [27, 4, 11], + # [27, 5, 10], + # [27, 5, 11], + # ] for chunk_coord in chunk_coords: atomic_queue = imanager.get_task_queue(imanager.config.atomic_q_name) From 1eee3413dfc505d983c510dd013d8ad19e4b0df0 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Tue, 22 Oct 2019 12:02:19 -0400 Subject: [PATCH 0310/1097] fix Edges __add__ --- pychunkedgraph/backend/definitions/edges.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pychunkedgraph/backend/definitions/edges.py b/pychunkedgraph/backend/definitions/edges.py index 3b7049122..c60c48153 100644 --- a/pychunkedgraph/backend/definitions/edges.py +++ b/pychunkedgraph/backend/definitions/edges.py @@ -44,9 +44,9 @@ def __init__( def __add__(self, other): """add two Edges instances""" node_ids1 = np.concatenate([self.node_ids1, other.node_ids1]) - node_ids2 = np.concatenate([self.node_ids1, other.node_ids1]) - affinities = np.concatenate([self.node_ids1, other.node_ids1]) - areas = np.concatenate([self.node_ids1, other.node_ids1]) + node_ids2 = np.concatenate([self.node_ids2, other.node_ids2]) + affinities = np.concatenate([self.affinities, other.affinities]) + areas = np.concatenate([self.areas, other.areas]) return Edges(node_ids1, node_ids2, affinities=affinities, areas=areas) def __iadd__(self, other): From 8e13ac03f1e2cc5f88dbf871b884d7515fe13742 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 25 Oct 2019 15:03:09 -0400 Subject: [PATCH 0311/1097] Delete jupyter_notebook_config.py --- override/jupyter_notebook_config.py | 765 
---------------------------- 1 file changed, 765 deletions(-) delete mode 100644 override/jupyter_notebook_config.py diff --git a/override/jupyter_notebook_config.py b/override/jupyter_notebook_config.py deleted file mode 100644 index ecece8ce8..000000000 --- a/override/jupyter_notebook_config.py +++ /dev/null @@ -1,765 +0,0 @@ -# Configuration file for jupyter-notebook. - -#------------------------------------------------------------------------------ -# Application(SingletonConfigurable) configuration -#------------------------------------------------------------------------------ - -## This is an application. - -## The date format used by logging formatters for %(asctime)s -#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S' - -## The Logging format template -#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s' - -## Set the log level by value or name. -#c.Application.log_level = 30 - -#------------------------------------------------------------------------------ -# JupyterApp(Application) configuration -#------------------------------------------------------------------------------ - -## Base class for Jupyter applications - -## Answer yes to any prompts. -#c.JupyterApp.answer_yes = False - -## Full path of a config file. -#c.JupyterApp.config_file = '' - -## Specify a config file to load. -#c.JupyterApp.config_file_name = '' - -## Generate default config file. -#c.JupyterApp.generate_config = False - -#------------------------------------------------------------------------------ -# NotebookApp(JupyterApp) configuration -#------------------------------------------------------------------------------ - -## Set the Access-Control-Allow-Credentials: true header -#c.NotebookApp.allow_credentials = False - -## Set the Access-Control-Allow-Origin header -# -# Use '*' to allow any origin to access your server. -# -# Takes precedence over allow_origin_pat. 
-c.NotebookApp.allow_origin = '*' - -## Use a regular expression for the Access-Control-Allow-Origin header -# -# Requests from an origin matching the expression will get replies with: -# -# Access-Control-Allow-Origin: origin -# -# where `origin` is the origin of the request. -# -# Ignored if allow_origin is set. -#c.NotebookApp.allow_origin_pat = '' - -## Allow password to be changed at login for the notebook server. -# -# While loggin in with a token, the notebook server UI will give the opportunity -# to the user to enter a new password at the same time that will replace the -# token login mechanism. -# -# This can be set to false to prevent changing password from the UI/API. -#c.NotebookApp.allow_password_change = True - -## Allow requests where the Host header doesn't point to a local server -# -# By default, requests get a 403 forbidden response if the 'Host' header shows -# that the browser thinks it's on a non-local domain. Setting this option to -# True disables this check. -# -# This protects against 'DNS rebinding' attacks, where a remote web server -# serves you a page and then changes its DNS to send later requests to a local -# IP, bypassing same-origin checks. -# -# Local IP addresses (such as 127.0.0.1 and ::1) are allowed as local, along -# with hostnames configured in local_hostnames. -#c.NotebookApp.allow_remote_access = False - -## Whether to allow the user to run the notebook as root. -#c.NotebookApp.allow_root = False - -## DEPRECATED use base_url -#c.NotebookApp.base_project_url = '/' - -## The base URL for the notebook server. -# -# Leading and trailing slashes can be omitted, and will automatically be added. -#c.NotebookApp.base_url = '/' - -## Specify what command to use to invoke a web browser when opening the notebook. -# If not specified, the default browser will be determined by the `webbrowser` -# standard library module, which allows setting of the BROWSER environment -# variable to override it. 
-#c.NotebookApp.browser = '' - -## The full path to an SSL/TLS certificate file. -#c.NotebookApp.certfile = '' - -## The full path to a certificate authority certificate for SSL/TLS client -# authentication. -#c.NotebookApp.client_ca = '' - -## The config manager class to use -#c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager' - -## The notebook manager class to use. -#c.NotebookApp.contents_manager_class = 'notebook.services.contents.largefilemanager.LargeFileManager' - -## Extra keyword arguments to pass to `set_secure_cookie`. See tornado's -# set_secure_cookie docs for details. -#c.NotebookApp.cookie_options = {} - -## The random bytes used to secure cookies. By default this is a new random -# number every time you start the Notebook. Set it to a value in a config file -# to enable logins to persist across server sessions. -# -# Note: Cookie secrets should be kept private, do not share config files with -# cookie_secret stored in plaintext (you can read the value from a file). -#c.NotebookApp.cookie_secret = b'' - -## The file where the cookie secret is stored. -#c.NotebookApp.cookie_secret_file = '' - -## Override URL shown to users. -# -# Replace actual URL, including protocol, address, port and base URL, with the -# given value when displaying URL to the users. Do not change the actual -# connection URL. If authentication token is enabled, the token is added to the -# custom URL automatically. -# -# This option is intended to be used when the URL to display to the user cannot -# be determined reliably by the Jupyter notebook server (proxified or -# containerized setups for example). 
-#c.NotebookApp.custom_display_url = '' - -## The default URL to redirect to from `/` -#c.NotebookApp.default_url = '/tree' - -## Disable cross-site-request-forgery protection -# -# Jupyter notebook 4.3.1 introduces protection from cross-site request -# forgeries, requiring API requests to either: -# -# - originate from pages served by this server (validated with XSRF cookie and -# token), or - authenticate with a token -# -# Some anonymous compute resources still desire the ability to run code, -# completely without authentication. These services can disable all -# authentication and security checks, with the full knowledge of what that -# implies. -#c.NotebookApp.disable_check_xsrf = False - -## Whether to enable MathJax for typesetting math/TeX -# -# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is -# very large, so you may want to disable it if you have a slow internet -# connection, or for offline use of the notebook. -# -# When disabled, equations etc. will appear as their untransformed TeX source. -#c.NotebookApp.enable_mathjax = True - -## extra paths to look for Javascript notebook extensions -#c.NotebookApp.extra_nbextensions_path = [] - -## handlers that should be loaded at higher priority than the default services -#c.NotebookApp.extra_services = [] - -## Extra paths to search for serving static files. -# -# This allows adding javascript/css to be available from the notebook server -# machine, or overriding individual files in the IPython -#c.NotebookApp.extra_static_paths = [] - -## Extra paths to search for serving jinja templates. -# -# Can be used to override templates from notebook.templates. -#c.NotebookApp.extra_template_paths = [] - -## -#c.NotebookApp.file_to_run = '' - -## Extra keyword arguments to pass to `get_secure_cookie`. See tornado's -# get_secure_cookie docs for details. 
-#c.NotebookApp.get_secure_cookie_kwargs = {} - -## Deprecated: Use minified JS file or not, mainly use during dev to avoid JS -# recompilation -#c.NotebookApp.ignore_minified_js = False - -## (bytes/sec) Maximum rate at which stream output can be sent on iopub before -# they are limited. -#c.NotebookApp.iopub_data_rate_limit = 1000000 - -## (msgs/sec) Maximum rate at which messages can be sent on iopub before they are -# limited. -#c.NotebookApp.iopub_msg_rate_limit = 1000 - -## The IP address the notebook server will listen on. -c.NotebookApp.ip = '0.0.0.0' - -## Supply extra arguments that will be passed to Jinja environment. -#c.NotebookApp.jinja_environment_options = {} - -## Extra variables to supply to jinja templates when rendering. -#c.NotebookApp.jinja_template_vars = {} - -## The kernel manager class to use. -#c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager' - -## The kernel spec manager class to use. Should be a subclass of -# `jupyter_client.kernelspec.KernelSpecManager`. -# -# The Api of KernelSpecManager is provisional and might change without warning -# between this version of Jupyter and the next stable one. -#c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager' - -## The full path to a private key file for usage with SSL/TLS. -#c.NotebookApp.keyfile = '' - -## Hostnames to allow as local when allow_remote_access is False. -# -# Local IP addresses (such as 127.0.0.1 and ::1) are automatically accepted as -# local as well. -#c.NotebookApp.local_hostnames = ['localhost'] - -## The login handler class to use. -#c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler' - -## The logout handler class to use. -#c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler' - -## The MathJax.js configuration file that is to be used. -#c.NotebookApp.mathjax_config = 'TeX-AMS-MML_HTMLorMML-full,Safe' - -## A custom url for MathJax.js. 
Should be in the form of a case-sensitive url to -# MathJax, for example: /static/components/MathJax/MathJax.js -#c.NotebookApp.mathjax_url = '' - -## Sets the maximum allowed size of the client request body, specified in the -# Content-Length request header field. If the size in a request exceeds the -# configured value, a malformed HTTP message is returned to the client. -# -# Note: max_body_size is applied even in streaming mode. -#c.NotebookApp.max_body_size = 536870912 - -## Gets or sets the maximum amount of memory, in bytes, that is allocated for -# use by the buffer manager. -#c.NotebookApp.max_buffer_size = 536870912 - -## Dict of Python modules to load as notebook server extensions.Entry values can -# be used to enable and disable the loading ofthe extensions. The extensions -# will be loaded in alphabetical order. -#c.NotebookApp.nbserver_extensions = {} - -## The directory to use for notebooks and kernels. -#c.NotebookApp.notebook_dir = '' - -## Whether to open in a browser after starting. The specific browser used is -# platform dependent and determined by the python standard library `webbrowser` -# module, unless it is overridden using the --browser (NotebookApp.browser) -# configuration option. -#c.NotebookApp.open_browser = True - -## Hashed password to use for web authentication. -# -# To generate, type in a python/IPython shell: -# -# from notebook.auth import passwd; passwd() -# -# The string should be of the form type:salt:hashed-password. -c.NotebookApp.password = 'sha1:99c4a251cbc1:7b1916ef1147bd40891739b56192177a5f00e468' - -## Forces users to use a password for the Notebook server. This is useful in a -# multi user environment, for instance when everybody in the LAN can access each -# other's machine through ssh. -# -# In such a case, server the notebook server on localhost is not secure since -# any user can connect to the notebook server via ssh. -#c.NotebookApp.password_required = False - -## The port the notebook server will listen on. 
-#c.NotebookApp.port = 8888 - -## The number of additional ports to try if the specified port is not available. -#c.NotebookApp.port_retries = 50 - -## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib. -#c.NotebookApp.pylab = 'disabled' - -## If True, display a button in the dashboard to quit (shutdown the notebook -# server). -#c.NotebookApp.quit_button = True - -## (sec) Time window used to check the message and data rate limits. -#c.NotebookApp.rate_limit_window = 3 - -## Reraise exceptions encountered loading server extensions? -#c.NotebookApp.reraise_server_extension_failures = False - -## DEPRECATED use the nbserver_extensions dict instead -#c.NotebookApp.server_extensions = [] - -## The session manager class to use. -#c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager' - -## Shut down the server after N seconds with no kernels or terminals running and -# no activity. This can be used together with culling idle kernels -# (MappingKernelManager.cull_idle_timeout) to shutdown the notebook server when -# it's not in use. This is not precisely timed: it may shut down up to a minute -# later. 0 (the default) disables this automatic shutdown. -#c.NotebookApp.shutdown_no_activity_timeout = 0 - -## Supply SSL options for the tornado HTTPServer. See the tornado docs for -# details. -#c.NotebookApp.ssl_options = {} - -## Supply overrides for terminado. Currently only supports "shell_command". -#c.NotebookApp.terminado_settings = {} - -## Set to False to disable terminals. -# -# This does *not* make the notebook server more secure by itself. Anything the -# user can in a terminal, they can also do in a notebook. -# -# Terminals may also be automatically disabled if the terminado package is not -# available. -#c.NotebookApp.terminals_enabled = True - -## Token used for authenticating first-time connections to the server. -# -# When no password is enabled, the default is to generate a new, random token. 
-# -# Setting to an empty string disables authentication altogether, which is NOT -# RECOMMENDED. -#c.NotebookApp.token = '' - -## Supply overrides for the tornado.web.Application that the Jupyter notebook -# uses. -#c.NotebookApp.tornado_settings = {} - -## Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded- -# For headerssent by the upstream reverse proxy. Necessary if the proxy handles -# SSL -#c.NotebookApp.trust_xheaders = False - -## DEPRECATED, use tornado_settings -#c.NotebookApp.webapp_settings = {} - -## Specify Where to open the notebook on startup. This is the `new` argument -# passed to the standard library method `webbrowser.open`. The behaviour is not -# guaranteed, but depends on browser support. Valid values are: -# -# - 2 opens a new tab, -# - 1 opens a new window, -# - 0 opens in an existing window. -# -# See the `webbrowser.open` documentation for details. -#c.NotebookApp.webbrowser_open_new = 2 - -## Set the tornado compression options for websocket connections. -# -# This value will be returned from -# :meth:`WebSocketHandler.get_compression_options`. None (default) will disable -# compression. A dict (even an empty one) will enable compression. -# -# See the tornado docs for WebSocketHandler.get_compression_options for details. -#c.NotebookApp.websocket_compression_options = None - -## The base URL for websockets, if it differs from the HTTP server (hint: it -# almost certainly doesn't). 
-# -# Should be in the form of an HTTP origin: ws[s]://hostname[:port] -#c.NotebookApp.websocket_url = '' - -#------------------------------------------------------------------------------ -# ConnectionFileMixin(LoggingConfigurable) configuration -#------------------------------------------------------------------------------ - -## Mixin for configurable classes that work with connection files - -## JSON file in which to store connection info [default: kernel-.json] -# -# This file will contain the IP, ports, and authentication key needed to connect -# clients to this kernel. By default, this file will be created in the security -# dir of the current profile, but can be specified by absolute path. -#c.ConnectionFileMixin.connection_file = '' - -## set the control (ROUTER) port [default: random] -#c.ConnectionFileMixin.control_port = 0 - -## set the heartbeat port [default: random] -#c.ConnectionFileMixin.hb_port = 0 - -## set the iopub (PUB) port [default: random] -#c.ConnectionFileMixin.iopub_port = 0 - -## Set the kernel's IP address [default localhost]. If the IP address is -# something other than localhost, then Consoles on other machines will be able -# to connect to the Kernel, so be careful! -#c.ConnectionFileMixin.ip = '' - -## set the shell (ROUTER) port [default: random] -#c.ConnectionFileMixin.shell_port = 0 - -## set the stdin (ROUTER) port [default: random] -#c.ConnectionFileMixin.stdin_port = 0 - -## -#c.ConnectionFileMixin.transport = 'tcp' - -#------------------------------------------------------------------------------ -# KernelManager(ConnectionFileMixin) configuration -#------------------------------------------------------------------------------ - -## Manages a single kernel in a subprocess on this host. -# -# This version starts kernels with Popen. - -## Should we autorestart the kernel if it dies. -#c.KernelManager.autorestart = True - -## DEPRECATED: Use kernel_name instead. -# -# The Popen Command to launch the kernel. 
Override this if you have a custom -# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not -# pass any arguments to the kernel, because it cannot make any assumptions about -# the arguments that the kernel understands. In particular, this means that the -# kernel does not receive the option --debug if it given on the Jupyter command -# line. -#c.KernelManager.kernel_cmd = [] - -## Time to wait for a kernel to terminate before killing it, in seconds. -#c.KernelManager.shutdown_wait_time = 5.0 - -#------------------------------------------------------------------------------ -# Session(Configurable) configuration -#------------------------------------------------------------------------------ - -## Object for handling serialization and sending of messages. -# -# The Session object handles building messages and sending them with ZMQ sockets -# or ZMQStream objects. Objects can communicate with each other over the -# network via Session objects, and only need to work with the dict-based IPython -# message spec. The Session will handle serialization/deserialization, security, -# and metadata. -# -# Sessions support configurable serialization via packer/unpacker traits, and -# signing with HMAC digests via the key/keyfile traits. -# -# Parameters ---------- -# -# debug : bool -# whether to trigger extra debugging statements -# packer/unpacker : str : 'json', 'pickle' or import_string -# importstrings for methods to serialize message parts. If just -# 'json' or 'pickle', predefined JSON and pickle packers will be used. -# Otherwise, the entire importstring must be used. -# -# The functions must accept at least valid JSON input, and output *bytes*. -# -# For example, to use msgpack: -# packer = 'msgpack.packb', unpacker='msgpack.unpackb' -# pack/unpack : callables -# You can also set the pack/unpack callables for serialization directly. -# session : bytes -# the ID of this Session object. The default is to generate a new UUID. 
-# username : unicode -# username added to message headers. The default is to ask the OS. -# key : bytes -# The key used to initialize an HMAC signature. If unset, messages -# will not be signed or checked. -# keyfile : filepath -# The file containing a key. If this is set, `key` will be initialized -# to the contents of the file. - -## Threshold (in bytes) beyond which an object's buffer should be extracted to -# avoid pickling. -#c.Session.buffer_threshold = 1024 - -## Whether to check PID to protect against calls after fork. -# -# This check can be disabled if fork-safety is handled elsewhere. -#c.Session.check_pid = True - -## Threshold (in bytes) beyond which a buffer should be sent without copying. -#c.Session.copy_threshold = 65536 - -## Debug output in the Session -#c.Session.debug = False - -## The maximum number of digests to remember. -# -# The digest history will be culled when it exceeds this value. -#c.Session.digest_history_size = 65536 - -## The maximum number of items for a container to be introspected for custom -# serialization. Containers larger than this are pickled outright. -#c.Session.item_threshold = 64 - -## execution key, for signing messages. -#c.Session.key = b'' - -## path to file containing execution key. -#c.Session.keyfile = '' - -## Metadata dictionary, which serves as the default top-level metadata dict for -# each message. -#c.Session.metadata = {} - -## The name of the packer for serializing messages. Should be one of 'json', -# 'pickle', or an import name for a custom callable serializer. -#c.Session.packer = 'json' - -## The UUID identifying this session. -#c.Session.session = '' - -## The digest scheme used to construct the message signatures. Must have the form -# 'hmac-HASH'. -#c.Session.signature_scheme = 'hmac-sha256' - -## The name of the unpacker for unserializing messages. Only used with custom -# functions for `packer`. -#c.Session.unpacker = 'json' - -## Username for the Session. Default is your system username. 
-#c.Session.username = 'username' - -#------------------------------------------------------------------------------ -# MultiKernelManager(LoggingConfigurable) configuration -#------------------------------------------------------------------------------ - -## A class for managing multiple kernels. - -## The name of the default kernel to start -#c.MultiKernelManager.default_kernel_name = 'python3' - -## The kernel manager class. This is configurable to allow subclassing of the -# KernelManager for customized behavior. -#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager' - -#------------------------------------------------------------------------------ -# MappingKernelManager(MultiKernelManager) configuration -#------------------------------------------------------------------------------ - -## A KernelManager that handles notebook mapping and HTTP error handling - -## Whether messages from kernels whose frontends have disconnected should be -# buffered in-memory. -# -# When True (default), messages are buffered and replayed on reconnect, avoiding -# lost messages due to interrupted connectivity. -# -# Disable if long-running kernels will produce too much output while no -# frontends are connected. -#c.MappingKernelManager.buffer_offline_messages = True - -## Whether to consider culling kernels which are busy. Only effective if -# cull_idle_timeout > 0. -#c.MappingKernelManager.cull_busy = False - -## Whether to consider culling kernels which have one or more connections. Only -# effective if cull_idle_timeout > 0. -#c.MappingKernelManager.cull_connected = False - -## Timeout (in seconds) after which a kernel is considered idle and ready to be -# culled. Values of 0 or lower disable culling. Very short timeouts may result -# in kernels being culled for users with poor network connections. 
-#c.MappingKernelManager.cull_idle_timeout = 0 - -## The interval (in seconds) on which to check for idle kernels exceeding the -# cull timeout value. -#c.MappingKernelManager.cull_interval = 300 - -## Timeout for giving up on a kernel (in seconds). -# -# On starting and restarting kernels, we check whether the kernel is running and -# responsive by sending kernel_info_requests. This sets the timeout in seconds -# for how long the kernel can take before being presumed dead. This affects the -# MappingKernelManager (which handles kernel restarts) and the -# ZMQChannelsHandler (which handles the startup). -#c.MappingKernelManager.kernel_info_timeout = 60 - -## -#c.MappingKernelManager.root_dir = '' - -#------------------------------------------------------------------------------ -# ContentsManager(LoggingConfigurable) configuration -#------------------------------------------------------------------------------ - -## Base class for serving files and directories. -# -# This serves any text or binary file, as well as directories, with special -# handling for JSON notebook documents. -# -# Most APIs take a path argument, which is always an API-style unicode path, and -# always refers to a directory. -# -# - unicode, not url-escaped -# - '/'-separated -# - leading and trailing '/' will be stripped -# - if unspecified, path defaults to '', -# indicating the root path. - -## Allow access to hidden files -#c.ContentsManager.allow_hidden = False - -## -#c.ContentsManager.checkpoints = None - -## -#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints' - -## -#c.ContentsManager.checkpoints_kwargs = {} - -## handler class to use when serving raw file requests. -# -# Default is a fallback that talks to the ContentsManager API, which may be -# inefficient, especially for large files. -# -# Local files-based ContentsManagers can use a StaticFileHandler subclass, which -# will be much more efficient. 
-# -# Access to these files should be Authenticated. -#c.ContentsManager.files_handler_class = 'notebook.files.handlers.FilesHandler' - -## Extra parameters to pass to files_handler_class. -# -# For example, StaticFileHandlers generally expect a `path` argument specifying -# the root directory from which to serve files. -#c.ContentsManager.files_handler_params = {} - -## Glob patterns to hide in file and directory listings. -#c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~'] - -## Python callable or importstring thereof -# -# To be called on a contents model prior to save. -# -# This can be used to process the structure, such as removing notebook outputs -# or other side effects that should not be saved. -# -# It will be called as (all arguments passed by keyword):: -# -# hook(path=path, model=model, contents_manager=self) -# -# - model: the model to be saved. Includes file contents. -# Modifying this dict will affect the file that is stored. -# - path: the API path of the save destination -# - contents_manager: this ContentsManager instance -#c.ContentsManager.pre_save_hook = None - -## -#c.ContentsManager.root_dir = '/' - -## The base name used when creating untitled directories. -#c.ContentsManager.untitled_directory = 'Untitled Folder' - -## The base name used when creating untitled files. -#c.ContentsManager.untitled_file = 'untitled' - -## The base name used when creating untitled notebooks. -#c.ContentsManager.untitled_notebook = 'Untitled' - -#------------------------------------------------------------------------------ -# FileManagerMixin(Configurable) configuration -#------------------------------------------------------------------------------ - -## Mixin for ContentsAPI classes that interact with the filesystem. -# -# Provides facilities for reading, writing, and copying both notebooks and -# generic files. -# -# Shared by FileContentsManager and FileCheckpoints. 
-# -# Note ---- Classes using this mixin must provide the following attributes: -# -# root_dir : unicode -# A directory against against which API-style paths are to be resolved. -# -# log : logging.Logger - -## By default notebooks are saved on disk on a temporary file and then if -# succefully written, it replaces the old ones. This procedure, namely -# 'atomic_writing', causes some bugs on file system whitout operation order -# enforcement (like some networked fs). If set to False, the new notebook is -# written directly on the old one which could fail (eg: full filesystem or quota -# ) -#c.FileManagerMixin.use_atomic_writing = True - -#------------------------------------------------------------------------------ -# FileContentsManager(FileManagerMixin,ContentsManager) configuration -#------------------------------------------------------------------------------ - -## If True (default), deleting files will send them to the platform's -# trash/recycle bin, where they can be recovered. If False, deleting files -# really deletes them. -#c.FileContentsManager.delete_to_trash = True - -## Python callable or importstring thereof -# -# to be called on the path of a file just saved. -# -# This can be used to process the file on disk, such as converting the notebook -# to a script or HTML via nbconvert. -# -# It will be called as (all arguments passed by keyword):: -# -# hook(os_path=os_path, model=model, contents_manager=instance) -# -# - path: the filesystem path to the file just written - model: the model -# representing the file - contents_manager: this ContentsManager instance -#c.FileContentsManager.post_save_hook = None - -## -#c.FileContentsManager.root_dir = '' - -## DEPRECATED, use post_save_hook. 
Will be removed in Notebook 5.0 -#c.FileContentsManager.save_script = False - -#------------------------------------------------------------------------------ -# NotebookNotary(LoggingConfigurable) configuration -#------------------------------------------------------------------------------ - -## A class for computing and verifying notebook signatures. - -## The hashing algorithm used to sign notebooks. -#c.NotebookNotary.algorithm = 'sha256' - -## The sqlite file in which to store notebook signatures. By default, this will -# be in your Jupyter data directory. You can set it to ':memory:' to disable -# sqlite writing to the filesystem. -#c.NotebookNotary.db_file = '' - -## The secret key with which notebooks are signed. -#c.NotebookNotary.secret = b'' - -## The file where the secret key is stored. -#c.NotebookNotary.secret_file = '' - -## A callable returning the storage backend for notebook signatures. The default -# uses an SQLite database. -#c.NotebookNotary.store_factory = traitlets.Undefined - -#------------------------------------------------------------------------------ -# KernelSpecManager(LoggingConfigurable) configuration -#------------------------------------------------------------------------------ - -## If there is no Python kernelspec registered and the IPython kernel is -# available, ensure it is added to the spec list. -#c.KernelSpecManager.ensure_native_kernel = True - -## The kernel spec class. This is configurable to allow subclassing of the -# KernelSpecManager for customized behavior. -#c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec' - -## Whitelist of allowed kernel names. -# -# By default, all installed kernels are allowed. 
-#c.KernelSpecManager.whitelist = set() From 24101127bc92b957362d87b1234c3eb8d8806a9e Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 25 Oct 2019 19:25:22 +0000 Subject: [PATCH 0312/1097] delete unnecessary files --- docker-compose.yml | 56 ------------------ .../images/ProofreadingDiagram.png | Bin pychunkedgraph/backend/requirements.txt | 3 - pychunkedgraph/backend/temp.py | 0 4 files changed, 59 deletions(-) delete mode 100644 docker-compose.yml rename ProofreadingDiagram.png => docs/images/ProofreadingDiagram.png (100%) delete mode 100644 pychunkedgraph/backend/requirements.txt delete mode 100644 pychunkedgraph/backend/temp.py diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index 96ccc0919..000000000 --- a/docker-compose.yml +++ /dev/null @@ -1,56 +0,0 @@ -version: '3.7' - -services: - - pcg_1: - build: - context: . - image: pcg - container_name: pcg_1 - environment: - - APP_SETTINGS=pychunkedgraph.app.config.DeploymentWithRedisConfig - - FLASK_APP=run_dev_cli.py - - REDIS_SERVICE_HOST=redis - - REDIS_SERVICE_PORT=6379 - - REDIS_PASSWORD=dev - volumes: - - .:/app - - ~/secrets:/root/.cloudvolume/secrets - ports: - - '80:80' - - '4000:4000' - depends_on: - - redis - - pcg_2: - image: pcg - container_name: pcg_2 - environment: - - APP_SETTINGS=pychunkedgraph.app.config.DeploymentWithRedisConfig - - FLASK_APP=run_dev_cli.py - - REDIS_SERVICE_HOST=redis - - REDIS_SERVICE_PORT=6379 - - REDIS_PASSWORD=dev - volumes: - - .:/app - - ~/secrets:/root/.cloudvolume/secrets - ports: - - '81:80' - - '4001:4000' - depends_on: - - pcg_1 - - redis - - redis: - image: redis:5.0.4-alpine - container_name: redis - ports: - - '6379:6379' - command: ["redis-server", "--requirepass", "dev"] - - rq-dashboard: - image: python:3.6.8-alpine - container_name: rq-dashboard - ports: - - '9181:9181' - command: [sh, -c, "pip install rq-dashboard && rq-dashboard -u redis://:dev@redis:6379"] \ No newline at end of file diff --git 
a/ProofreadingDiagram.png b/docs/images/ProofreadingDiagram.png similarity index 100% rename from ProofreadingDiagram.png rename to docs/images/ProofreadingDiagram.png diff --git a/pychunkedgraph/backend/requirements.txt b/pychunkedgraph/backend/requirements.txt deleted file mode 100644 index ba50f47cf..000000000 --- a/pychunkedgraph/backend/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -google-cloud -cloud-volume -networkx \ No newline at end of file diff --git a/pychunkedgraph/backend/temp.py b/pychunkedgraph/backend/temp.py deleted file mode 100644 index e69de29bb..000000000 From 5d9195117308a12b84187390f5040c648fb7a8b0 Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 25 Oct 2019 19:29:55 +0000 Subject: [PATCH 0313/1097] docs folder --- {pychunkedgraph/backend => docs}/Readme.md | 0 pychunkedgraph/meshing/README.md => docs/meshing.md | 0 {pychunkedgraph/io => docs}/storage.md | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename {pychunkedgraph/backend => docs}/Readme.md (100%) rename pychunkedgraph/meshing/README.md => docs/meshing.md (100%) rename {pychunkedgraph/io => docs}/storage.md (100%) diff --git a/pychunkedgraph/backend/Readme.md b/docs/Readme.md similarity index 100% rename from pychunkedgraph/backend/Readme.md rename to docs/Readme.md diff --git a/pychunkedgraph/meshing/README.md b/docs/meshing.md similarity index 100% rename from pychunkedgraph/meshing/README.md rename to docs/meshing.md diff --git a/pychunkedgraph/io/storage.md b/docs/storage.md similarity index 100% rename from pychunkedgraph/io/storage.md rename to docs/storage.md From e135faec3009fbc3795b456557de78dc5093a10f Mon Sep 17 00:00:00 2001 From: Akhilesh Halageri Date: Fri, 25 Oct 2019 19:36:21 +0000 Subject: [PATCH 0314/1097] wip: docs --- docs/images/edges/1.png | Bin 0 -> 713361 bytes docs/images/edges/2.png | Bin 0 -> 29976 bytes docs/images/edges/3.png | Bin 0 -> 136955 bytes docs/images/edges/4.png | Bin 0 -> 159443 bytes docs/ingest.md | 49 
+++++++++++++++++++++++++++++++ docs/storage.md | 63 +++++++++++++++++++++++----------------- 6 files changed, 85 insertions(+), 27 deletions(-) create mode 100644 docs/images/edges/1.png create mode 100644 docs/images/edges/2.png create mode 100644 docs/images/edges/3.png create mode 100644 docs/images/edges/4.png create mode 100644 docs/ingest.md diff --git a/docs/images/edges/1.png b/docs/images/edges/1.png new file mode 100644 index 0000000000000000000000000000000000000000..c66d811e78c4d3c53ce9c44a710270dd5f4ec0bd GIT binary patch literal 713361 zcmaI8cUaGDA3vO=9W4!&h88LfqN$-pXc&cd8Y)U88roV?($X%Op_C-Fr&1wG(oRt{ zwa4>1ulxA@^ZbtIxsUt4Ze00(KjS>#@7H>tVFpKZnCQ3AQ&3Pa>FFLYqM)G6A^)SL z#-B`e#K}@n2vF!9P(S9GGWKpkDc*}`&+5Iad)woU97VTyuzkG8s$r-rZd=dJq7igk zu)#C=Z@8>6_l}rwVWyCuxATp*>^Zm9V|3~B%cws~rPs@sG?(`A)(wPxzwGnO-DFs? z{i^vI11+tZo*v!7g9m$m{i0jHetlE=e*Wp<79D42iOQ-f8OKk;O=&7T1@JQ79!%}e%mQD&+>`tCsg;8R zt)`~t{KA5|1V>e6B_#(3$H>QQnv%<-Az^G{*8SCCTuL4iwUOL`XB*?KzP&h`sOqCA zdL)IGjg5_qi>ti6oPvQ5PXDX|=Spnv&#b-oJkzefxI6ojdFyOgz+dbaae7 z@{Cm>Oo8$79JM!jZfs!Su&fB!5Pj!PkiY-Bu|I#TE?n5;xfN$1<|AC=VSvKf&-Q3)8uYtkA8_4Tm;*l@^{+&lg zMrLAi(*M@2P5lD{EId3B_4T?3w6%rs2$PbMI$mAipZ)Xw#^Gf7iiQR{{BUSkn7W6D zyvN_Ju!j%D4`^vsw6tu}*4BQjvw#17Ud2nac<}qK&4@>BRSCyiBY%=0WmcJUC}CoL z(&pIl<9(IE48^W}l-1TUS=rf!$BqSMWeM4yJzLS($bcKK>3r#AWoJi?WxVQTX2zG5 zmBq}(6Gz6xibQfDC6EeD*pYx)pfV{-By>cdS1KM-~G<3_{#WJ^HT4yLkUtJ zpW|`6_E*z+UcE|g!QbEiVQy}9zD=F$SceSnK358%n9$PX`1p|JW$%6a_LWyutaJLB z>t9y3U;pr7t8?e*nV6Wc{d7`<^~=|MO_`ulM_!XA0K~W z>Ey(K|18%0{qe@o#6)*L13i5}dOE+yc=yd8Z?06~%O@u%3r*$ZU1r=z1oVEcAD{0!};O?9}7{43+;W8VsBmS+D{`LCyO?JZxIvd@ij!-frS z$4|dHFDNLuuBfOe@8!$jgapnL&&_X=efjC0o`IpEe@F=RADgw+m3`M&m8Yhrq7xEm zcJ10#U0p4psJP|h$B)vxcURxji ze}Iu+m6x8L9<`tx^{w@}#eV%n>2++P`hKWv*Jeii%T^a7EXd!-&XsoQ+^wspXN6K} zaPs7hk`l%HM^fog`-)$`uK4hQ39pJd?11OOln7Y|c(oFzix_X;zTK3JsuEOGB)_^a z%)dO{vOhXDHt^|F$$?wg!T5iDeSJ%7>j13?j!d0fTSt1z_Akzj-9yu84+wz{;-cX{4_mqlrBW5TZDYyWnq1Xy4XUl%!qnNWcV3*E_*D=1_^A9I9W^vJ-)LcBG4kt|`q87C 
z@G!aL>=>s%;&UePoj>ErTi?5v^Lvp?Rgueg%L^B7VPQ>=FU-#iNJ}%{*u-C(rsCyL zX5XAr@$~d7*U_(%Z#^d|v4F{CJTre6{{HRyH^|m~sC;_5Z3QX)jm)cB!8|E3#wfPPe657h5~K ztdf!~SU}=Cc2wfx1vnxEIU@2sC#{{oz1Toj$IP$KGCqr!9zK4op{W^ob>HsY>?qR{ zv$G+uUhTbqG(F0+z)l@Sd2w-(l<7;CFXwwNT$HdVj?zpHJem>zkSmI67{Zm6eT-iCHHmCWh|~p1+S*G}8W@zvRkz6t=Ub zh6ZJ}&aJ-}r3&mm1iXH&(DdO0%jV6&c%MI8^YWz8&{y{&}nT+R2$G&}=HP*`A zU5@OU^6A+>r+4q&JJ4SfLCQ{3Q5jqz(-eZM_+AGPLx| zymR;4q*|4WgSz^uig7)7cG<6AbH9H5y8gq58tnJhZ{NaDnw-+`oS{@4V(Fet+5d_S!%C4<9}}eEz%+@5gm*)u*fMIzQ?g-m6N7`%kz`#$7=Hs%~tWUtLj({een?4IP4)(T~;dxw@?I%5>KNT1lw)+Q-if}diMcuZDHYdV3TtcY*3QoGuU}7y?#X$pVOg(w})yuK(Rdid@Efx>J5 z&gGtZ87|Y@*2bQ^_d;z&AdTBZuR0)^Ch9~4hh$}zR)jQ=Jn0GUKbtC26x<1%c5-sU zDye8{Vk|8!)z#NO;OV)q==_gxl+sGHl;GfCWv6_T$Oz6|{avr!_g(qRi<<&a)yTsV zRO)(rHMZvNvu9Gci0T8y9)C_HCMOewxVpO9+TLD=r|ZZ71UfM>;fEc1FDFOl+KdCR zW7YTX$-XgoH@Kx~iNl8v8=g23g6h#*6TvyZFm2Ciu=wZu-ZQ1%Nt5DuN`{7p6EibG z4<3lIva+_ewH=V2of(Cf9aRe%-A=! zZ8}laz~eH`9n$!1tnhoR4;jx1%bC&kO)HE4s_;fX7T6!XH2Rh4N6F=ZH4f{JVz+_T z2S@ou2XJwk+S(y4nJCx0%-;&oO^XgT-y$Hc#X!Ox%XisDrW7;Z_Qo-;N!?(6NPz~2Xw z(*$1x#Cj`=4JvV_m>J(-DRuJLv1-5+PySDzKDB=SY*YH8pdj$(O(sI^u&ynCHYJ&q zdMivHK7U@SIeq_8zd5vBQuX$p?S6Hu5R5D=I2VRD;riXsENIlro_Xbd>ri6N6CCfZ z{rgRsf?C$>ar)Aw-9p>8eM|fC+Ffki4R{U!K^yHqbr3CjVtRVp$mJ_nZp6kKb7sGG z8=wYrtQp*Uebrk(S^nnJ3#U(?M$KZiO;PdUz^WK!&A7g{df2ZMr4r5Iv*18~zommi zeUTx=f^$wzlUU~7^AqdJd{-&~nkTVgz@r2Kf{z?ILKa2dcT{>xN=nb3w6wIm!b00O zW?)okigrAY0LUn~Tu+}mm3MX8j(QBx$QrnO_uIIgCpEC;Dhg4sxfCyngX1POiRvf% z;StK@5rlztp?mbG&b4cbVAhtlwg+e~UcRh#<_xd@Wvo<)1s$^bkmZs;XO0K08x^C;m+|_t`UTe3tqnz%3{O+x6%e7&O5!*?d6{ zj-;!8I1z_h^y%|wQ$}U4Sw{3PMrG@KE?-|?*NYbeVq@8?ZERTB*wzCcq^)2};A@)H z2Jp_&UIU^Q7q8uki(|2U{OFO@=+~#HH`{N%bonEU}D$EVzRNC1n9*R1X7bKsNW z5)!sg#z?1{8e(8uL4}(DJNcw~ePyv`JoxJ(Uc|@F&i5L>2ODC|glVr{y-NCPL5c5* z;`y#(f57*CEPNtSV9#_k^C){t3kwV5wdry<0^&udr0DnV)TJO0ps&v#Ke}!2-UPbt z_I6GXGvy`HCd>WTJG=b*^PLkVuYV5c!XNcKM|C|lquO)xcZB4anwqk+vl~1XsFyoq zW@dHvY>j6&7PO9zPJq+q&6^)SdGdKYiccxz`>P8DPv-|4*bW>xAl=*yZ32|??3lEi 
z9N{Ff;W05RWCw%iwzluyvj-of<%RxHj;^<`7vhxL^1NGNVPQs2&IUq)(P~?}yQ5}j zJy0MlK^vXx&SBH9E-#oAy9#nhovE*u5RNG>-j{p&Rg^5zh15=++Jf3D78-f$)(yNy zQJ{A~qipmK?1pUKF_b&!0fwR(R3}v>kkqm7uWtEz0E~4NyJa7edJUn#CA+kA>z+M( z94=fCV}A7Tpi6*LEB`ocZEb0VfE;Bk%PBa%YIy(sRv>n5b}S5-;)-4_@M?5i zT=4z-qPVrbkr5&^as1_!Ix`|v^Lu1O4bsxgzklCKPzFQZzE$=)ifg5&rj`hQ_x5e? z`1sL`H!k1h!PqTvDO`Kc^8)i~n1&hzt#C>cRRix}pVzDe9%=*;l2C0V0=BApheAIS z5EHvmM*7$8-MgdP9zK04l5c2XaSM%I2>(Xr2!&?l)l^lVE&F|!EprKn1cr`|j`j-* zqWTXq3^$9IeE#^+KRKC4LPCO8h>w>y57(l)1{8_g&1Zi6^r;s1+Ryz_Mk_K~_N}C7!6LD2a{O_nDcQ{_ECLTcY%G?RH=W!Zbh+>3aVB`7I!M z>2$(qKqjb;ViN?ucp)pIgOWzD&2|~)a7XnQGrbk-!4~Ak`Bkp510-PCYdJWG;}!|Y zANl>8bsoB!b@$t=cVf}r2o*N`l?_Q zkIJ>(pp6^;$CPdAqWGX0+T}h+Nx`M*y$NRE)X>rj9%hBE1uhj764HNt=gDWby>y#4 z>3}7oJo)n}d077Xl!I@tMj_yOOC_09PI=YSGb1CzFHA>TT79oj-aQSqD{gN7p!I?E zkY%CT94uG3I%S({_9p)LeLSqfmoKesY^p}tcc%n=K6-aI9oq7NNsSyQ)I79phjZsN z4jqcve9YEXcy_$!X!$Ffx{aW&z1Rz$Q-gFU6;M!gB;P+We&G7=_i+H>tinQe&;r8t z0JyZ7LT=okv1?Azqd}A8ICJI7 z7D#EFM^Y7ofBv+?o6FA6XMt)^js2JueFy^d=;$GN=i%nmyu7@O&$V@RHZ5r?-tj2! 
zkVw?fyFR|UU;%;>{e$`0pMRt6RPTVJZF}Qy-_Fux*brfG7?&rr?#ZJ^EX$@QCPWsc z@(&9O+vhnUfSxn&Or#D}CLvLXPQa!5XX4}IOg65W5|zS z%m2!;@j)NNocNfT{eX*&>qH%Vf?C)jYUY95IQcqJ`X>I3o(!O-0Av;mY{y=nK+O^j z!?O)NAoPMVE-tRVvC-x>SfSd*i?R`K@K*Yv8rqgp`CS}(zZvv_NQ`8gqayh{{hw%I zdvZgqzGaY>5DS3Cmsv1?0eyDmo_mnF*WoO8JpnDNh!Y#g49x{3TYwz+HNHP zb!zHjZEi{mZ|%F4rMYaNbx0m5m+Cfe%kaMbfdECYxh5_fK&s(WciH!c*BXZGLsCAsKArOdFboY(^R8qGDHD7 zb^5faIDi&Cu6V7z(gf}SkrdH`lXyZl@o&UZ1V`e*Gtvfd`uODJF5eyi%bD?>^m0)& zOuU^)f5`d->DdgT=?JohVR5`4esuqSNN6Z6hdoveoC)PaP%#}G9o11?;n6@ysDW={ zLIqLV3cyeJII2^|lP8CkfY=G-dA=Q%1Qz$$dlm2b=u!dpW+-3eOV+KS*%PHf*=La* zFG>pn9J+%p#~I7^;Ee&U-5S2vvGF0n&F2OeOZEnQd2-UGIYj}mZCg2KJ&oWjCp&va zc6Ok?CybvhJUphlXn_@7T`99)vUN6%jE=@S`rremrl-Rg4hR`?vfAOs_g$S5P9JK` z)Rcj@;OXfpeW9+VrsC75+vN(v!c>q6OkSPm!*iv9$O9_MlFP6mc*6Pfr%!+-O{RW( zFLVn4jVSEJO`%;#%tDF_L}$sgZ%HFqFk~ZNI5wd*8e*g&kVG(4yZpe^R6c+ZQ^xQE zLbta8XcFlROe_KJHWO2`}-%# zz@w+sh;BYOXiH5T2Wzk)m_XLjY#lq$0nwu%Yu#gT@9Xc^cwh431#3x3iOE|}nMdb< zbr`qoet78Y4%*xJSQ&gVWfVREP?!ahRid2z#wT&4dqQ?}u4_qAV8cB*A3vz8TV7oq z)LsZBBG>%g-3%e9->^+`C49WST?gw~j5_e>w4i$hMrLLT5~Uu>SRZZ!LWG8s(@p>_ zod=PS3vnAskI`(24MuDT=86vS-HtLzyamXEH4l>z&tw*cBy zzw|+0UsX6c_~dVC7Ddk7V`F3D8CL+zu`0etty{Ow_R~uM$j6Tlo;XY^0m&^iBEp76 zi0Y!C0WhS1uy9as2~=n4y?gB$M6v8VJUkdF$)-Pkd~?gotgL|2Sa?{l^}w<7rHT23 zYCyKbCFmh+G@G{C)<{9mb^I(!nYl1Mj9L*TaEVxn0Blbjp{76qkW1|B=&;1*@{WfT zMip)Mo51=LI}qJ_saK??FD{I9U4KjEt;No>SI9k z5W)+3v0H|Etstl4{@vjHzR-rEH$VG_$MhkT7Xd**{|5KUAUa~{(nwyubSdM(gSsR7 z&?La3m_Uyfiy#o-LmEb~`m#`zW@^ay)r4=B(8$frW#;4i;Awp7)cbxVpj7?G6XQZL zw7_Ll?gO2>Q;Rta=ElZ|R1T01=vyHf{gB?~mlx#xKL*llg3?3t z!sW(Bz5o-6pL^cx=p}R&H5OlI-9{dG`g>xX3w^{f=YHI4p*qE)GVDwSQ zJfO(-jC(+|bcOp1x?Y@Rh~H(g5ij9q_8zCNYG!7s`AhKD9h{vHKu0=v?%c%8FJqv{ zeA>TbW7Sy64bT3;zQTI7WLX9Txpe8$#1a6m;I3V_nk=AF5n)pO1597Im3-taJ*Es4 zQ+|UxgmFBbo1K*votRi(1{y#Q;Qb;V#Xqm8=)Q0cgh*h%ZSlm2hV8(>ise{eU!Q?p zNO%D?)0QCUwFF!=HU9kh6OQpVo}Gn1Hzb{fTnS*8I8AjmvGwZ5_Vyb{1Ok*2tQWihZJ>lPdgWic1=pT) z?BCWz4#k&ORRu0ILgxbw-se3py^&uv612DY&6@$d{{K(O2SU&qg}BSPbJon;ckBqb 
zaYHAe;sKs8SaL)eFAom`G&G|@I5iLpvcylKsz7)U^pxGbI|wlBMie6>W66jo7`pVO z5d&)P3V|SxRV;OBGyq2EUMkmS)NXB6k#_A>tAGFA=+(aSUHeoWGFPm@0Qd~_^@-(| zcqT9~kTB%{dMlIuvQa5Cby|%-V`IHPe`*^Tfn8!*_hCKmR$bRc>v9eNJCaK#;F>;I z3t{T5FbT*SK6fBFIoa^!$-1BwP_26p9=PY7#hWU=zNWgzAMk`Y7`eH&JV=4?#Kgo< zZ3ZMmTkRW%<5pEu6TI#%06uh18;0k2mjDvk6r{(&?e>mGp#X28*<%|ZdPtm_xLPEQ z@$q9!IdpgqEaC^M=kQ&+M~;Y&z$w!Jw0S9Zy2!Z#!l!99@kyaR2(&H^r(e&rZD19x z#O)9m?`%^C%NO1t`3Oc485tII2jxTRY{G@7(^b4e;8t)b0G!?c!M@>$)kD-7r^v|2 z1JEVcFXK}TO-&mtoz9)>%o*AS6O)C-AM!tkA09d>9rkVQ?Nzu!5Tw?RABm9O3IQ24 z$iP5q`}PKpu#9%rl&;RsEw#0^R9~;Jt?UKs5a;xq{>em>Y+mMj*vA32;!i1H>cYZ8 zn}|zKDIb~=m1spp1&OD`OyN-%6)dl;xS_L4$>XEKA@LS{H8wJu=&xZ2m?M!4^lv!P zW1lWEY}|O?{4c~IXc6{C51&1&g_FT05j?nkz7N8gD>P2(cElMVE2l}80VzX0%a?q8 z{)gf|1qDMXA;0z5i?tP?rPjaO9?kH;5eNzn&JVr<&IfH%t{cKa=o9Z!?}Y%bxxa#v zk~cR50TDqpX|g^KcL^l*UeC|r;q0eRVY)IY-vEkd=ir$7=HTFytP#16tTDcP?yBfq_UVY~pu`X0ZF3{z zhZL0Okn}isgpk^}xEgL66_s~VzMtW@6DMsn9X3mcmT@P3j~#Z$@0f|tHpYAfMMY@Q zDYpI;y{o64L3*sP9Pb8qv(|2%AMEU z4habnKF_hsoYCEVT1svsHb~@aEDDo19+L3flB(eRll{od4OgodlM(QQoblpkm z#_e_%{!g3P&VLO}Le*!nI;#d2YIAe*86|dp{#)VU^pw+BER|TRP+8h$Z-j(g)E>n4 z+UGIG13bU59Dsb4`^#6a>ipKN7kB*i?r{l&`X)tLNqC_njdrn63MLBP%O4(tH3HaTf;!b~5g#lWB&{1f;@y?qyz{UsTja zhNqB>h>|q(27O3VUEP1O20;~=VfQ}(4W-Y{lzsYSM7@kpB7)vXJ5ov5ap;M|8R63U zLL;1b=p@{Bj&{>l-GO$pQkJ1yl#ou|1sQR=6k({n66WQjIShG7HHB zMDpy7D&dFO0B{`Nvx!?asQe)?7A!Cw^yzOuU=u)-nz&2i2#}Lb_@lp(&{xt^Tus+I zuY}_RaG#NFXb}+9f8>j$+jLS>bN%e!AC2YG2m)-;N>}v}&)|%d18+plWuymP)IrC+ z9FE6HxIMR))m&2VTl}sZ6v4pMRL}J?m>w#&++=8tHMY8&rzm0sxDnfRkb72xmpA=TY$F^fcjS4AfSOW)v@w z1k5Wau$efG#26HV3Js?(kBsoE!TV*wmyC zr6kX~xv6O|>NBJklb zW!tB6b&BfWzkex_9IU_L3e)O`M?|>IPguQ(MEi2RbZKy+5_BWazGbVd1u%FAjFYum zICTKH!BUzBA!!Zu=!0fK1S>APOprZp7+zHat(~ncT-DIhMR=H9Z?4=5Ml0bqT|%cp z>Z2d+&Nc~{pWJ|`vz5SMidu;0gmbAo85l5=*PXc=MURS_y0X07PhK8o>BjrqJUms% zvw>F#%iT>#sQZy`XJ-ciw}#+Me2@3)GPgykw_vjGc(=-#V%K$e$I!*>n*5**nqGu> z01Vd$1*2`5?%I`g_pqIKVU^jQgh&yEtx}NK_~C;VRKS)!c(W*8h2Nmj4`7Aa=3I1l 
zhyI~HRe^p7CDoQa=iWU_1pCl>qQgo&{%ip(tL|R~^Fy`I5M2PUC9w@taVs^0Vor4) z3GZ%v)+AJ_&Xc%JlwaB%=eysA5l!{W7nb5uueptoK$e?OsSuvKm9HVNgK9sj&hN@d zbOJVD9m~HYEsCb~nyD6H7KEH48}Zb#L{D>d@7T3#{F5Fx`UPTpbfv-RoX=jv0>&pw zaDg=vNn84`N6kX=0|EnM($jC@+G8xqMWBB!B$0xHjOgWN9(iXqSc-HqpeUdSK2xhe4l;!qxx?e1*ENg4;)=6uI!kCKnZXBz)OMupvOq_eoTsV)x;Vra{m?g+Ox3PYSd4 z!Bnv=F)}tL=^W}{JeC2he#Cg}H(LNFAD8Xx>wCMkuS7*fW&2%^%a@7QgYu%#O0EWx z)tINrGB(r_l9Hx;5CAVbKBvunEnrHbD9V66X{fTfDKv!A;bOj)WgVYUZiwI|G;B}bJlZ=vaB%ak!5ltrZZ?sqv z8mL$goQ9Ndf^!769pc0b5?KDAo;nj3kdlV(^j-y7Ef!whrX5JQY=I&*F*Rk)icU}o z7Rc>Mgf7&@_bT{i!q9ua0XQ8w&!D*ro88{}MPVV4&A*FO(lZP8A@#Bm7ngF%cV>hI zn(4={U$tjXn50ShH6aN`;#3>5o;=w}^f>FCJ9a?lZ_Oz=f9_m=8Nae8Gep#}>J<1= zCr+FwOhBH*1`0AU2X4}>XAZbPuaywZ4%2~|o4a90+2hCb*k_SeKxEqJi8s`sHbo~T zH2_`ybAsr3Cu{;T7a<)C`Ikq(y2<(=l5mImsYO|umJ*e4D5yJ$O{uuwes z%cfh_EPl?Q{1em6{c(17)$lGxYV2`J=&H7^FqZ;v@$Y9vTxl4TX9HCDTBO$$y0+<3 z5(f!c88s7Q<3NaRF~3+T;-n4Ylaj(9xv^9$!pH+*;ZpV9M`|%Tu@%s;tDBpkmyx-7 z6wHQzAb6a;_-=`VAo$e~b8MC^sEM7)9I;MfO|9SMz#hG84)GyJ?=0~5Gt=^(r223~JVR!!g zMg+q=7L~pV@0;G8(Ep6+!ZY)hC&mtM8kY6JP*|4RJ&3Y_n-}IOm zg1$8noAEr0cp|(=;U)g9TQ^?0a^?#R7vYfcilI3VoP%hlM+=RV)6=qKSd@Ml9D&z=n?%p8bh9SKyTx?~BGS%kxfZw9W%67RZt88K~8^en7# z_VT+)Ntsv1yU*N^wr}Re)lT}MACoxNxf~7&6F+zt8mCU(-MO(giZ8;dDum@BEwy0b z8I*nkxF}WozBmVLlmys>Z`{TRmcfB1?|hv&G{<8gnzX&63)?9#AK&N|Jjlt%cN0<6 zEw>>}f7c}M1QZkLKh55q4yHl!X8}1#T@fc@2*gSA0))n)41bTVe18XzK}PVwg9laZ z?Fmy1FyEZN7u^&%W+EaeyczWo!u$)fBFtI9Nu$mpeFw=PMXhz7^pa^OLL`HtLq_=pif_xiTSRA$VMx!9f2t~JFpF|MWH~J10T^Xqu zmNpbMc&c|!iDJ}6>N$tAb8opnOjr9pIs8PfX~F%*_tNU#E)EW%+;Vm#KShA1_N}cq zQ3$VmEAt#~=JQ^fy{TS~s0Q|pelyHHopa}QoZbxYkk^5Ex&QZO;-N?2kZgwXdJYS8+t5$wZ|j;`2AW8=}RIJTL?l)aG54}gH23|H}-puj??ed*Nh zu%D87vn@Rl?FtL;{f9ImB8g-?f$I3lY`rqpQL#=6U<%Ph z`uq3q!=TEM7(#6#@f4r-FJCfCOVfIHAmE1%tUQViH-LXx_zRpZ3+%UC4B|T)`p@C( z%kwMHG8NCSoP7p2tw!z|GBUH{c;?K=o;nhs}UYR%9}X1Of$p%VLZFz;O2; z5cD#3d1J{dl=x2}*hy|!>KqFASz_C+JVS#ZF-_FPs`%5$9wm7ALZUzrD(NV81`%I6 
z>yTs0!&t-L;V$BRrztnsbAs?g51ZTyX^bQ_45W?$JNT3YI3MizyglV=|{+luy-iK%#QEd&KyiF5=B0TIOGFdRW5)HX|aAs7rhz*-Hg1i*S{J?v?L z{rl4wegW1KZS(AIRI*?b2|Io44Ke7ko`>;s&l6{RWi!m!VID^zpK1;KLii&sjgY%B9mN`EIg}m%a?H_ZP=LAwY5g=v3KrJVW#86Aqc%#sF~+&5rZM|ign#6 z=y|vjv0G>|kcU7<MN zYhjAgm5tcbpbEpQC3x{9gn9vzB^@tnLKp;4(R9FZl0AWN?kgPHy@8?w#EZzrtwo2bKSm!MW?2QeJlJ~7 z#%3Eb_7%-~fM{#CUbQawl)_H&e5R_Z>b-Ofk{po~@vT2ez;*!kDVY@--T>Ae2Fp+S z$oqx{lE^JZ1Ct?nx$Q3J&({J2aeuF>szPfUu}8P9Z)oUyO=7VKE1N0GtoaONPH4nH z16VzR0?8qHsx7#WtvhHROq`~>dl=xrv>A?flhq{v3rDA!3>mljiZ3hWXb5pF-5xSO>hkkbJM!(%_od3X}{%r%OCL_zj! z)n;R5b%ovYdxTT+R1gSh!N_qW>F#VdrcHsY#DE4MVfrG7F%^O_vZlJd&KTUo4o!Q9 zh&R!q2gra3#GqK>n(*Uc6>qmfb3(~1G|hAuqZo!iaPjq9gNd7%oZGr}Yor^%DXK}1 zwhw*`{ahrI%z_ZYsc;$59Wri7M@H1(?M|j3y-3ogJYPXhP-0EgmLraN2szC7C z(E+L)$?YpxAsRz<^ytwOC5YuG7B%=-o8-ZMRIjLS1O?sq5lyv( zxHwQ5nF@yIZ`gKRB7sRq#|n6^5=I9P(kC^btYY}bkUkq}%~@m1QCx(Kbv13k15k*m ze{LF$SEmLWmapK>;GG*c`6AI)Wbn%6d#wu#%x+s%K(;90prW{l@Ni}}HjHvOm@;VP zV#*b|nQ5*|N8UOtcKeG*^z;xCq+0b z*@$}akxan+gU5o_H@$#mcV+uIAOvPeCSOMYed?)z5vHDMR~8f%79N0Ckg)ff*}H3- z@w|r04`Z(bf@u3KSjMT-@T^Ct6Id4D?e*rtlcaVW4=sDnN%p^(NVD?u;kIPu2g4+*XN9Ct9KAB&h^rSacz>((t|T?|>nA|Xik z%~A40Po6j>^dS|=4mKJ@cf4apsLL3mpoF2T--A%F4QqH#1SV+Z-!h5z>pisp=6bFh zmyn<{cGcn{a0;x6h7+Thk{4HDNzK`c+y+ zCbP$-tC$7Zk!59UD-#nF5>uPm+4sRl!xt*_#*<2trZ1?-5I0z+xH_}5SvftD9KqPS zO25(D5Hu2SGlm8Q?U2QRbYDau5Zqk){}&D(k%ZM7yd&&$;Y7A>dp;HxbsI)nT0aO@ zG^QjKNktLh-S4x~9x4XK|0D-cKE5Y#cL(H3z_&2D;->I;E zEAh@9mhr500A?^k)RyBi1HmX@BfM1XS5{{RuM#Q)Xo=s>&SBnk3!w+t*pigrB&?$(R+KcQH4c>tFD$;UbEF)P?KA2o@ zaUq;>Vj`o%2U#u>s_~D2i{G>Vf|FC@oH4`f!1(wkq@ao_iSokHh0#)fQbz$YS{L!UZR=8l{J#uN(@+01Xw^=N%WM;C52NAv#r z+@j@+0(ot~GbEDH0)av73Pl-s2s{rm3uECW=I}f?{`rLJg>?ev7|;TsV4NV+?if?p zV*|CE3vMmjGMF(rGDEcyvMU+pgMTDsu!?z@gtGk*I6(GlP#V8Uv335dYzN|f<9A&B z=wi=FdT79jy{dcm#5w)K+FXZ14~LfnmB@ydeReB@q%RsLv^ANJI0XYDOhWJ_pf-?A z&+~vSTHfBuYj)Ut3GMvUR8%AY-8c=zwh{5g#UcY+ydXnaS=hS=_n}`~LU{$G=BNPR zuR&JrwdwxJga08 ztwlRKF*%?EC|O{pgm@6P)8d-V_=lcr&1Gwo?Tj`FDhD-8jqfnW7#$364G5Qaxo6g? 
zmpps;rO)ffg@oaym4)@)`x8emnmsq4^j3{` z-#_f3H0;qV$i+55tyM|b8Dm%+|I7Y;+z=-G5z`u!&ftc5z}!97QupQ7}SvB4zCsocF=sXAM2l zV4{G3>Yc-(B{+^jeF=77C0?_h00z=rfdt||Awo>1aRXR0e{kNEB1Y?#D|?gU2_Hjh zj)}BL@j|dlA_lko!a!9mXm!KSMrddlONf~#qX%sRgjYomz1Y`YgX#bjR*#n z45og_@C;%mS9@_fM--9FUiFQRMgW`|&ndztC!6pIutgZgv0n@T_2ICJo6R%iR3d<0 zIwN=)WP(H#n~P5wek_#Bvqh4!vaHxNO?N_qf=E==ueG^(BdS}Q7qk@O6dWsXWe>xL#<^x`35m#p-v^3_Tn`#>&$tI(DQuFY!_$+Kq-WjlDZv>N(g}t)XBC`Q4x`f?(Q^a5|XlgVI4Ju_Sex1 zAteayZ<;w`rMqYJETDRAuQPIfl-m^dq^6|QMT$e}XWxQ%>ko`bo0EKVHhq%adTFD<94VSWANw4jVAvefUVXG2~FvRgp-Q zXMsk8it1lY62g$9XiTs$Lj4bq94SlpSzI|G(05cz z?g1bbvBj3ck?F>5uH<0U76En)R~}RXBdfy;qZO*r=8924v*yAX1t!Orh7v5e$`OgG z&Jfj&z^M@kw+r8GOH=hZSiXQ9H_4${>g;w!JU2e+Y1!H;4_>g%{r^@bE>+kg_4@VM zz<>Y{l&X2myC{N!#dbNuu>$^2aq>bbV`Yiq9{c_KcA^OKI>yANkkPLe|7aoY&??Zp zA7{NNAUsJXhZm0nPh*yK`bU_HHsv50l7?z*V-W(`288S38WfjpghgRYEbQu2%&rJ= z?Uj}7FP(D_h!Vmk9_4HrF5;R*^SK5-{o=o~c zFW@9Ij|Hvx3&g3aNC1zr=Wc|2^1pd(;VE=#oC~mJ*a6W?BIa4vc0gHSP^1dIjf0NB z^Zdu~7;sEUeepF&kPykDRyC_N*EQQUCHezs#Y*G!3lhT*>y;{V_bqC|@9u7!kD*nnk5Y{B8b@E7K1ezA$UkbFHR zZJVy$&&kPr_bzq%p*wa9m^S9SK2oC?q2X$;k<2Y2C=Ca!jI&rJ5Tb_@^djF8L7=V! z+i9Djx9xj60@a$#?_X1gu+<`Q)9s*56@) z;C+GsITS11OGeZiHpHltqzNv`ZX);5qY9v=q;;%WUhAxZ>wb+K!srk<4E9>V=RCM8 z+FSVfcja$6w492(0m+Gpq}{$902yWKt{6tE5!u^XOze-v(dTb#k({#&ug)6+b~dK& z8yX5m^u>;rqz+L~`!KzC2?q&1Me_)v%2xDU-p8bAdss_a|Hu(-xKRc6P)~@tP>`(P zE(&3+T8#)Ru)=O<2IF`zFn%gq1Rcl(x0)KorW_*`g|cc!M&n%Y`&P#9L^9YzfI^MOll>%~N11vc-* z&ksCgfQ_800D{M}Owzv>F6wI+87(K#&yXjgdAIDiHpNi$Cx-LEKJsWNPBKUPOR`2vQ@p1M=t#q;t^LnkH@u< zcVD@k9B?x^z^vKr&W&8-^5QoS+GPaEf?#x~w@<0S_)E_4$apP||Tn8@e zU;H>O!e)K90>aH}jkzT~njb%Y2rJ-O5@XCXpV{7Uo(=pN#`RQ+Bo48_wan~?nN|z* zH4m8R69Lb$7wRHw?_*?-RuQOm;U!t~w(-3e}ry5k2#+A<$L)O~Tm**OyCey1}u;FE{x)h@!K zP;CdiA?Tym3FVec+LD4z3cD=l9-(l=%vD7&W&i4Y;P4769Ba}C)SZ6ZQK6%ZkpAvY z(lfCkAH3VMcP|c~iu0RB8ifqNR-A$p@G6xXM=Ijb96Y65*D&f#=rU&mWYo z^$=QqEsPwhq3WWOB|GkWQ=U(I(`HE1DS1;&bLT`w2Oo3vY@v@Fc}<`c5d~FzmrbnywgdN?e#H~yM)waR{_n@_ z3JTmfVuNZk8B~M+QS#=E>C_qyqeJ|sWt3>b5LO&aD(kf}mN4VuhMaY`CTbvV-ZK=! 
zVBHmnR}l!yRln0y1vOz!pY&#dAbb zLINj=sV{|xhWdedACf1&IxZsk7kU{vFljp}$;i9G%*i(1D=+`E)H8o=0mxjL834Ta z;t+jffkv)WS^~a{oZ0ZH7&?tFdsMV2$&K!p-yAKLjB~)gls(|@T~0Z zY2bmx2NC$i&wtcx8Z8t1v!a)qjcuD@nu^cjZUp@v@gtDv4*_cZ7bv?>hx9ULaQaX) zzXXO-&Yyo4)&`xpjEQ%;SC~9KDD*(&2t{=t=0@m9R+WBoChHIYdv?O_G+v-XtXh9p~yZs)wwK zT7sWhtM#b_CcW}$wRFvcI1c;W#VIOQD4_K%EyI?sK#))@CKTr9=X*iYdB-_$jtMlS z)fF7E@fJgKbWz|K@NaZS+OYNyDnv)~gt}kGX$5P;w9m$Q8nN)mFrw-xuru9<=bm-_ z!~55k*^sI4eRCSwG2lA&r(K~e+i;#mOGrMpV?Q1{RX2(=Ik*vC~I>h3=9MiCDV znmk4p57FI)ybWQ?)C0D=5}}bCb2vd18;vC;LgA5ys5^|lXKhb$pd)G+O5OH1Y^=(C zEl9q`;J6OV)3Z*3G8o>U#;{R8I88BHby)%OhDbZ;hZ7ELYMOL!5Xlj+#Cz4fz;#55 zQFc0iJ_Hpe{|3&OfOMhTgiT2D>=-{YI<>9#UKp@qEfXgF^a4!qgc66(!JZ{{myJ0t z1gb#TVKV$iOi5FLuVxIqI5X@f-WOqI^VQ(GS>N7(n_=--dLM+=MoiI0L^iSigbWFb z!KpmzPf%ttb*$OomngPF%@Lpi2ZS^Z=c3TRb{~eLeQOv8OHd%B$$K>^HdgcEwp!QK zsm5LUZ4gBP!2D%CACCbF#ck)-zoP zmud!X@KT)cqZa)M)A@FqH4!Hb4J%NT8k7N4ZsE`e<_SI|;s6XXY zj7YF|-hyx9#t`4?bpmtOhmJu9M=*le@-6?+Oi9QI`%C-l6$C1Qy;w&ew11iiDp!P2 z(4sdc%30~2|4)Z&4%aIvCZ;1WHagnaD{{#I=dNYixq{bq)e2J@pFGL=ss$ie1H+aO zaBQ>4P&b_UFhev`I6%e2uO2-j$J@3|%i}~uh+9nu;po#M9rbe>=tOCFn(k&4C@WNf z|K%?MbR4T7GZ1{a{$@Q_=o~*6m$*!jDSnUGwt%ZDP9;W%()yF*`jwIkoz@C57Bax) zo~(Fz2btCgMVr!lcMkB097c-MBR>B^a*B++%I`<>kL2H-qdSc9zTuQRl+)#*D2EzVT8yxmEPFo*KL;W*(l# zvg=TRkx%pbuf^7x9;^alixB zI36E4(fJ7vniG*jH_QqxZEyCgA0)Fi!}s32;h{)OHN!a{P!@HKR#sL-O2T)YDK2N! 
zMndpkN!G927cY#myw;Fz2E<`bUaA)G0iUMsg-B1AI?8{2S>0&+3^As?Ijye-wLbmz z`?obprv6qaY~X?n%5RA3_0TYTKIIml5;d@iT@Vhfh`D=rN9IeUz~i`f{{?Hpx^4A^ zxl9fJ(K)@cn+qrSoZAH>i1cT$^}wX}3XO$d|VTr)C>N3XlKrxUrLumj?7LrlI z2g)52@)Z^p75aY&I}@m$^Zox~8fvbER= zrN+qGZcAk^Oe%vIOQj)7{U5KnzkB}Y{LlaV|M#4G&)k_&-|y%1US6-~_TnJ=sRAf; z2}VA24J(Xdwa2oyB>y>Ikbgd6#9_mC(r$y(4?LP^Z*98y)aldW!3Tj@JCl1>iqfB- zc;->3 zY9sQha>q#G67+%~RHC1E{uRh)p(&f!d=Z*SP-LTWT&lJZ)xXEEcBPhxUPvt=M!Yi- zppz0xOhIh9?-7B;oF*qezl1Yttb5gV7g9)H62=Z+euA=t-TI&q;qAW)of-MfI~ekF z)#b2_EJeZ3Z_Q4kzw0g71+m=8iSGD21izcX`u~JP6vsrCcL)6K55q9=y_54q=0Apw3A$5nkBHZwfy3u8>iD=T6w`>FkY|n3%|SEpIkH zUwXqlKFsq`(Ap&g-{NXg{uOO_g5EXCA3EJ!s{4+A=~40v$aui(}oaNt93PQ z=*pHfuP1tNIgg=5z4DP#P4odaFjQDI{fSdHe>GB`5Mjv&t2m#+!FQ{Me6O#UcmX|I z-)kD~!d-|R^C!onv{}c_(TE3puh~*K@s~-VS~a+E1b8%^d3kri0+iE|RZ_Sgx&h_t z?YF3Zo407uBJc#=1&j7}VD1=WecP9qPSH8T5&)e`2{MHMEV8_!v{QzL{Kb@|v?ZvVnu&|3l2JbNBWS3@`BjdXUDDkPp3D zM76|z9I+i;&Z(%M^7A9xMe-gj+EH%qQ%I)B*UC-;`v5V|^d~WHFvg21N-y}f7U6~m zQ|dnRR%djQ7#hwXJ>Y+|8c7=bbv!?R?pxq1w536fY_aArKLJblCL4iOGati=A+pZ$ zXuuwQaK!rkh2swTTc4(0<$Q3|`tat>R_IU)f6d^Q6ZDiK(5{mDdfTTDMRL-N^j(7j zfWiek7%`!4;2b#Gr0V_u{(_? 
zo@;k+MdHMFQuOGe`WnPp<)FdYQ$(;&&=B+T(ZzS6KqkTyQ~3HQlWMh{*r=j6%I`D^ z7ZXPZdEIQJKe9s!M++^XXT&L+p}q`L6$QQRbatE9zCY&YYhV09`79aM2&0;ZgxpY>}_cd*a&7Sy?gcYVB~T0bf{?zt33sPLy%^P8tIymnboCRmB!TPevWc_{+@=glJph@@5bg>!V#@N>NDI(!DFikUcqJKK08neJgm>!NBU z3LYM8(=y0(-td|>&E0h-MG411zj@VO{}?^~k0$F=x8CpcyQ9J1yeqC_xdVt%&fI7IN2YOd#A-1GVF7civg;^7VSF*RnWF=bO9Itv3Osa>rn zZnx3O7|T1Ntx?|Y^|6e*%h3fML7DK;&=2+FNB%T4f9jf_gzQOrZIPL6E<~imMW$R& zE0>1}#qIryWy{D|Y8CU&%*;$os8XPI-Kl48Pn+r< z7QtTm(F%(aosD|-5XgB=T~$I(%R*}Rs*bq-wBa$1q&|A^phA68^$5h_QtYT{X`#2; zxWvO_BSh%4@Lngv*HIu@^VU`-K_iu!-jxW#J9k=ANLpEd>bC98uIfTt^z1KI29rLr{qdu;-mqcAP`?yK2TQSS$yPBO*VOQ(?cC;)E=sXe&G7|A zkosT}{!lioW*ywCGI{t%gGX&&qaCL|>UdRuPQk5Lt_-u7%cuLx)hOz0Jul3#_MPOS z+`IRBUWl5$f%hNqb=+vXEc?#7s>_9_J_P^>)6$%qU+FwR>2ufp;AB31=FEcb(Bt4} zdn;}DK0ZUJ%C#{!);7`nP_I;|Kjn5n6_uQSgVZAO^~}PH0iw`FOg!W2<`x3l?|j%Q zcF+|-;Kn8tSzxv@q2Q)Y=zD8$&B0lr=Bu()Bwd4miNkZPdv7VM$28XXW&N8g;$dTPe@C;g-c^{1&9Y=2d zC5ll@Je4Q*^_{*I;nLy9=#CBW45)>pMwH}?F3o8Q!jQ$g=Af_*fvv1L3>O%=fBzj{ zqL#B07?OfO?R_QZNhqgU^aL{Syd}WxGwq|$8x|*&Ji^vZqbQ|#dUu|)T~ht;BrEr7 z*Ep7Mz$*r$IdP9nSIy|2;|$D6jLG3#iAlapM+&A`*A+CLhwRauIP0_Xmn`wIky?qu z%5a01^M1>1i9ve1TF4jWGqX@^heY0*)v|LfR+8o}yhfo%SFD&bK*p-@%eKE_jYt&4 zu|QyQ8Elb~$L6iN*?mLRTdoeVw1b=e-*DFdqOThicDL#DOUT9q-YnbQr)&jAY?=mJ zlgTTFwOap(TEOT{bZBcGuTOE@-o#BgTPHC$hz|DMC2m+Vg7$}wSFEu8!xu+zW89Tc zW0!-y?mHfgj2z{-sn?cPdaugM6>(n-S`p%-8pES#h%m^h>Otvbz%xOq|hNhxUwP6~EiP59FB3D@-ydUN5-eQ-@XgSy=sXf@t z*CbLXz?R;aE>!C%EnF5_tFOpxw&Fn2w&bHEAemRZf_j>2()5rdIdX>hZ1#j#>D)Ru zj*oui%$e|Gg%tjie|*6&^oma_pizs{;e97pe8Y32)!Lg*E(<2*)Cnzr=jN5bff;~Z zqPZDsqg$ZGgty2Z~`25q6aW_OWv4sm4z*8Tfc=%mu{m@fAx?(TMeczV46nwZ55@ZlAOu% z-M$v3x=dyo`Gw+*5;?KwRCXPY!MTk@3{tol#v2rbH=Aspv5iPFaS9sRQO5zP!&PkJ?~CPugYdyD&b>=>Z?mKnrdAHId!hZ!iUWYoO^17b zcURzWscUOfDJe2UT{2$25PeW|2#d{=$NXq#AM#)UL=10oasr(EwSM)=m2$0oSSv)EbuOlGLj*buZxLT=!3CTu8sS~ZFp%ljr*4i zCvOK|LZ^LuqJ|WjI?~;BucO5*;=Bmbpg*JL_TWwAZ*Q-oW3~>5mt)sZk3YQ59xFPG z@k5zUJMYyl8WRSpgw$}mg|o1~99tik{BZ|CeX^FDFxn2ZFfF51y~aGQ}oYqeL}dc~&~hhRx*g?=;W6fKs4 
zg+;V8qimilU%)1l9}?26k8%x?t9&a##tRK)7VO zNC~Vy)Jxfk1uO9jG)XeU=s5R>tnO#%!gORb2tVB?(mtY-kRECVMk*`6G1fwSSRiG$ z_p@x6;ATAvw&FLrf>gnzoV}d#$uY7#EL^CEVMKmKSmUQC;d;TE={KT~QP`veCrK>fl%6H1peh`g}lx18jUU zii9ZypN71O#@(4rjqBQ1?l8)O>l>(lMS?vwQo2vn42^SWs8Pc7m_N?RsmG=55fNKh z~hKdwq$`qZKdpL(+h}6c4 zr3#y>xf^Kczmvr#O)`jmer?@g!Xz9n(k07bxv>r<34~7MRY@wPqSH4~Q&+boL}#4= zIsmFhm7dKc79F7a@N*zU)JJYM(G9q9a)HfXVGhp4IkE zUN)PO&jTjCxV$nv_8T0PqwlvfTt)2R*xcW^vTmOJlblgXcdV6Bm4QSAkKv1ke&G;> z7{6!P@ASc}QoPY>~lDONzx`gU*s)9}in~bQQQBv0w>tPYB-o@z39}Rqzn|GTM`)C#o** z&G`GFs)t;c7&b~^nFi^&g@jSUhDsLqcP$Dl)HzV3y!9E57*dKCL<4MN^f#y`a?6BU zAm*dgc${C3e$v%+Zk$J9ubjY3uY7!_!)9W}wJ-y7bmw)}h@G&^0dZTp%L;rT61_}Q zGJBe>t#X@gR=3m(XxT+(r!?Yznc=L&Vs*U%3;rjZN1SjPlJtT#&fEiB#lP?2N`-dn zHRwj*+$B0C^}P7dDV|~%i&_$${edwIyBpVoj{PPABSM%*Pl%3;Q~*pBE6(dEOc20@ zRT=Knr}yCS(VLQQp%Y(0U&@}GyQ+Y}N>DW3C*JWhWdu*C`a~!m)q@fM%Fh48!BOSkHIC z0)1W5N6Mabx85alo+uO;PZQe4g36rwK)E0NkKgLm#-T&db0B7Rw8UuH>;Bo|KmWY9 za-ZPLbmfv^V!sM!r9_E+O=1!WEzkA4pvuc4j_Fym$b-OcQ|A$@mBBOoJ_9MhR?&S7 z$pd+zftxY6*TvX?8=cp)VpddU#fE3VWNii3Nk~tRExGXM(G;0?5spJ|Bj3mTO3yJT zRNtOtKR_!y3y0uBO*%iC@`^7tud3ZcJBi-qZ1$fXerp9o0+#2Me)HEum11;MhbOEY z%Y7KkDbdH-V7fs@ddy_g!{K@}lzdxvvy$j5GOdNE9u(>o5_qJSnM~p&Hd6+ zmkGVc_#=8e__~;gJqcV>3#aavVYG9%h)ZYjoIlyLz;&^rvOy=L z-F`#kcVnQ1HQdY*0#dBsGJiYN;W6c7kQn^OxA+0!cigzDW5Liw8R6n8XBC!?`O%aF z+$IK^4CTn-#C5ckN6VFww}3VkhTLpZ3dyFV6L>H)P5B~P5%J=^JWoZ>;ZfpdLZU1s z)TDFAndgc>CvO+B?GL5b4qQ0HT+9S2JFDlIS~IB>-PEuTn{Xu|n|BIYfbI;pL#);q zC;$P|cim!<#Gg79YF>yqjFi=c8}9I>oSW~S6HH*}YVwvpiqvX%67+~CPCdh;d~G)d zkU{5}bf9w21M;h!EpMKNdh9cxaXlj=ySR&RR+v_+UtA0LuEi%Z>KMSq8~F2|U$xHC z;E74~aWF9?dIYu7h5difgaUxOtrCR5VshAi${j(8fq}uvb~4k5YrJg{fejLZ(cuOL z-@Vq{#oQ3vfYWO3g1^bkhvSd7weH+ArQDGQnS)(xUGB|@&JsS~EuQtpu-47HlvrI< z{Y6F4z||YIw6)Rd-5MZDE~8abNozC5Phm? 
z!`ij98UahVo(Vnmh)tpCVd)%S_W4D+HchYc7p^xYxSE^8F*{WOqpa#uW%Jv{&Yq%F01rMQ%V5MPUwv4 zNFo)=K-#r-%%$G!V(!YwDg=K)pYeTok^9kbt=EdI~#xsl~Cu=t_=Utm3+%yY6X*$kCFD z%TT3`{`A6>Pc)Mx8J3QwL6!kd{DQH@BrHX6DmYbMMha(Q{U)&S`@Equ(B2Ui^zbm4 zj?~AY52m8JHF~qO6Jb>8cggzWWR!+G^$u%>chxiwYYC6g&9xIP06WCUr!CgK0mV3Q za1d@k_K0J1M_f&9tt90LLi0n({Irwhbk+=J3cDa*%4&5!_lXVeV0W(PW;kYa*U-pV zypo?H8-yp>OYx$N$a|w|t(r3&P#)Vy__#v+rnPo!bdO z_v|qPmt@!q$A z!X2ANQ7`VvILQ_~HzVD~?4NfJjfG}?c9CAi3l)mih!5aRj}{RI+6hqAxSXLAW{j&V znCPGs!^_!MX2nfITkAETjEW6T!l+lfAM4Fa&k$h;pJn>?!2bP=8jMJ>xs_#`Uc%G` z7Ll?y$L)(B+6YIQ4}RJ99BOzBu-$I&qe`bz=xP4#GJwxlQAq@ld^YJkE6~X_R%3da zZF&RDnPJV@`6;1^2YUD4dZbcX&(u`z0< zelt+1BNOO2Oq{mdC8hJw7D-Tm;cZkPc_p7nJtaQEh!(G}`RUK>bxCGyOkMc6cj%BU z6u`DcQo#dxmNAav5g70ejtifmC!AxI;H)}%lj9~p&#Mu{I~>O1(sg1$z}1{-x50;) zj(qM)AP*+rK>om4mQ9CglDrg&04B!GAF0jtd-TZ8&?UQ`bExG9;q8cgpseYp8?{Vl z|8R55U+3gedBvYUU-%(_LO~{g`AkC^o6K;|(6b{^VjLSZmY0^gNKANwFVTu}xn}^w z70{lv8|R>lmx#cR=9udcMU@YSul1yz5pn0C#{kwE2m{n-0iH^1sy<7+2OD+G zI94C3gC6PFoh1>)(Fsry@73Cun1(Vq{TzcJdPFb9*bdkBz-WxCb*x%}usn20G1rX5 z0KKGbah24YsgW{^8gMw@nYvQ!LmiKBLd#Y3Vo6QIcmEmJe%tdH#!H!G0SD76$dDX< z$Ek4-S%-K~9DQk$(Oeg+t`s3wy8pJ3Osz?{bji(2H;{k+dm|QWf*KPd7@yDsIOFRj zhP&dT$wu$y!2rZ^5g8dDW1tOWO6c2s6e5dOk-slPdfnXJqx{agr39tUpT~S@S_7G$ z>mYe(R3F!E@@KX?iXQH9%r6VHjQ0{&q;F}d%cnQ?a~wC$j)@1>^C-|kh>bF0Sk^{dYhzN?6)`Atq<*gF z_-1yB$Htck6rexW7UWntMEyys+n)J)+SB7Gf8O=ac`?~K9|4Wgk6x^u$hf$|JNcZB zhMi`|S*;DC5LwC+;7=jP`aa~|KO^9b`+S7Zpg5ciJku&X;_)F!5=Y;vLKyXrj7FZj&I;}P0lg( zp-l_58-bXMTvQ{qNmHk8A>yWRA@4{sRF68a5osTC|JsZKn6FaeuT7jknSoNe%>WdW zNrBG0#^F+xa3B{d#XREiLgpd-7TaIk?{qrO`+o_YL*h~w>h+xd+;TVh?v{UMQz--L zmw)36wn2^{%h&x1jq|?) 
zp(!#uoA?YT!(7{kWx$RBl{zwce2D)Wv%XeH4kRg)1u$>&O@8?@EPx%q=XkG3UCeyx{V0acj`P zfpT4ebI&=4NlPlxS2YYXVRtr*gO=wt)IAWXG)4}eSO71>vGv@iq;_Wu{4%bOiAj{l zcdBYuP<(Z(c?NB!5SKu2b1P7w1-h6{{w!GcJsunR=31?-(GBO&jnsX@*CexG=9%2N zrerCxNOV*^)?fq5;Nbnfgs+uf{fiLi{IEz49pA8C7A-?1ESWWWyAF+zhK^3ol5w65 zUIC~d)t{W3*$VB}^>E&vl(xz*rHJ8n+$;&9T$zsX-UL~~L#Q;snXoZabR_VT&Lf@< zeZW@ng!2g0wMhIBVPLagxv6k}8#M-koZW2#n=>0Qz#ty(-X3d1oHWT`NF6Wv6dsPC z3ELG4Nn^Iz$n7ep=$2})8Ml#xIy^y(*^ED`rEflC+QH5+mGrabn|mDhQXL=}64DuU z1Fc3WdMD`-%2_dWBD=njVyhn5{&fRzi?ngdPr>r+xRx9hp!EBs=w`DTQKY{f7#_Aw zp`P9of1Tlx!4?)wW%)&b8xI1Bf&pp{=h6m?=oa{bt%HQMOZouJwN&yoLpb~-HZ^vY zW>s_ZR$P_*ezQ?vSfT>Gv4IFNlA!d6KX|%cJmQR8DELBSo(p&8IEthxk%jme(A!CIf(4ai$Y)K%-=phDT_wYZTK`+ z>eB;Ie-$WOP=4rJFW|fqiOoAZ3K+?!(%esqAAZlVN}bxb-wI}OSkpR$@HF=eWH{Mk zsZqRv)Esc%Hp`4TW8~dC?oMFpOXW!7GW({S7H2nzX>BYmSRQ19yAsns1xias@X*+m zQ-l#Usa|u5X`!so>kjM6W-#}_&fy6$nf8uCWw~*>==X@{dO&Mr$bJ8(Qx)=u;~mU`o{ZDGmzP$E&dHCyd~+2fL^Gr1aVi91M@9FRzJH}8FAxv6RVEZt%YJu0OFok2thei?F*k=}nzTgOR zH3fmk6r*;Bi&z2=c61&Jbpz1Fh>eW8G-@ch?9}?F{4uL;IbdJ#wryX&^QTkRf}&~) zsYOLyIhv7JZm#Wve|vD$@oG{y}F0?-Dv;e+`Yq>xiHm2 z5>P+$l54MPHTUEi|C$wC^5P0CaCxy7>xr3i4AA=3w^l5XYygD$|enW-|f zM^s^R?{PLHZfI(hITWJi6(UL;J-EqR zFy80-Mxwqm88>S3-FmZ1`M%kKnXEOpnrOZ}I9l)#Mirgt^%FPm!7Dv|qPmh0tT&mZ zPJoJj2xD@xL0ZT;)&$_0qi>3`vXsCY1%U6qkW&dBu~)@sM#v$WaVdIF zgbQ$v$^diA*}G?S>Do1Y>?7uVNV4bP!6%M8?cSH8C_>`Z$HFt@2tXR4X+<+%_Q%UW z@2Yh`;EY|kdtvVBQAXLmFBXrG`pqAdm{~&3H3vHQUJ{7*q zvGS~fBSbl3%cH+r&HeD`7E*6=<@y@#f%COZ>1OyWu^g3b#!VwCj5pVq<-;vF zal`;HZPQs-W10f1$}(f%<6GxzaOClG{O*}wC9_@a^FZDFHR_RANrhkg z{&BdiBN?1Pw^sGE(=v9ra24Vq+&e>du$WrXbWt7lb6gA$43m}lfJXjrbHb5TLJ4*< z_~n<~uTcwHtXZ=rSvdv|Hs|{N*&j<%TObrs561Z=L+Xvj=Fx$v2Otzf#@hn6$Z>%! 
zO8~rW%uFwq4u|2p*`O?*yaelf*5fICU%D6SjHaJ}o+7y2p-K>X9~no#4U1-56bk4U zsx9D+w$Z>@L?U1kS-+l$5E;z%W*#(=V1m1Xz$2K&dra=|&`Tw&sw2oah}z$2lH zJ5J$3z7#g;mm6%f3Vdw7$2D6c`_db=dA>p|vldtsIr%p~54?-6(eNFoYnNl+t}Trr zV}wdx^&E>;CPtU!SGc$8tn;-`-bcn{${cy+1mTwmigj2>!7FOi#A}di0sWVChS6me zc5DLw6UisI$=C_vibiu0lmZG}4z*@z#Pj;sz?|0W$>FfoKOq7Ge*V~_H;22_Z`G=v zC43o4Vf~pL^V8(hkm19-(wZV3NW3RQLb%6_7u>#kcQi1%z(KqjmFp%_79lCGNHJOu_B0R zBlt?_CEyGR;?!e3wg6WQ#h|4?A1XA2ggYtsvkNG6e{bn}~o7-z?lP{_3x1 zr!^;W`o~FLtK3aTdom3Hh$gYkOYR~NZ=;T=PIEc`iYM#8>DFppEX|BaNGt?QVk<-` zyyWZ^K?^ma_HK5+2-DQZCmm=X)0;u+tXc05qH7W_=Ni1dTbR3DbR!0&CogYnV=jR@ z&{5fNWYyixZQ+LQ%gm>qE(dRA458SsB}Ik@yyBUJOF-5*j5>kZ{(i1W&e@7hyan@S z-(EH~c97DSYBRzEEQd#P^of9+Qo41IsoecD?!cTJkM*7!Fulf;U%=mzx}Cp>QfoBT z;_s0*yG=hag;a}=`bxCS{mVmgp6Hv8FU0D`^((2P2^~x)%)d(ADvW%Adf$Hi^4M*v z11JhL@hmOU=-f@Y6$RWL4+|=K`i=zUlfn}U3>oAP3@KrBddvBy^L>5is8oSb$q=xP zt`Lj>b7MBHW={%fQ8t2iD$(W%!!9RTpZI&$gm92R(~g@N zixiC3_}vFoNB@SV$FlCwU~A-1f{xd>JDkp|sV%{NT2 z91M#T%!sd86x*t)KKBfYyJMsP>R-E4-U_*Sb!)*5P@NcPz?54Hzcr1J47fsyo2QUaV&HYMwqbrmVB|cSZ7ZB^N$H!gcuZk!i&6>HrL@ z7w}ZXr#hUdT#+(_Dxj@k+es#RoXk|FRIVApjT|n1@CggVX7bO!AzsU8T}iHP@q9+d zu{QgsByM_O)ypngtM}O5znt8y)7rtvE;?aIZ^y7H$EVodwkmv{cCYh+{8y!0uN`_B zckYYdjnCby)^DgBvSY`ql+K^(ua9^-?e7_j4!xbGZ}l7zuq66c?5C_v1lduqM4lM! 
zwgNZ67PK!Du5G||3!*2dzq2pLzB$^)RVO9ykNY|BB>-iGdu9hJ$)m+fK;Q&5viS{C zHOwsMcpNxpa$+TgA&`lZ3bhSFM;X1?=t+nNs^+=nb|F_#C7_^nuCv!)5+_%jb+wLNk8I@c>A*iXq3iCbOt*%Axg!f za`Ny~!dp16U)>ZE0!v55c)ES4gfAU@gL?jVW=$j4Ck zztMh3f=8!PUZ@`(<*cjZ)3&18Ygk7iud1%T+wCFregAVO#o9FnT@#gpPw~vlE37Hr z9hJv3`+{DettF~v3Sz^IFWDGedCga&B&euP}>ZjE^nM zSf6=<+7szLOqWNVL#z4y3m3jfODW67towsu)V9=fL4Cr4SKf6*E zo6nn2@2f`u?zPUC;cVLoJAdf$K7{MLhUzOt>mKQ;)PUrUao^4!vg81bLdze6R?RbF z#kS|H)aiy=#2Zj2sWf!lG^I6##AyS?CH?jiiPVAnwhwSwx1e$=O%b}0_3wO?(vhOj z`o3hIh*L_-_8+%2Baz;7vP`zsoAM&dMWV8S?ka!-lmjG29CmE&=XL+Qm*VL9Qegg^ z+}!HnAk@t$3TvNpmq~tc&F5FIHdBwRvJtas3Lh^ODTqq2N8E{Ael`$xnDY4IYGo zRA~{g9PdCE0YE|_{l#XKozZ*w`bX$(ZWr@_WzKYaQ@O`bv-KOKp=40)D^ff10(v$} zjIRA#RWD`vuZ9E^U6B5I{S*3@C=^Jp=_eZ)9YtAJSWV*~MNR4T^>=L-5KqerEK3(_ zJ~#C1uGh()kh;sjiJnD5*LgXq>tYx|Egq)meNj>R*11fF$mKnRbR~^RCb|BohS$kJ zf9^5P>Q_%WAO+pt)dLj>f|%4g4`x)$1n_O!QZo#DR)*x1+`a4Mr4$ z`pz`PX9}z-H>mdus;J$SV<2lJ@XOf?>RT?HIYqeS{$2(4G$eva(dV^yuL^qLGU1up z*kRTSjC*}lK6WTR?ccx2OrCeZ^zW(wDDUIT9}$UD#Re*qMxZ>rL%Vk67^QvR9*BvF zr|4Jr1^Tq-oa{g8KVWR}bqRt&Xl(oG4iCBt{$uDCE=+Hhs;`q+4Ok=ATot{PIx@Hj zpUWBrugWCLB61V~H2i%hu#3>2-P>CQfx>O;JV7KmSj~%0ZbL*(8Iau9WZKrm#eKW{ zhS+BIX!WZd6a(mN(6jyZ+L`b^Dy{{Q>>|N;qRyj+mjia5*dP$uT9cf4)wVMR%}~IW zD22Dw@!9m(9V21SB)0MGY9hm#AUeuRqgJSbI!X??9S!F~NNzV!((Vb_{t1e^ka;2Moe)j-Jh@e(DFC_N)FO*RTQk|G|BgI+330K1o-l6WK^u#A-@FXRNu+<1OIzP_7(p7LqdBz*^JFy4*<`xBHd`8J%=> zN09~ErWRaF4uwrSY0CK`w{A`&&lfevny)w(MI!u;LB3Nhk$BiUumAa@9WlEe>%MWd z%$TLWX4{RX#$nWC#e-ZKz-3x zyw;a+$c7PxT(>4Rh`Z3wMg&Jrnl^2JnTu(;rkVqCnuIfFobEiAAOT>k zez)BSYJ(RZeY60Nt`}^CxE?JXJ)?tu?GEeCn8F`tDE7ow zru>CsinX90XGJg~O5hpk%&J~4y~my1CPy7S`PW5Y+A_;;|bCQxF?0SIi2IOyD38-v}#fnG%CXA;n%Ohs(;^fmhmW=!uSoxDIL=zH$=PUIJg(LjN?L|d7< z9&iQeA!Vqea%bKM>}=tqPGo?i#!zA%d_28A&7l+%Rexy-GL4hn z{zy(P>$j%*Cng28sGHbt=a*sqMSXubYs$+1Y5}%ENp*k5o4vJ@6V;Zj0BLp4SHiqe zqTvGYYQ6^EUb5!;pGPD>qdWgEui}fh%8&I~|simL~Io z+^$K21KTjRpX0prJ%w`Uf)<9m$tpoSirJwvQ0WtR)#}sD$xVc}W8R!8SR7f4Kz+&a)yi 
zv^4N2cML`(#wD1+c05&gj$(>h_SVzqAhR;s>n~jDDr{lruYx52=J)i+D4TAs@UIRt6AlQgDSD6s=MiUh*)xyL zxgYzvA(OY`EkjxtY?Kp*!q7F7tM1lA7}$Zr!ENP_P|k=3u*6QY<$9XS_T~uKMX&4H z7npfFMfbvy!9Qlzy<<6g0i`8r5OtTBoE)h)7aU30y37VqNswQBIJ(G85_wL8knKPl zVYYT8q#YA`zh95ozFnj(exf`GOQT>220^ePQESaynr=yJ8Yg8v{nPQ0(6vlgZ2kG) zx(pRIAA40_nr_TUcl~A11DQ(h*WwV8fFV?N=i94RgW5Cn!iRMXIVMGNyL49GnJk;BgTG5S?voDvEVB)a1pPhWPVa=on;q0Xd7(*Ok?0OCpnKhn zyiH5JHHle3hE{jsw`@p|>&X1lY;#7usPqO=WoCqyLRgZ!<+-ND`nFAPn*c^#ZVp`V zzz|iT25k0!ATiR4V3xxX$K-`k`!q44agn( zX3u7!O84G+oy`j8qc!TcwQB_iT4BJAqBB9Qg%#EwQUC2RWvA#eiF^C0%Q#1$kHxwC zqQ(Mm46VhH%a`}>sfACJh^0*u0?VIXQOwxyjlBHD(-p38%R5GXWsSJEgRglrK=d7n zNU7Px^NiBi@495=6wb1@-}dSb$FR-BgvwE{DX##!l=~8$!>r&@DBk+yd38Q8zYsN0 zXIA*}v%jrd*8>M~{G9uVb~@b&a1;_@4HF8rjWpG5*Yzb;kPbn)i8^dEaUY6V+^l9i zdG?cLRp*W!<*TYs0ji+wJ`=qEGQzEP*MNY@NV%|I(uxF68F8L31f#Ef4JD$`Kp!m$ zx#Rcefpa?^QzJ$bJxj9-py3F{i$AKVs%CJ!sM&A?h>URYdTs(RV(-M)|8cpy>gIL~ zt-pwZR7Ti*=7<~(j0IcnZIsLhVh+t)op3B1o`A2lKe;b&a!wld>UpD$T8BT6&E8XR zBj^ELz=WLM#>U$;dyL7zyTTO~Pzt0c`n*j}{5bNRZuX95Lr9saEAm+M%~kGbB|9Uk zo3rRzTAB{3f%xUK49_HLtpdgn8D*pM#uOPg^?awy8wM#d>n2fdSVkL1!$@>B;Bit0 zI2IqGO{a?0^(G2anN}xnD+A0jh@-ilee-4u%7`ONH3IMQ=dRDP>T|!~Jog#;gyKzF zL0u+h6w~HmNU_Z<;JOuu(2qayhLur)Saq0as`0pS@Y2#Uct4!N71vxG97b_G6n;Vh zAF^XdPrr8<Bi>kof4147{3?(KNddL~n`K;}Y%;i7D1e zfQ|R!?)We!%yB#|!iWEjEc_pELv54dgww3NcGD7#47ZQw=?Sxs&e6hu*)nOtT09AJ zP4!DAsL=+=M)wQs4M}$~8;Ax5UOLstpL)Ivmb4?r)DWIamyS<{ONFqyye z6FE=2Zaqi`UL!wSdk~=FuLCJ5SoHFf0da?}r6S$lWymhQ zY=ZT=B(b@c{*PpBOCYS*G! zIiFi4NFGMXjt*Itnbq1>YoZ>t+-NkvC6Z2iBzMt&fS8H^*zpdf-2{5$`n15Gzk&Hj zp81ph9?|QA?mx-Hkx>D>8lyKmaIHE9S?rIB%Ct4%H&WIgO}4wb*Wn3(5miCn92ZU? 
zw%)w@l^g@eijK^5pEga7i&3`;mP4H#RW`e&pWY{?Z<|7r#5{WQ_U$W55G{+*X!dTm z_vi1!VszX<0qjSLT#a7y37QRsKn;?;n>E+`uMLzIwSN-2Kxuq*CY+h0;RYbcJt19j zG>KMuWd}$iqKA66!PYBwj1(CUDjwM!Ja!VF)a6V&UEL6-)$pcvpREBh-_^N)udz2; zu7T{sh<_&-7vDdk$+&SkDJh5A`quC3u|Z4Hh60dN2<3xsaq_0X9>a%s#ZjVT&4Ts@ zeAG(a1L5N6s5#1|u7+Yb5!`#!CFnO)R^>x@5;n7E$K7~aUcQOx+Ge4Mt8+Oxi?BWG ziW5|_PR~Q&L}X>zOzNZTqNBDYq{WRL@4zATOm=zMCMta69M`(6XTC6*5~=PbmeBcK zZuCCI69anQ1`Ftc07*;vJL-U_BfqcuI8CO2wJ#>fP{^;qOCb6p_Rr=fh}7+@rKJP| zEWWccNxp%w8j(yz0XD=*+V#bs1WEf8?9vJQ42xBBidXz^=g|;+tMb?K%=7Pm2Vw%( zo)|%_&`pKJ^XGSxxu(a~*UOW0U&7I&wr#fJT9M9fJuo%cqBqQ17dI;!ISk!jGM|@}II90-MB+#W&YTMT zlQa-*n5gd5XTv~esS7=LxdDMRS7LN4 zdw$^6xStASUA!sTkCD z*=&+qC8A^_-6a1`rD#xrRs(<-fp2o*QzZL0sGcfV#-YZZ0*aJ8#xj!&u8OJ@nGma^ z-Wsd)Zgrsn$XI9*5Hro)OITiCUhx>Hu(m;*rFJr4U^oz_Pbzi#i+O=EY|wLp@Ny>+N+L`xx&cLw*0HjGr_P;mqvX8pZ+S0h zqjfL;)?Sn%II=!iBeCD404OG6+J1QDaxGTw^Th7USmlf7~;%0ZQ8|Z4*ya~cac2P!Y{!7 zI%lb(6E9vA5cE|l_*n+mM@hNX=pVU;d$}R%>guXqpgxck*r0X%%$AnQmL1d%mH9xw zP5hPAb%CtF^vzdMi71l;(8EI~-o8%8J$9gsGuWu!wyix~)8_+j+p88$oss`XyRh|9 z$>De20k)vT9@|EE(NNmCUTm7p_TCD!QGuX8H#rD4x$C6Yf&KfJUpWjy z4`5aXE67{4aFlQs9-ms(mi^OT{D}j6*`P`)Sg@5%Oa!?9H99&P5ANNkXnjVb&g*ju z)ER``E5W|1se`H8sAS%_oH;HpEm6||6sw*C0G2prr5urT@ywDeY4o#SzKnBBC2k){ znd)_xfrx+e24^mAcUx{&@Z=QSFr_;w_!z5wAoOCDH|0Ojw`LPYQE@iTe9otn2999>;D7^<-F_wN^mbreM}JpGtqI)oNefk) zH`j-e+T+TZPr30>EyVguVmmnJwme@;Y+V`r>*h$=U9VI2@spkW0L6E3(NverWM%}9 z&U2U~pG)}aV0(6q=<1UU4w7yQXq&kCE1}NUu3gI+a|&qqpyjflvQvl=XnEd#mKbih zwyXUqPl`eQp6@`F=>~e*d^qj5@G6}>xw#-cjmt+6^egh0jSPLr1~e3lkH!i{gWsT` zXl8L6T-34Gc>3^&M~|j-iqtjkxoaRT6!OhDZGFajwnW3wI34B;M#7`s9VnEVD&UEP zAtKPRd~pRX4t*k%D&t>ChButLL zWK67CcmN{dCNWTL_tms2Sg7!TUQvqlOpr0c$1xQLeSkL9k~z17v=)sFdFt1O+=B1(Pyk z4zF7B=MNw9drzgYpewb%^MF6&zGpFlWcVBH(bfr=1g%Mg-!cLDpRntaT`$;08CfvZ zNhdpy36+wf$>sRhIvPN$-0c)xGB%QP>U%vi3T0CI$4UG!zyz%`yV+@BUhC40|+oxthi|D(s=`Ael&Q?P5!GkfKu?%fN6%x;2LhJ zKCORdg0z?u!`(9-fc2=uWc3KVYwfHzt7a%2CHO@}FoV_e3Ag&72fqG_BT{-P#gn=# z>%P1lAKLE$p+Da{@sjgkb}dbt;HhME!bG-Wb2+U2t`q+S@>Ou-CvpWz@v+;-IqGPb 
z20|a@Fh6%rYs|Afd4GE)>FG7Ce@ot{@)ucEPEN1VWvCo&*oi_b43C^u?)O@^X(J85 znr*pxL&~?=8Et??Hgy{^Y}gU7+Om2c2TDDU{OPb-g-7@ptbbA~I>+STB{VQIi!8eb z1L)Q45UZ4z=%6nkf<#@G-jJ3L30EE4!(^<8P8l=ut{LSqe7TY0Sct}c_Xle4$>B+Q zvt8YYSTOPRst&6A21zEmGnYwR1vXet@t=K9-%>m&=Mr*aDOPUB^6P$$0DF>151`7U zT_gvP!bwZb6GqW|pD=(7a(RokKnT$5ptK_pUu=g!`O=0+qdhih0$<|j(bL=5I0tr2 zw))&D&d)SbMuptmQas+>y(6!;L5%%Gll3)!=jhzx-TwnD*f0SJkZaADoKef}KOg`p zho>}w%*6Kjz%zd3SJo%uo(=grwy9;%ML!akUc7W z)f-{uptx>}ZPO%}00`lmJGJhk?J>rgF$g z_{Q24yzR0o(Nx~{ZkL_WyubP2!M}P{UFDkOR4~dL#nC#z4|8QJ-l%uRU_#{ylgr8Y z%Hq2)wWrFh{shzooIvfEfdQuPY^nwc&}JHFD+~q*z*4_0Rp4tAlH2WI$8nA;)uW z<$Z#(IOEMEV`)0FfY6eY43CoBY7Z!Sp*R&^i(H9Ot0}*t6whIU89p&UX`m~r{^Czh z&UUtc5E5m9x-EV&@v#~#MrV!OB?~g6J1km|o2tEA>Asdu!*c!J@1OnNhj!>)IIV&j zPXz1SfcWr3AH7OXyY=ZCFasU3bSk~`IK+3CdSLT1KK)XgDBkv(mg*42da$+2>S9H_7_7^8ZHi^`PrisH`_$9 z;%`s`)Ak2PtnVZvAa)_vOkj2l7QRU}1l7hv>H8VEs)id5Bs>fBBJB$Oskm!Kt$LH+ zQD(ng>_P#z&+TS@j0Nb{h5y~mH|eJ0VbBS)8M$hGzL?YWXFE*-}}Cel$FwbzS?G>|yJw8xaIj;1bj zg?&hfcPkSBfEQW0ja5I9gT^)qgq=Nab%J6d5g+I1zX%x&MkWquJ7KkUr%t`iFDyWj zt+gmwtDxx^NfhwcJO~-stYf1rQktq`%f07Ksce&afbU;RUz1mavlMIda$=u4u zDW_M!lvREC;(cM?fdj2L6(%(oVM8x5UON!_zG32tQ2a+hC=w? 
z3`f6$;3J}diI_a+3>Vc*x%I^H6mSx4)h5Z|v$0uW zSQ<_3<7DqJ=f7G2zy$Zw9`p(v(8BRY7!{eb7g4^4>7P@f_>!@y~V zyHK@CQf0f*tO_<}bnI4*DAiGVmLm znyOvDxA~g-FNGuBhc{5AlgP9keL&Rezdn81LC@vno6ls=V{TrUvEA?lh_TZ%cF^KZ zbl++Na;rQwe?flsZx+-1W|Y*ZaT>#KsphUjnk7k6xBmzutpJ#RfjXWkprT!F#Z`#DuF)D1|TuDksr#P4H)1 zA%^oatHG|pr7@LdsoiDnoH=C(oYJBx)Vv8%oU-f3_wSiByx&eyhf!M1wL42V$@ANQ zn2Ow9GGW1bE<97d8(RcZrUDPO#(GPc?a9iOD^`e_5sWo(2~{(giqn*K!r5-ovDnbu zZ8pYOAY!AGZ7&`?*!6fQr*Cg3yNVt@(Jjju3q7IV6oei(#UP=hV;d>q2Qum4y!3Y1 znIopk09Eby?>~MpjisB&s!ohlX3VQ(|NBDVt-co?#;h%U_R6jE#8kb0)1Qu>a1wz3 zsEd8RE^Rf8iI$NuPE2r}h8Fii7R|}Op>0DfwN}?dG_wunb zXUs73$=40K1AS-B@Ss932wf@zuMWOMsUCb{&WDH79qYGai_)xX=TyxV>>q zUE7qkz?GwEIF8INsD{m(4l*1MDhwarOJw|Dx+V!^(KFb;#*0NZt zzdwHRM9zUS@DVk>jQVj3ka?c3*mi5GE1Cy3)Md`?OT@}(8u?b8XdTBO!8cweAe;P) z<{n(PmL%A$cZ38NHyP%OWsPr8REP=a$37|Wc<;_%!OFuZL;^qY%jLj!OaQiqiOzk0 zw(M%y7mc!60bAkqWsc*wf!M2{*`)C>lT!23uV z3O)S_eM;R=Do)dBX(4sT0efjT|5}YovkUa0hgHiqZ8&dLHlgD%r zaCM?3#ovDed#U05;RWPj0hPpwUt-G#2xO`WTbhHqj<$Q;18!sVMPjb?ZM0!0QKq9U zK^EB>Zn0rJ3XdId%Yn#v=UwbOWj!lxg_YEBtOVla zQuml)LR3D!bKy_4Ai2843Pwcc$T@pc06BCst)GCc;m;Mn(Awu1O=v*#e*tHdn2_Fz zc27+2{NMx;^>IkA@#Dw$?z^SsI>c=1$EnWO)cmD3y*r=R4Mvf%w6SJvQ5%K~DJWPZEwLN-7EBU-zHF*I~d z(RZ4&A7NIne*Ju2m$M6I^ha78LZKq5z3Npqe#R#|5T2An^{=ZVT3j?a3lDM8NE{W` zOY`J_?={lBRSY>~wN{ILuscQr-Cxc!M=$k=x`Py%((tQ}I_cDWs)Q53jFqjI>A)a2 z#nbwmn&wCdFAHE@;C}{#X*%EIok+NeUkPm$rD5G*z!f>YmBwHw@-4j@yEso)5^(UQ zYEZ<3R({0VlD;h(tknV30JMjr0lL4ZOJFV?Ly!q6VrW0~t~RxtLxB2dJ&8yMjdPtH zY-WGbisdTP6^FIkt(~GTLHa!O;@=nTv;AWfSw{s+scd5S$kV5TdjSj^UwV%c7=v=} z`!xrKue+F|Dh1T4<$NX1_?(GOLMp?}fd!5+?sq!GCQ^J$=pePt!rEn2bOE1|u${(x zvW=o<~LSmxL?0!1Q zS#f;RX4pOSdLbteanOzagYrT)>_ljIpX{Dy#XK|p^jMBY1kmkVopkR0tD4q5mc3EjqQ4}g^r*$+GWwef{PHCfoCQ&rxcfFs_@AJ>^`~AKD_wor#um=~1fAh2ITR+`AFs zSjZ;LMb51>Qo3?B%)52_|8prgkN?rAVBY5EM%j`JXelXt>B~RA=qPLhlBCUU1Dkkg zzzF5=Hjj2DF%C^@MxbEY^nf^s%bQzLv_)$d1~@ z;?Kui6t8o|$xA7lq2?J9Tk?#Xv=mT9*_N^tf`YZB4_+8)B}a`7v+S>A!OSL$ZEd`X 
zYnMYlezmXUM8>l<*h*-I$_*1=>>nI<$=UuAg{VU!uY)Qi#}gzW>xe%VUhZK)yvHeRK0UL|*x0N`k9Mteb8%@cibHKqDGB*!RoZpb;=8y#1>2_F>!f-m?Od+wk%n;8wQ?Qb0z|N>CU~$Ci&@u<~islT)8vg;#cG zjP=tr19aob7*{BN{sNz=b^R`h|EiM&o#Q4xUq@RR#g^QdS1~9qH{!!RuZ#xFpm%SV ztKB|rElCK^aWMHue0a$A#OC6@7MTs@H~z{ndT+qxs43iRF;jmSZeN%^XC`9<{P4}L zB-)MQ9d8)R`39*psb4F!fhTUD`CoKHU#|Q8YY@8(ZrT>0ViG}0&;vP)ydlc*V(#Z0 zeLMy1!z;eUZpf5bv-VnAK~R80yk1(*7-y2xS6XR?Wok_PWwIfgLFVqiidpQ4a_=>C z`pks~7l`bc9F5^Fuzou;$oDTOh^{jwote((@_jlEw{Aw$-{2|HdX`7&EaMrhn+({E z7$>#DlHM6*a_uOdlXOyOWd4E#as${Ld-e|b-U`nPbX&I2>2(*);;-UWkF*8T|g>`R3&-j>@Pwj4=o^JF zg+4NJ|NXmR$bWo&Rc)oKU~bW7qy?ZRdOhWzgh4SOuYgiiyi0q)R^7a`oh8Y18k7M^ zh@QNIFB%2lJi5wR2u=4hG!>ur={8A1L-2h(1N^|(lUcbYt{wy?R;SV6AagmjhNyy2 zjtpCSSuE%%lOAQ)q8{x@ zD^BvVAM8TX=oQv;u92%`HZq@QCyL^RoRo+BK;e{W6@9MP=G$`~i?EL(^xBd$*#W8X zYqRjXA|o=~M#k{xUb5tQq(T`kj1@*LT5zNGNV`%!{u2{tA}HH{-Kb@k^e2 z{>&NKdZ>7m4}z^K*X$hq47FB3<80jF*C$5(g8w~*)ONE9?PbGTN(FBV@Fq4=Dl z^kz~dL(S5+Ji7rzFNJf?YZjtK5cy}1Su9Uz0WP?%bvr>K(7&&mji^miPw$5J zIYCNhUfOx-0$hWtK~t9rCbdQZBRKkeVbjj8et&82xW^X+5RXXu#%w4|eAA=!|0H>s zluuI~)3obRVLxh?hhQ6m^W3f2uJ7ahzQBh_Wqc{)V8ds7Q?qBcp!ily{ZyLb2LdXvy% zpqr*gg3i<*9hlE-^YSFwwy=kMql%?``bwaac{k8WP#ZScZwE76`spq3y19z(2ytLY zgh$f|>~g?9g#$zt$DOF0=Q;n?$@blv4s25nW}!;C$y<@VUw1_t6e?8Q$*N#aq*7$v zy6iaAuGuH(d!g|@&tZy{5Q305d>>rt$*U$;jN-GNYIA~(yVy@ZzXt6reDR{7{*UR? 
zn>TCLEH8pS?G>2Shg+Bl#KDyiA{pI$lQL5#X}ZQU?GN%JB=^+H>c@g8c)SOHQJYhc z-{&3#;5NXF&~ziJFXg69i+VNu`h`}eAm~tBc+cTAblOQwLi@UvZRD;0U0za>b;OpX zx{_}SR-C5FfKf$gfKU6GC}iS2!7=b}9ad`dNAseGx;+dipYH|mUIqOZGj$fJ8K56Z zc~nY?TN(3&t3+ZS(q=_5mXUie}->p7=M|WytxpwKm1t*^5^(%>K$&;^#AC@%EV}TI<7u2E z&~O0>*HM=*51pt$V+(?7pg4xbU^d25JAk8}_QcXDjMh0%+FVH&I2yL?#px*V}QI>naHI(Num9>DqiJ7Qz`x2DWIgO{c6oT|D3mck?id*ZAP$+~O; ziikY*3goAhQd7*D=a_b&$Rcp)*+|VU17leh%~3CI*B4Zi={s2ADO>^~Za06HUyqf3 zzW}ekdHOL>VqI}fEYD?6lq`}Y$R}nZvUAa$s0!-DAs*V6&kzE-us=PJsrCED(Z4<; zTt`AJ53;XXiyF72ZgA4Zj3{c6p4unx-d3W zezQoL$1K8k^{PTMCY_B#S|J-qB$XrkG)O5m$lF@B!*kiX%oqUx8aj;M*q-Z|-nU!& zi78y&O2@XeAK+{|9!@XU<2`r$YwNY!p83w3cY5M+vH%Q6Zj|hy1?6J2;zMz|(;4p) z=Q}-M!tB{wP|W_g0j((;$h6A#fFu04j2=SJWChtD&-H9e+(g4`%56G$v(h6@n+F|O zl&^@BC>X0B;#h~ znSNxT5+sGM`ZrCVI%=J*F)^*z#Si<_8j0b&0L88A*%$w3aKZm;FQ{y6Oj?E+@wKvq ze8M@Yza?%=90VfbEqV9qm9b%!?3)nXcR-inXAL&?=3W9QdhLbw!OkQmHX~fTLCS zqoW)-hmkW@wSXZ3;LiJ0o9o|l{oD4e;bzucM?{H+$ddQ%XFG`KGDF6a^c!dsYSJ;F}S-f$_UOxg-MxM+N5pl~5@ra|+nl-m&p)yYDSv#=*;f*ds)XJ!x3CzW@^Jxn zy7F#hsHZzC^$^G&{g}EJPOq&Rcad6mA}C{jqI@y>&MWTx9QbGrsxDCB2_dc+Qm}Ve zq+GwC(SGZriK3+0_iNvI@G?~hMz8UhC1m$-@y<@DFE6hA?3^_KXt@5zk3N;dh#A1& zI<`=J$s&ZC@4#$rL9JfW4>G~7*xb6`X1_?y&&b8p49 z>=8&_A4t>a7|!vJs}Ij`iYHdcmJ`L@j(W0<0@I4NG7?usJ~HM0p1z?9ufUkW(ci8` z4{{v}+-Da@0C2v%j#r>+eh*0$6g`Q!cSoVB7e)N*sEE%kdd*>I7k^0AGW5LC%iSeW z)5deedA*TeyZ2Q+uw*y*&x za?HjXP8%02tQorRZe)3V!lw6izZ9>QH_b1#-{khF?x+64!H+H!oYQ^d$t)@wL{n8D zb%`5le;J*18Ryk0bRS|wS&rIUx>j(iCb2XCNatkzMc`e|*5dMlj)SXcHLeo`)=$IM z%;3`xrj2+98o^DJw#mn*=hn`Lsm4)qcx;c}y=5&$ldjP0vKiQ|4h9N++yoQKGHMw=Je0 zoH#^ zOR@!&Sl}hdKM*^gvtkD-1jiwoK3=<{_uRQSoHgjmbOsWy zq>uF1s~b@t67D_jUlu(PaCGBXb8pjxlaw3m`F0P#Frg`%qoUFRq*w7k}jnRF5EiK!BM$^61wxXfWNs~RwlA02m0osWCsJd$H>eYrI9cv0; z7Ytc&JpwPQ2Y|8HLDXSt9qg8Y?X>2_>~mNk9DV}*WPnuIF1NDhJdx2Q=vNnnkNKnM z@>IQg{<#$92nn=ei@)%?Z;Ou~J=fN$siwkb#$I8ph9ALqN1}wf9t5R$+D~2KbCaa2 zXc5NRjUTU(J!|F40r*?Yi#jx*Mjc6Tnc7|f`gqG@cIec+%ne=h?L)j>XB!3ao7VlH 
zv!O1iT&=-IRL}k0eHtMN(5ZJ;$^rm7L(#bppTgCQz(chUU5q=yMz73|{2iLSWYHpP zPDj%pbRt9mY^_)hA}~hd2_aLcVYgk-!#D7F3!KGuI#LJ{uRoxQdZPUC@bt7DmY8-+ z_vPx6EJetmiSvDF0*v-Cd1U{OvOj84sKKmXf@?+7mD8^Ul z8Kj1V&&n*D_4vsX0oPk?5D#L1zc}Ter)7Ct3A>uJ0Q7vaFse#~UI&e`~&rFVcXVS-&qoPM!_UPu*r!zd8?=@G_tdkvo7Jd63*n5(kn35Wp z@>MvHuqsJ)(Mn_BE#X)2a)v{atpPu^2ww-xwg^7Q#!GHRtBN9WV<>)~Hy6H&SUV6P zh|}1o0`7k#NVAl=fC;s$b*?1QFN?aT?kX-@x|=SB2nK>c094+RFUl=9w-ChK_wt5@ z-Mj06YQPF(dI){b8M;$b3+0e{Ui ztyG^rH-v8b^A&D2-uB9UTv6y=3YP0LeF% zfE}P=|LB6&4%3g?2~h!%fn6e93om<36i&$N#B60rrno4Q@hGUe(W8rT>Bnh|+d9-H$8{(BscGYXHpnNaK=)HA+ZG@Nr>$7jx9yJv6tWd@ME2!l@fOL|JpW zUHIm?WZ%GGYy_krD%n&rU{tfFNVfwfS=lPBt|3d|fw)hnt)a`HfS83HKYCQG(_gQH zK})>?t)bMlW&qmfheh{aD2c@YE?S-ziKgoNIJ%Y<42A( z=fZG!#^0@ffAXqhzevr4W!GL@TG^U!q}8SbAD^Ms5=eB!0cAu3MWqq_(h|5_=_Tff zIlLi#)1Yp}4pUhrb`T_3c5iUBiXXuLC5MMk$ZNO|`|?a}BOTsfvxbOQr$Gdp_C7mj z{`^S7&LGC_2QN(XVI(aCOo-sZ4nQ7Xx}Qz|}vG8+h7K1^7LVDsq* z*$%KbkL7#%$B!RZKInORTbSfaLAp6B2z1kJ6tOc%nzUqazXD7ptiz3Ocqcdvfet1d zcw%$4+B{ELoss4=D`Qcz-Sv-@QDbg`4}W=kAF>nD!zf#v6J+G$B`2! zq~W|Vy>+|3svy}O7gvH?hyuMLZpg}B@*$%i2v^BVO-=Q3mRJ-D%w)s9eRZgN$_go6 z&ntvL2?8=D*(v5RnXlT&PZf>g@OL9Ft@EcI)0L3lQ2D?vFch|pm2NKQM_No&x`AO! 
zvg+1$KXRdI+-c$sI4xIS(e4*tT;BceDtKxB+D!w=G~jSRt9rkyhF|rY--q6aTOk{5 zfp-QR1KUg0m)NgyI}jkhU3qzaz**!#Xt>-<590bmkB~7XWm|@VGE9fpz>rK$hiL!vdL8oCQeA9j$5rL)E>g!+P{*c$Eqo5B;&P zYFODPI`=Q?Bq_ERxpE7ydGjJ@3sf{@DurOT$w7T9a*<6S^jjz@sZ+SdBTvr0+UMXe zYAOXnqh1Vuv=APeAHmOLRV2Dnr3?wLrLx6tS@818%#>t*vL`5_`AMs|8v(r!oYdIf zyZUobcH8%|2iAA9Pp(A+fas~0nBmng}vz91Dc@FidfR21APH;ocMeOzHW7^RC8d$JN$_Bb5a6U9L z^J~U=$)@3Mq_5%&M6iLT^jBHcXe*RTBEkh^V}jbC<;wp;kb2e>M8X8g~GbnCK}7%&^vq zFE!XGFC|=OHia6|Onh^MUqp@@wwMN~!Iov`@!PR|NI+nMtfWSsm~w+E-(G%1G=q;{ z^&?A_m-Dp;Avv-ECBAGa+rJ^pRSOs+NZ=XWZiJDMk_cqoI2~~$_&go*y}tfG>}Lv> zFYLGL{O)s~{Z8A`e)zh9k|0(mdEe-p>N_&!5sx`w#(W46RiKO&s-8bMy*L?WM)yBeCH#LUdTD5U`5EX6kwYIPR-!K&ULp0d(5ae{U6@L6G38@{yovf zF0ts(he)mou4Vznw0*aiqa4rYHI3ifwq#QAnh7He{2pShUxXztRe?*~G-wozuvF}C zjtZ+LJR87!jws0*4RBUJ6BA{AX7vJ0SVb7e3+ow~#s^6ygF6!j`1o@!Zwa@q{f!zT z7*g}p-}BhEKxesn3o}(1BKSgGJP8sRjyzzlMomSLakz&n5Um{6gH~l%)H44DIGFJj zw(J@TdGtNM9@Fsi(P@3z!nA8Ov%Q8!0Kv?*wI7+^eGqp+1g%elQsIjqM!tQ1 z-55lP8yPsSoWe&@avDR6m3eAvjp~Zh-@!dKRfP zzYq$s9$Z5(u{*8%gD3}kd->ed%|rXzefZ3~UQK4&-jl@dEGpi%^(9KB?OC)n4)ha> z2P*dWxN;YxnS>!OSzeLHPakTZ8AEA6V+;Z14Hfs1V> zdql{Gy^>Av0Fzn|m4?s$jbpzT_!H<5wvh#YB7&}%>U6q(!oHIC@4HXC|W9Gs!EoeMIL-Kr zN@*O$)3XoqB}EJ$Mk0f|$X8-wj8)s@_6|zsCA|ieQ+Y#Vby#MG2NzbUp0`I!CrO*! 
z3jGG&ym`}WBkW-_2rrV3G%ve=145M@nA09=LXzbaB38^1D*A%a$YO@m zDq!8RCfK@<$2DcN+K6g4+WTa#`FagN;`nWrRXq@9v+m5brSRXW;sk_wZ(xN{caAYorNM0yR~TY}0OI zQ8JO>nDrT#|26Q>n_i?)7$SBmI87jcl){!TjvhHO)tWLA3P7$h3*9u2%$Ck55Z-47 zeYL|syO5;vA6$q|sUID14;PB)S3FmMIWyr}xesAY>DP^(Wl`MrEvK;I=1^5w(rbQa z2(}jc?&syH0PiF%-ZR2~IGet24m@<|kTIFdN%zrIlfi0Km5Ao=P*7z>1(m2d;Ii?M zp-97+!M}7QcSe;sY1Ge&FAtNI$T;@mBX7_ci-98fpfoM_rSl|#CS|EON2#6{oIR7S zZCT&N|AXsAu69D*vR}N|TY?0o9X#YQJ6-MR%7vOOujGEF%N6ZYk8eCK$)ldRyW);v z&~+|79A!mU)b;x)bNfH|@~yVE1m~;pOpVwu>_D)a+=E9X3buvx9>1Tn zHU@Iou>Ky2Hav301mGxO0X_A-qP;;&T;*T$b{sxiE%Wf`fu0Ltp~7)NZucSF{ zO;mZch(-zujoP2*>8NA>{Yc|4lQJ{v-5wI@0kDzLi$+?gkGak$XK9e{yjGn&?b!lk zB@kBH575htw_NfKt`ekbjOPBviHPjpL95ue>b{~k%@P%%m|&C0aB)mi^UoB!=={;b zdMX+jdWR>)plbUGxv6-0b;RRo2pa@(eu#o*$-O=Gm&iOg-GCLs9vjJm-4%vCKZKJI zgemH=Yu^zgI&(U^WJ}JaWLc~WuUy9UqKNX=`lwRfA~?a5TYbKByFpw1g(!drXTv zieR$MkNA}rBRq7tO(!Gv17jJ9%J{ZkI}l5X<0o8q1T2%+LU>ESy;0X6J$@VxdZi@7 z*G_XwvnV)7C9kIqheGAoRgQzkkpS-*D|zg6vem7tHQY*aVFXZRA9AWAe)uc`I2c^?3XQ@ zZ`dDdYUoi_T`fVp;54cjBi@f%a|Lb=;3XRt^u`IsYoI82>2CpQAIWYA%J*e`V!9*TcxanX%+u+{=g?>%j^+( zLb;v~=%70BGD?ib&o1F|gaofjL;(X~c~fTsJq!{&Y*x0x(7qaemm3iR5bdlSc#5`& z9zCO%D61(|`+Siui*+JVR;N(7R6SXBl{aryc9Pf>&U?!C=DpX_?#5d+L(d(XHfzWB z?I|tU&HHL=Gw?Rqc`pu!A+q!erG$nL8zCY)4q38v>7a9t$`g4ZC7nI~J7hWW8_k7` zYbn%@udj#cPr6r$IMRK@L}WB##J-8gIgMkVy@Wt7W9As@4{igGQZkK=MxlUo!DXz& zU>t4($$vIR-h2S8Mi<&r@Lz`ucf}h<5mlOG*?eL^Ps=rv0-~t9j!~Y&3F`x5xP7-# zmdY?RtIXn68Z~9gF1xRY=N@TlYmclli8Cl_HEL2) zaK^w)z4_Id)Eqhqi$B-VXX6pm<@tLZU-ocSQ$=$kRlL?RBy5X|bDYZ?(g{(Dfhw?_;h|;WH9&fbbQ~K-q0D5$ ztz6l3o6%`EkU!L6<*WJs9$ggq3GWc$$)w_RcLs7;?)cD2Kt3d>?0^1^c)gsm{A33z z(47S4J`Y|q_1}0b3$eBR_)habhw1IvQ#xLEd;lhYk#3$&R(QB)ckGK#u<>Gw^1)8X z?f?oF2Z~|(#EF9I&*2Y1EBnkBfhq{4{xGf@BfNP$YO{>pmUC3-h4t4nB9AZc$7z%% zjF)>8s;^ffT8e+jUo7Ej_tJ+?CtcK~S+9u(w)x*)6bss!1yq#Hp%F;eMl7`yG;<&4?U6ovV<{xM8WybAGLu z(6cwnKc03wR`qn{=89QPSWAwK{Us8H#6_p1#GU4pICzKV1sZ}0&l_Zw+yv`q0^&Ufvo6ot=5=NgR-gpxjN zj21eV|4N@&x*UT+gY+pFem8MGH_?@j)-&E_(~z6_Vkb$MX2(1S0994>&5sSD_U`ZS2NNKN747fZ*s3 
zwGakUW3_cU-2{2*Sv9_Xes@ZIm>4O9_RO*6dl7W$0FhqfyZO9FiV{sRR~e2)Vj%sw z|8Uf>1V!kFjQ|7UQJNNNx;s4klkt!SS#uzw=O$eFNCXb9Ge#QxC6)PJ)VlK4_bVqM zVE~E5*{!q19Q)Yy_>pgw?l*9@%#U)g)-j*8$rf%QgFRA0#nwsmh zE~^Xp%PYe*Pf&+Msr%B9c$|yu-Iy)`=b)WuuOnO+rB4=S($L=h2#_XAe9^+dN&LBrr-O)bJ1V-iMV`>m#Qcso`j@EWMgpLh^vHke2 zW((Amj6mXX^%`92Z8Cnuc@vUulZMTJEJV;1$hPE$|2zEp-=8+NwyxwRO&db5XN@ZD zXqQkkBh)Rk&DOM!2=*dLk*RT(=VHCwmtrr%0pU87s)!)K&iXrB-NN5~7p)p?e#=@M zTcS*0vKU-K6Gm^7Zu><7d47GjT04acS()G|kJkCz*(or556%)x*T>4r*N)8$ElBX{ zRmB7=!_VZG^o1R%&2P$)(q-e>0~37^PhV<)*}JkBQFED`;Ot^{wXqo3*%-NKG%epm z?P7KgQmN<7kQjlI66@6geC%la47cso{v)SsQDonuE&j9|caOL)b__{p?REgqvG;1G zTDX1lbHLmIfKMHFQGB^eN0+uG;fXoNdI*n$*FFHB?fqnC{v6s*UuBh+?Q&LM1u3w- zx}m{>hG?)$@=48BFZhwswmkcZPCESX928Dz$BuceIlv{$Z)Oix>)HcsMvNoh#mOz13(Y#~>Z*WNINu*lb{zogqdyLG+SIbW z!=VwxZ_$U}D}mV4=lXSWZL$9N)TfA?9d!~JNLrxhNO_QY@uEm)*Oeo*FznGIDzA{i z9ePo#qb0p_6=+nd3j>-Y@F`yAj-aSl{6dc+yItS)BZ9{gBcckC0xg+AwN(>Bo$PKH z<=pZ0i2MEXWuqrAKD}?*YR!L7rA+_hPI}u)OYj3o+v>|<)2gp|Gy=HE`8wE^V;ah# z__H?_iETb5;_!eNv)s|HO&j}RaRY-<;YueytWCSYn&Y9I+u+Md8EQUw@N^>uw9MBK z!TAITj>Wc$F2+ieHNzsa#wG;PkOLTxR-RvWrFj6-yg$M}bxQeuajP4tccN$;>Vi*` zH2AW3RGFr(n*d{>=VY|HyXAi@3Q0t6TD;(9b4mu!w=iDZEtGwtt&WouX;fUogB) z0$QBVtbN%hi$hP@7J&D{LUspqGV>b^t^ubc41^_-gD+w&O-Ohga6RI zUFXhQP)dfE-RXR2r%c*l-b2p-uFk33Dg8GsT0)1v5%}2gI}b z4zG>SxS81JEXmhHUVk%S5z!p7ROsiXJwV3bI6s^jxt)g~s?G(^AZp9GJr6pP`~P8c z5F-id-!NmD!eaIipqG*$=3hVFpA^O?`vPsM-EmoM!}! 
z;U&NP@*|i<$Xf+CKP>!X8g>6?W5tVY0?^T7st1K!mTbw!(O~3uSNS|e1>zt}kI51k zMWvY_=WE&nE-cCyHldV(>LFcEm}=fnFw*4W2GjSlXr5v)zMW_W(B9 zWM3(ET6_uwHNSEym~f9}8qw1#UgbDmZ~R?%wc^pq&TuZa7=X>XA2-*_%fQld+w>h0Axs{|@$tkDs)4$ko*+#t;yXdQ>xfCSq2pKe3VAq2 zA&=(A7D80>7d=(d@#Ev3f7o)dkgI)h>)eZMu^HppXDY&GItvRN+8gruH7rHxd$An~ z80&%^B`gd5Kp zNGY<_MO@{KTFT5E(K3E#%dT?NwPQ;pmXo`B!XruBWPDouSt8c)6)4fJ^=#$yUmQpg z!=&0eY<&v+Sy1nyy<;tc#4Ae65}W{TyX46>Scj*J!)uJJPu8PbmTk_PvJ)$;4HFha z1(R8_s*60cOs(9WXff=#iEKhG9w-?=zLxP0_9)R1PO5*FRmKGbDy1|HIUVST1mB> z3BRsi>25)DEZ?7owk8mIj~+Y_s9DxkeLjc*1x29mLM#+G;~cZ}I&@H{VMi>qeUaAv?{=f&*=i#{-!y@+ zB&tJwjr~(w4}XjhLi)Yy$FwZV=iapoeW`7-xiF&n(51)%xTdaS5%>wF5Y9tAqi4mZ z0?4VZg3Zq)&lAAic=+&HDYwJ%jRsbsq?!7myRmUF1hynu7@es>%w#)b#vb=4^aukR))KUG2U?xuX&^() zp@j@@FZvuF>NTo%qvrZ!JQ>;S()s63X?i#V+tO}5MTy6l?2|Nh%ovj={m_7Pgcv@y zf*H@4Zp$Z)kO2wJ|Kfp6;6ME5A)1X8-@r6j;6<=Esmn!a#e+_*7Y=cE(t5dc*iQe###3DKdo*jd>^Bly6xKIJ z2;JG?-kO?^cz@%sX++Ob-OgRzioXX`buS)TF&o^;-jpAeNzv5tF@+5KdFiWHBOP3H z)YQyGaVX0&<0o#r)ppZz`ZSSWzhZY!HcHTGlf(vhEmtfbq5tCrv2`bos=8QJN z1h3QVodMn)UGnPlXH*ESb$vMQo@lMT?h$R^GJY7@7Fc!C-w}J!84nqoKIU!(Z4ALQ zR^$`z==uz|xD3O4H-i48 zU_jIkyZ;c?V4f^QZ^h;S39xr_ORziTO>B;mrKRGSv%?~zqCyfAJK6+ulbSijWf`4k zfDp!KW-qD;vq(Vz@#|ZA?&eM9K&!#CMRgiB-N-!b7ehFyR#$ij>wy+-whQ~&t@t7@HQR7Uf+DWGi;E02L%EIAQ5+{$PB{N) zWRtZ?vy9F$zfF0^svC1|FmbLZ7BZ%Q*-EWTEax4p!YM@*$SvOHCfUYo>s8b(kSAhe2KcHBZQjVToqIl1t!TRtwds;SgP4-b}d%ry7q*{v> zBcZ>i9TPb@9A@Kes9oapva#@*$7rc1D!5>5kly#>X)r6f%vwue2Onpjktly7&=tH; z!@PfNR_5;E|Drn(?0G&n0LzPH&xg4pdfLr)8ej0DxZyyx@6V}~WSJ)3mJt(`(*?Q) zZ~M53_euDPOAYy*{Ux=y)}J?!^r5y*VnN?gT`BV_dHqBQwpiHm^Pitz5>@nv%PB~x zZ~%`f=HuCxnAo$(G#4q?H_17JZK!qOA#2^gC?=>61|Lv9^rBu(eE_-;$=6l6MU9j} zQuQL&dr@(*qtkL$75r+bdwF&M-Q#u6LtN&Sli?-DwZX{v_zE^&R!Mi^z)3CiMvyaK@7mXWVMis{-K5@mBT znlEBE#~n)N-6gZ3Ps0<2dS3D2_j$8q57aCb5X9Pd|3+rrAi|5;hO+avbRC%ieD9nG z*D>=-K1KMfBc9DgTzsBAik!w<^0~3f#s1@HARRQsQx%NV|8{&d_s>7GNyTWF&r&9l zEQ3uRTs#q1G_|GmN*p8bI(iCWd;cX=5w+l~FHBtQB#g*A3t(5zV^;IT zZ*2*P`!0}W;HMx;nCKRN6qx~1p)Z+Jj!zH8ysHGaJ5nUz*Ik(dFIS%LRwE3=0t@6 
zc}Vh&Wt&!$j%9+P5L9X!;7qr8cQ&3hK5&z{L%F!FWB=0%kSG~~sohp`#r1H@*y=mK z)IGG*)B{~ZuS_{>rS&n6zTDBZ`PSYE?-Bixgfjg6TJ6E7`QV@Q;6oOJZGq%y$nkJW zWNg@vw4|;V*fGa3rWM+)Wz%+9g@eAiaoO0BiN10 zap?LVvG#=TGz^HWwHx`9BBHbd7P^$=*R!{oqix|59Aso1wjB@3rK>xp(8r9aP_bHbi4Wc`DZc7 znzRU6uW`ku!b-03l*w3-0Vb-}iSV3C9P2%3@q8;o(7|aHQYyHKrB{ zbfj?pq`p-xtL`86fU(rc7`>gmQpu;ILgt^PVscrHe{(XE8aBVLuW67 zX0fn~RD%bbet&zqP%C#WG^x-_!@eN(qc}*$qtzP50|xbNC4=GkXvU{4>_ZzOMZbR^ zkWh}0@q=OH#M~0r>bM(}=?8PQ!3ukN6hV0E)@K+*%Gx#V!WL$W)1?e)Fa&7fw*M#KDlo9y^W3 zQ5#)@42947ES$jzcLa^Mhdp)cA#U{|i_`ffBm}N<52zZSJf3 zyIXqm@LJP{y(>g_zJ+e$+U?ts1yy{Al6|0<$aXmNE7QJqtzBaDgd@}fwqjbGEMLgD za3OtBH{wQ-EOjZAHLntZ?nEzn3QXOP67J9-T6J!+j^Tw5!h_Euppj`f1%fApo+}X_ zk_m#pK}sKRv(Uc}sFwvgxw!PE^$@=gH7#&`q-PAngMx zArbp#_^jxia9D4>`GyrMSB^rxzFGc{EC;ZIX%=(XPPnT!T<%&?(E!PZY#5YOAWr{d z@o9EW+ASQEZPB8zo5R212KJNMMWj{xH~#EQ;?>CG2e}lWbZ`aSKIa6U!Hv`MY+A9d zwd17Zh=xx9^e6_$EI1DI8Gi?|Rb;rbV`+a2q*GvixnEF>eF*1tY(ncnBQ7Bae#fx{ zA*1qjw_jwKb)Lr3<_gr&(JQ$ZMKUIqU^^PdQ|Hg8MyDZl00h69Zi#zo&t7#@e zi!@`m$+8W$ZM*%BsI)H|O6pJ+DI-67_Pyn~5Sg^d;v4oVR9uT6jv$YluWwaK`A2?p zy%6!_@Zm?jv!6XaIUV%a8r0-QK`R^*^iYPE7S5S7@}F}&V@{MihcHR9OvZK zKL6i_^S`qqKoSKW??>Y#z5~QY$)6_mb#1GE_}Mczk+yue0a^3k{-r#shP1igUP_!> zevmA+kg51)v*fm{knR)w0zCvESdjy8fdu+-=G@-XKE9&I?=hWSolW{J5XS&Nk}R~c zS_1$fbt}-7_k?C^)m#P`S?BrgGuo6AIQ0BK020ARCL8~K_N;^?ESo2C=LHLPRCr?C zRbljgu?NAXpi*{DYy#&XRE{Ohz8bvX^h2mDX0$@&*fCk?kp3aU?DIe4sMU zuQ0_Om3myaN&ApS&D{M;P9LcyTuymB9EsTFe$k2Nm(x-trWtF@~ZvgCGwszp4L6Rn1@-!z$nHuXRtKUd@3s$s83dJn) z*uz?9NBHt~sA>}u6GyLNEtn$k5BJ*Jj>sDwxWnGdhCwz7ItAUncTXH-20JaRtc-8_ zv))Y0I3;X3bpmNqc#c6Zz3dAC1LQ*+AfQjtOyzQyGyp z7Q7>@CvOS_7FEm*7>SVAp_s$LNu3)DaX(ZnrbUssA@zq?dcT084q$tXOl>{diCt9; zdgHrmWPrlOkJoI7xVD|5APGlVQ~Kt$jqKGSP5}+vV2FA5TMl-+uhqls&mpQrYfcl0 z%+(opNjgTsP=+=}=Sbau8SNXLUy?icYAi={JnT2rTh*}K9P{*Eo>2#;dZR5eF*EDa zrd#U8U#j2u3DxPUQtxMGX5Q@7Vc?>GU8p!Yg1`wy*&=Xf-(prVnD;dwq9SgYhTIT; zBNhfP7s)+9ZZvofU$VcIu4aZps!<}*UQKwgK_Q&9v3<4Ls-Ih=1Odm}U@*34z)|;G 
zIi{Ku4sZot&djU~(bm#pPh4|q;c4%bv4LB|!fs7o>}9d$K!!ploU`BmRTJ|Zshf~$)!2g{5n!7>ryq{JgxNeJ^`)o&;B>59zjn1hv7xSBa z-VJD$-+UjLzPx%MWG1hWbhTgi^V{aGm+5iN|4mMi_tGz&XFNsy(#IIFLlEP*<+x7z z`f3c6AlrR%F>#NSRhR#MgLV=<-Q#(cSR^vmeC>9(3VAio@lh{vJIY?aCx76mrCeC9 zm5370CqRB&?E86lfDp_aS4qIp^Gl;_ld(WhIZVcpMHR+=yr3Y0$81K8+LR=YA~>S` z9c3%cZ+2_GV568Sg`_$<>_g~{fng^)AWc$UXEf6Mn;9`JStDrQ@#0vc_HW`01ESt} zc!Iu2_s^@$U?U{YAL!rP&9du@%Z*J{4`#z;`qQ!U5Tk!@<$VFXDcZ#QG zyVra_zXK2cWNrt~+F`CVowQG0NBw}k+>9dA48fOZa->F>!iGP6JKg(VEx^w@?eP(o zAA_QVR0Jx)FXQs^6W%5CkT0}Ljg?#UjQh)D6EKl%s|pY7JYudZEYRYQPx_r$3!T7> zRyhL=iGk($hlK5LS!ih=5VtfV#fgJHPrKYu0+XA27g-Uj&{_ZntNg?kDZyOkzrTL@ z;+d5UJ5MnhtN!*ey-DMqD~)Pn{Dbc8ORDW+-thnZ$HwEcrRNT<%QKt^6BKG6h7<@9#4xzgX;~RV;g`v6C*aW$ZjWTW(K;+-aH! zVglgz$k-y%G~d>wXus=$NAyOl*SS?IYF=9F3+~)rQxU!GONTuWT1L}`_+Dz7uU}$9 zg3$3wrrhvR9f$0kbe6;TbhBy~dcH_zf$AW&^4@2*^auT3=iHdZu8F zjQ!b-bo9je-x+Dt?k5*qjRB6zB(fd{Uq?{efyMWC!5CYCzYnfJymK9}Wq|>H))Fk1 z{v#HZD9}3imVgb&D>3m02hJ9p@V+UxQZ5<86!x3fE6U20n5OqOqu>(Ou<@{}go903 zlCtfL>U0T=My*#F3Q`9hU%%R@q4)2rdWW%ijOlpv%$YNG3kMesoLv!z!mSm5@5yK?^k>&6tIx3>5Q5D2HgQc79dnh<2K{crb>|^= zTWy9h1&AfL##E)FYUUx@O;m~(3dn?OuHR|+x-f~Ys0vRHHd?@B-AgXX?2URY&A%;U zz^s3>YqrVR^C>5fpBj}?Ua_U=9M4u*jP|3Dy`43YV!VH0=(GJ~Ku}+I>bePXU}HV}~12Nrfz5<5Grkiv~B`-MJe>DCaq4k93Y^La)Z zXi&=&&_55)Va}C#RcJP3s;(|80d`xhmgE-tP{|Cnu4g^G2L*pw39`q~^mNOo)vOMZ zD9#6UNVhcUc9j=0N{O^LcvjKnS@n{peh2vn%BBaWBEN;${*8HBo##C!J>;ZDw(J7u zf2%&D=W$=K;GaxUvU5*5CutLlLZ_k)u#Q~P|uV1`g&%Zfv2 zp-=hVHw*V1SM+`$a~yA#664t&x$D;t&;4W*5ND>wxkK3%F+W zEZi0D2GiN`rOdk`oXf|HUGF5%f@p|?+LQv(rx))gi(|w0H*L@Vb_JG0u=8dCWvz#j zjcpde|1IV2j~%938C<;9@{8+G37%lc z2x2a^^ce<}4un;(R+0lpy1K?FW-*9~cxby9F-d2y+Ad+kX_lum2VKGg4%f;^e(&D) z)7@bZwj1z6(%M-ADKazFCj>3h$<3nOleaLqjJ-NMr7u<_Iz)q)c*$1*NcxOsWAeAtAo$CNuX%(GQy;Z(q%n!?9VpU%OZj!(%wc_XDv zRJG8IJr5vaW6G%4XKELjd*EHLF$+R69csZG#Jr(DXF{xz`dFS;yY}sUGgv+!Ld(^T z!!0X@3X53{tH>l?%u<5h|Cw1ne*7LKI(LTe>=;l180w4xyviH-gJ}gk0G7zyFM(A@ zt+Uzl8;V?c;6(=uKXdEvd?_z1wDmk&8rF14QF(x)sF?Y*wzS;ll@M 
z&CAYO(jfuSmcsCQorA_r=RBO)!|w%d+7hu=yGcb~ZQUkneo))GnAqdf^Tdk3etl-n z3w~$WW>-#T_#d7>CX*wB>vQ#I5%}{{EghFrD-i}Kp7Sd5aoUu3NjR+V9`q~^#Ie!~ zIP#_mlGu&!}u5Xq-py0j;HQi6AK-N$$DuJQU+x`}3e+qOW- z=WBMZ@KE!eOGxwp{xxc{4)TQsajpY;nwrY4m3ImD>6=%PD>oO4CZvZCcR0}h825D) zQ_zPFC>0GWSZwaVr~PprcfT|3|Kv*uTFI@e9fgS=hl*j?W)@K*B5BfP!VCkwGB_~V zKpE#k4xMCSZ<1GnBrAZvkqv(#ppXV)fjoe z;hHrUpV$4|&~okUpHCjW`1x=5R@+z0hRICT$y`(IQR^u8A&N!nZXNP#hdv4;w~oE` zb`Ux_Hr;J6wM3lq+KT37>5;t#EJZ#sJph1eVN zdn#{iU=uEgm(%XPqstO#%`do9)I~Kqsh)H}G2$vDQ9PzHMP%wTDV->whwQKH@PE zOObkJckL!YU5sQV%LntN$CxU=pF_MNv-;L}#{U4Itj#a+#99(&a#|V5LBhpqJmB2x zWAYTtOiW7ny2}R3fPapnC6m-fE})`p7vL3?1QNj!*pJ*AuLUSma27U$kBXY~{P}aq z9Q~AwVXOErPZWW|*8+OQm!agi##hPMMQ^d)tfs7i2~M@Dzh7~H;mD0u>Kj8sN+^)Z zBFL3Eq`+EqTu2|TM?52Wy#&-+%U#B&0NIWPWO6S#KQ~u4|GGcV&Q^r3qPr-`RBP4B zf!s%%!#oQT<{m$Ji>w{>px5iwvW+b#r)t(g!i>0A`Znhf%aGG@87MGQzqxNETOOw6 z$PW8~ZfFG`J%LI5XD?5|0}!7vvE2xmq11_Z9E-n<{MEoui=WV7ptgNGqZUR_GSspR zo+L?DmT+4roe6HOg7HltZ9R0k)P`~ zCC_<0ma6qSgw_ul6klErgt@BjQ%C*Y3bgahgr45|l@35 z=^`Qo)h_+yKcJJ`sxDbrJRIw{K-C=tB(P|$rk2fpw)p#(KU4e6pEs|N-BF9!1sn#~ ze#YVmg1N-*As=i5TrCG?qk2ER5mt`Qal&xoVFP)FRSnpe7oj`l4@Wyph){aQ+a&?k z4Ap-I01k?KSY;t2l|7@cB32Q7iNh)YsHX|Ez0_ml>ad{BHp9bWN&B+miOGU;E(q8z z*@i3b+Q4vd6@L8Uyki`&dhBAWT&Q*ewZA>X-9Rk-IxgJ-q?IF$V!x${1xmwiN==$f z8(4>B69lM~LGkQI`5`#`cZ(V13JHPxWKXRebVoLINw5jlqqq}oMo!7;C|N<`Fj@cZ z-oX}wSAMz^@d+6Qxk73@)XV#2gCO_t%|zK?0S=)&7I0P!@bL_S=W@9eAD@#&w>WaW z=k`V_3K(5x1^U!ye;Q?zu|-|aBH)S#r(DcOh@C3010)4;XP3377T}ewj>kE#%4oX6 z6#QGlff`)ats5#9kQ&a9?8)+sAp{Y%P!r{~SVinm52C8{nT>&gPv*K+2IMp|kd?6{ zi!*y|4@J0e4SC(oPw4+*T4Lh4&TLY2V&;&!`zVF(3m5iQJcwU5N0CcWa&_1*Yo|1y zO=qXho%?f0f9^{Pz2@g*Q`415KRj?!<_tLFjFkZ9`KSlJ{}5P-b8+BCl1=cyl2NR_ zpn(r%u&1MZV(r`Bd_j|g3zE*Zj6ENCumAWbDr26?A2#?a#HJoRIN|zI#&G)7pvrUn z6+w&`x`tMXumZWauQ^<#$>Dz(%*RYW6=wF?wx%2%HgMI%d`g#0RRciqy4yBU)g_jYj$GGc6^KPO$y_uE)tmfMK4Ba}1rA7lmTatg{?`2*hokz-&_6UiM z$bkEK(AS7)%i;{G)#0>og~=yT5DpuPWI$G=Q;)0HN^~08 z6)AH+@G^;Qmq} zmU3Welc?!>kmAnoa>wosY-vA6F 
zn-pDx5HmOc@_bJA8S<5dGEdGjzVSRZRo3%+Q_M7$(UN7eX2)hB(6Qjt3p^=mK7W&} zR!w)Gnn{VafXsjK6f*a8{8xdc?dVb_#F)3+AL)&8y`}Tsp`%N*H8m9pWf(ki+ZdgK ziQwwM(>=6g-82>jpF<$9H|LfBusUF*ATT$ScKO}T(4}qbcnhrHOkc-8WjbTkluh_O zT3wOTW@av{+@4d={ClM8H#0hAzI@ObqAL6B=$W@873$c^DyB|IdC@DRI{_dUrOz2P zCKf*q2gYV?4KRdc*R72qe1Xn#-uL_W?g_5DK0~ir&t=DzR)v??>3qf~zAyAI#uCx< z%qBP89=gzH1Vn2P)%Hs~xof~4S~m+KWmF zX_=almT5;M+AK9KriD~gN{f*q?#J16{f^_h@8i1v`5wRD9O3hMzhBFFKDQH?yNSSE zL*!0ITi@S5rt|xMZ?E_Jj*Ekh->WIaYR_?LHq+^#pQ3{Emakm6r)EFm z-jrpZ$^7|cSU{Km1v4Z2PjB51=9mrN*yN`ffZG`jZiw;=Bm+PFAfu=RZVh{$Xt(kj zx4NJO(-vZlpyQl5XO7D{YdEg?BRI9T;BfE-iL1RtokOC3%Kj=&x2Y=x2}60!eY{im z8WCgT`@%&A1iZZ;6}q>miLU6yaPE^b^{}kzRSQD%-XbWm-N4fL*7eE+K(#itz&aRd z$0nJjJ#QQ~$QS76eifg7RQy&*93+OPXG|wzRf1XprQWt?ph6HIXUDQ{AImqTmelg4 zr6@^h60Vu2v#tM~nvYX|fKihfA!F#C=jv76B@)H^QcU`IpP6>Ns8YDW4J zpTD?hCSE|nX>XT8-qJC+4ShzXBne*AWPFOa6e4fpmyz>#osF$+E+u5(T#5f1dctP} zgC!|TvJ0mI?MVGS7nMm%907inI2WSuHEB69;Z=^a06Vr4<6$ey$B?W~Z#z|e_1yRY_{#K9gzZpK$v*ziOC%qE3>}Qv9iMY-Y zoFbz`8rqB4f6%9%VlWZCo;@0Os+e0>Emq_R9Pkk9wf4IvxEciNGJm0t>-UX=mc&a{3j(Nh}ozctI&-J zq@Y*{oi2mHe-?U-YbgBk>VFL!QcC2|5YfHQv|1zGYIn}|7Ok~hDTOKo8(IS<97Ro+ z(Lhz4$H^G@^+s$*;YIcGhA(LsU*#>0 z^uoD)8;|--Z251uzJ0e)kbjxJ9#oefeN55Iym9~csN?KxL>DEEu!-#CY( zFs~@h1{0In(hcWPWTKxFlYq@HH=&iYr_c_YeJSF6*^D)3d9!FP=o}9mVVy}%u6H{i z9^ub(3@GLr&HV7Nux0tzs2YmTF8Gc6IU@Bj z7xG7P-P^Xhh{u^uX6`$#_80)n&zDs*O#{+d7`WyoL2lCpDo#{+VNI{7o`}Ic9DB2{ z@CMoo*ZVSYniqCiE@4-9cR!yxKJxO6c&KX5fDjCd)(Rf<3LHWY|M~adj)FGATv<<` zL*zhn&*ZAwP+-=RV5**!}bamBl&4d2jMcD~2VM$TKA7w0xJ$km)=7Bw7vIW#B<^ED@QP#&-S>$H$Q zGoYs>kwvqDND{8xF&*ywA%~ASpJKHA@wpp~_V(v|HlUyQX~%~QEf1-E|8EEMNM*A% zbuYTcMx99*c%EhO`pi}?v2^Pb7N7SrlK@5z$*7CZJEnY5>}@r8ipmVD((O$z-oKBf zZ!k<_k8o9Qa{Nrxm1Le>?nDLe|4>n@K#TxL;gq9jRk4~Gb@bTLRqUoxKqq=YE3M_z7iellDCxU_{du892{BWhURLz|V|&P9E)^Ny-)> zhpd^|CRk-Xkqp-4d2M)2L9Gt#9@0-sQ*~?43!iDg=@<6uNYyk+mWqr_BHKa<{xV4_l>I#S$WzL@V^+mWf2=M$jchMO8__5oQ?ZsDir_Z(oV%8{ z7*QGl*!}(nZcFjNJ^R%o)Y!g~+E~q%>m^K37qY0j&$`U-fDm+xf5_=-lZ=aD2*sFt 
zcro9RmRF_<4x0ad#i5MHZ+W;B=Tsxo!*CkQgRjM z`vs-sI!1@yKvUIiW|Y&R9jf-S#HnA3t!K}fBLhT~@3Qh_fY9ddye8&M=a${g$!P(n z;Ma&u&}7@Zz_n|0aB0}X6bO%XqTAnR&%W|<)H?Iv{(a$KjXY!G+xt&Glc4>knKKHl zi$~k#b=BUJqW^}VJaIyU1>>hlPa6I5r5_kh(F3-RJ~9Q39tW@i@gJ3T;jR)~Q0CTq44VkmL|C&tez=^xF7}slS`2i>@lw@Mn0N@IDe@DWpq&3-u;Fv(sdB*90R)-Ek z07p5Ug!uTR{FnLvXaO|paF{D|d6(u>rkjuh5a!86c%4^q;ZGz=U{QPhN)GtAIXfd5 z7dxrNSwW<@NON}2X!bvHfZR-bU?j!V_->XN^C?yqmInaF&<88AH7jZmngDgBcKXB% z6QH%|09>02BLo5ctR8`~%3PpDfA86|Csju@Sa{Rp*Xx=dZrMdUK`DFt4Cdd0*`B%Q z>$=$}7jPgQp0E0O=D~XpIk#{lIz7J0c|d`1J1hU*J<0VjdzyRS*`do*=BcKfq;gNE z{^roP(HGPY$!XNDgfn)9x!Y0OpHT=k;Q~@u9>IfO6U*H!;`B>z%E}b@ z65HpaVPK5gtFOZd=F{sv*=hSC64``4oM~*6MCJ5<1qXiq$J%Da@q0mNc6Z|XLUl3g z-$?8Ryx&7O!js>LIOf<(T6;z|Myv1Kzh8sX0pRHmh~$!*mX_fv@!QBfpM(k+KSP4CC=`XvHOHw9I z+NoLVc&+OWO(vz{#SF?2NUq|7g?N<+4cy`SGs%$=UO#lp+=is^WW-Zi=!HTDAU>hT z0Bh@PUZk2nS#-ftlbjyHdUroC#C!Xy$(Vs1{=#F2aB(vh0?l<&q%*+vp(aDl%X<>Os-OdMY7M^jQZ@lzi|L}x7sqry}`kJxmDsN~)WOVfBF zL|LLk!k=^u_6bV zPke@`szpQEys_q~rP5hGZvQun0m?|o*zC?Ph`^FHIxh}8@3JM}SP~;Rs-KvAvFuS+ z*d2%68vT2op}u~){RhOc!8{|@)3MmjHh!x7yQ+oqXb@%;6&Q5vm_zKe(XY1U9ftEa zs~()}_i|>w zwZM7hZJ#W8{^`7a}l|bB(BNyw;-d}S?WKg&7a>xeI>jsfl`qy!lj*= zf)E2#I6jYDLi2gnyQm(Kyj%Z68b&1b)yJ63`_PDp^Q$-!MEQ{Y zNR~c$`>39@Tauq2qCuksowUuc1j7CES%@Aj@x}1%3rmmfO*a& z#Tdj8QB+24xP5J{jAcpzl{F?&uI?>Jid2*D12~Uia4&pCM7VH}7xyIwGGD3y?S1bk z=GmCv_rRM68 zCRQPRDsGm(*m9lqaV9$KSUv8ti|bZK80BSm<00if-`L!S;#|cV_6V}d=1b%B~s>tbst9T8t zWTZP0gMct|-XNKu$nX6^w`D0|PKt|wpO{GVk)rvCk`^`oF{@2V5yhI)v+BTo+vj9(QN~Q6vBHcvANnbXGKMnS_Sum4?6KBe`2Htnd%Xr@ffTWj;h;H}>A)S^JU4MRZUafC>mCD0<`{3|3dClV`INMDbcmiAQ?#gam0xX>fXyv~;s z4r}_>^zLLIr+h-pW<}j~EhY!El8hml`xvMbgHQ-p{xA99P{Jg5Uq?@xjTt3K_-vl4 z-z4WH6F<4r171S_7XOdn;u{+49sC2yUlB=suS#CUM}jSEx;kzoN6_)cF5Wuw1kVBB zE(02yo+A(tE6LrdSC_wPP4~Sf9O@>Wmg#K+-2F}84xodRPDih`2pE^S;IMOLOGRQB zh6-Q!=M}Uh5`v_6oP$t?R>mGeq6&_lr{D+rC_G(u6FCv-01bxKeWqPZ0=}qT#5WnI zZ)j)&->fLP)!0ei9Cga{%8r@NxIVDusW&8El6q)r1Q2f-O_+v}2(vib(!XCN!GZI1 z=2k|k1ygu(mv0LW1e^kDcS<~j^y9q?71QR8xoe~Io+{bjC7AKLGfhJ}e 
zaAvz5FPU-Wcjt2-SQ~U-4pfeve*>UozUr+@7Cj2We!vrE(X6g%hj1{OPqd?RBObLQ zTHt(}a$G~xYB|u*)pz^D{LHq@4uxFyZqYSFsmc`S(pP+P$#)6(lXpi0vb%OMAZGvP zOK29+SslsIJ2z!u1qa^?4))pz3D@M}6AC%wlLYi2clqJGV#Vo~UDWC(RRzkp4oWDu zGiL-W`6`v;M;+_lH9v8Skl^;g`>8&;2tx^sI3Po~q{y)NmFB6WI1N@!GSlRY%L_qMN2Ec)8oX9YKz(rXsk3vNh>nDp6$R)k9a^n}~I`uxb~AAiGL$nZ^{-va|@ z%G|r;DJ=)W`wgk}s}4Gj7$J@>;KMnk98agnZYkYN_bb`>@$<^B zg~|12{2q=71OM%Q%@sG=Ndr-rL((xM(V1;b)C*!!L>Ys6xUJueuZVyL;wi=qzQ7zQ z5VCD&k;mMCUoNmii(dg|m8*@j<;*L&gZY%A_bgk-5skuNbDclVY;sC=`Mr^up;$y3 zyIWX9CQZ~=`{Y6ZVlEe`q(A$%GA_~6nh5ua#L~5RBb_wT{J`;)JTm#|%vNkjJ#R4) zJDuuLI^`LYs0$#0l5(0!dMG+Jp*!A*exB5~$Nh5sN$*_z{OXt%J+gKv^k-pL4}(y5Trio%Rp^m(?xNtTlScK@Qic zJv}B*zFAw!H}r}fLR%{7cfKAhMr%~MX!12JW?T$V19c{Z-*l%sJA(FQWso?&U&(7mT`e{=6{$ z%2B>Awz+mjZlqmShvT?JKCUt*ZiID6J2|Fvh3 zLNZHwVK5k_2AzQQEXiA+SJbCRgtNWi9FiCLK-^FQJ8YW^gTfxnyQ@Z*Bh$&+1W>dI z{x}JrYo(~cxnHU@oV8M1Gx5DoRx%OM2ta`2FWcWp;{u@HY-}xQ3 z6f2rI8*dYx?y1v7Xb5dxxWyt@%jhc|&-VKbd@(Jg6d*Bac!I}3iM8F{h0}4C%AcSG z!ke?1emn+YVd8w&Sqi)?3f92Y5UY9pR}c^*9s`oxEsYVYP7xy32_^_x*0a)+mhl{J;M{^=9ZzIDnMT9TK z>j*j(QOugVfvw7z-osN(&h~m_%NfYEQ#?KLZYQOekjNzuBKt4I&AU}MGHTR+bPzA7 zq0cWZ4{`FGAP=f9@;(S=Pp(}u@20mnB>S>(1iOGrsE<kRQX{o8M z$G?X8xtzEsc=`VQF*&+pGC*ja|j=^mUL3HCF8n2ebh zk7+;*I(Q_`s*m`efp&{%!@W2cWpsOOBbzSa^%58dRIcbR94ciTkeK#6mLbjM-upaw z?eD+)a+h<7zU~K8IXZ6vb)+xIqVi#p z^#^TuNUVWbTN!k&!YsX9QuQ~2E2-3-d6Lx zJD)EpXEP&S1>;|9q&bKBOgb}C>s_zXen6C`&${x`#eb^T6Izw>VAtF@;eh1OSp3p&j>POGzKA2Rj#8!wzl)4}pGKlv9Bczlob z@X4V3UbdYOqpoM7QrrF!CK;D3#Sfh$BBz0z@Hxh{_v? 
zh4pR;@HLe0i_V%As$oN|LA0l&OF`A#^g~RU)1O~lsSbl_CWGl^tT6UUoryrg*}1u) z*@@IdBCPk>!PrRPsZ&1H!ootFO=;WOEmEOPa^)YY+hJVqo*NI1gi_Io5|(hExjB$X zAwV;qrve(zV%=)kAl@Q5Z0sGdaT}ny+!|5pE16kq2O|G*x8z3X!s(@_hu}Ad-sPgie;fn)aCs=;*~d5Iig zLS$TATDNZ?R=}ZYx1F)#aCmmF=S=U6v&l{6^4hOIB`UE2fK~AIUhQTfu;rwA!7bWr zp>~TYp_9%b!sprCU%z`&-77A3V6B*!s3R2HFbBMc6ZXO$nae?LSj~LSYpQ0mMOq3gr(+U(8#y2>!Ga`mFC>tuLiDV6UvYAuy@S; zjpe*%JWBOt^af2!itp8YeapdX(*M8#5}R?y9do!{SSY0p^tDUn5pYH>mj1;nSFgUo znK)?Wh})HV)z3ibC4gn|UY?X+X$d-rQ?POcFC*z*m;WV5DU}MhnN5h^CN-!34*6Vi z%5+xr3jOlOv18i?-aspnM1){oIt%qmD*}`xU`!G|J5!8Au{4+UMT!UNK2qe4rk{ab zg&pvC!bgz4!=^KQqlm*!FGfhp4hy-xvH3>_32h%iDX5MceZC5qm$zH#;n5vzZgG;E z4~Dh(B6T^eau&Uc3=s>y3&$l!xPrh_>FG&v3#qteU48k*%F3Q=cy2Fb7BZsO&W1x) z#@8jCfr3yTow;c|(;b>SmxPqjCQq-2u2O?eo__6W3i z-j7I$*5GJQK1SbD(GQ!R*wohQX={h@nZF!h z`Igb9C`Zz1k6g8?|HJ8iJ#(kSC2ZB%ErctVGNN$P0R#2~*8|njSGlj1LDg)kjNX7< z;)Z=bi!Xq}H8n>fBzUg|LE=J3{dPa=;e!Vfu;}*{t5Kf>J+P1v4)&>6e&1E+%P2Nx z!Ww6F`1Q(*wK(^GE4lCrG*Hl#l~Kt-6CbrJem9|B>CoWzW(AmaMRNV*(&{bOJ*dN+ zDKJ~Sq9t)~b`C2Yc<5CJ_lsNsbMEYQu0(H(WJUeJ5%`l?9A19AAf7#@O&ep>+itvS zdWVi3)zMH$bgc3Ufi~WC{deW_%+P+5IZJ!pSQq|@pOZ)Rz64svn3gRC`sA@J|EAMms9}d%}A8>e| zzs=Md;acg2;@TOFXE<3m{`jxMS~N_T4OliBUT6&MA55-KAz#-md;uk>OzN)P6>?c~ zLJftr_(^>Pb*Zm?qqFh~VUgu8FCso5C(j@*E^gfw{ZuR|j>&xn4Co+MiCrAtbLwmm z2PmfH>;3cKfeE_B$ElQUxLjgZH&bPyl$vE^PL*NMR$`yv(f2sVQ8tMj5O}nz0;WY~ z{CS?_8#9Wv!}|bQbedN8&ZqN7saETE?%W2S+?=c<%&B8Den_mk-sc%;`FbYOWC3V> zL_w=D>C=_}HObugou%@J%p(b>$r0?*MUmIr26~We%QRSaJGdDI*-#(|8 zrKPB&9By-EN@jQ7T<|EuH*av|tx?0eFS#%w;nLCK(26;Kadk}`yP)6tt~xrKIHasi z7-3-A{SSn!6 zMSWZE1JsD5$TLg?T(U_Ml{MrQr9^%nNn4+PL38ZCEY7tbB2E#!?YB4Uaml$BDmk3E zbaa#%J+nEyXOjDs4(k)a*ZZ;#WHgJhe@RYYCcMe+kHD>+=~?5a3ww&9d**@#qcuf7 z;I{V$!_B}mW_Q%YT}Q%>_0vqou~nCz-CTT^of ziQA%=oqJ(`N|;lkp!LGyXCwC)NLY-sv)hBZugCgAIE)|?Bf)mrVyBOM`IY3L?D*#q z0EQEZSuLW;SK@ANDm04Wb}UK9z0X|u=(xAEWil)*P}owS$-q`39Hy@~%%s9hq1oBK z+Yh+W){hn&JLYcNKOj!RL81)GS$)f%9K$^Ay-SDJ4F1t=29*HQ-IKi=z4-K;s-V32=TH2q5oB2=Bf_&zb62KZ4p-vCNX$Q!n? 
zX$a_3K^|slBkJC|mwc`@w1(aVvY^)J-u-}BMP$^b%Q4t+-8D!2qvvoFk(;QMY;uA5 zfZn?;-9W7g$J2M){KzOb5W#DpA4|cN3mOs``N5U5^Q#b&9L~z4VN;lb5M$P6Jw7-k zh2s3_`^(w4+ker^{sg8{p9l{hNdxdXVILRwyN7)z{i?o?C6Z@!jwdFRn}bZ_2Fx`` z#naSL`ipM&5jb7{MaEqfL#?QcMFq7zB$LCCo{f zTOzT^JKIKGebJ)!UH(T4up`F*0E5(k&|G{PfXvBO*Rq`oY8D$CTl-Gd4V`QAdZ1h@ zLM*=fc$C#PToh+2ZM0t&#Ra0N(M!)zqftNf+p3*3(eEE2`bixwu46`}E)b-7v8hUU zq)QWhjsq8qdZN>2E9~*QsBFRj)(n%O0!%ra(~Jl46ga&~5;TD$B3-we&Q4CQ zhRhvrs6b^pq^lq|v|t90n98CF)*+bIqSNW^OfFeW9;mjQgLtIp&N);!fD1MZlHVwz-qpuKoP~n>*lt3jl^$TmgW(*BwQk?5w%ZQ;C8RA+c;rCW{=jR*|u?B+Wp>-wT`2snV2I17&PsBtW!Dl2}o6YCM!RBaSC z!`{X3)l!T$gdWnhOsCpL@Gbya+$;ET8G1N+7ndJr=jSpydI>_eUBzIw5D<$gzeZ-+ z2VLT?Fv~O1PLtO}(tG;J91u>CdGLXB&f_1v20HHSE13ePPgj)dwfZf!8xD6ayn;h5 zAmA`Sz6@JQvjQY(HcNpdgZK>LPiY3quoBb_MY3P6dI9wnqWm@}R2tWU#qomUhcP+! zI;68fEyy|ZEVo{_miNxd^h%M|a0I*MSRz$M2VvB!B##j#tfZV>%!O}uZEYJmY|Q=$ z#5;oDXM{tAL6l}x5t`o`UiS*_qysjA~6KZLWBcr2}KM@I*#h;ZynU{LL z9IRxex;2{CI|w&MFTZ`}gL6s%2akF@ z8d)hG9D@@rnV<{oS~CyW_05U}=ru)o)tjjh!3feb#z?xiHR*JQwNxiE-EO;g5tmAN zW>*>>Xhti7A=CQv{G^~yYaiA|bM?Q7ra0DKPbSeSiQiK?O5#taUf6zpJw@8~-7Btr zinRTYwfd2oQIcZbl&}Oina3}rSMaaU6qFKpVHDw(+FOj@U0ADURoz zs~fwVc#c}fbVy5!EBL&z4~rw~z#y%u(G3Sd*r3ce<)RT= z#ireqaWk=Ko@t|$rGBuUB(ABKR>hw4<-id%Q`}Pxe-Xk-I7;bT5p@0dzH5PKXW3R8 zSez=xL>V3WfXxxZDe4==+hvkSoFZ${oXYWTzjr9asj{?3P(jGZ z9xGtz5G=Q>K23F2)C<0rnYB~-j-su*DW!)FP4owrT7hl)kh=r?HYTK8ci*G2s7ec*S4!@lXFRRJ^DqaDUqHN=pZH%?w9EA zSUoiv336i~P*d^ED=8_Sz98wuN-i3?;OUO49KxWz|7c}Mp?YbH-NL<11bk*uXk6x6mAJNA$? 
z2I;!|>K`zmiAU>WWa`G8Im@U0`0+zBwO7H6t>;oTvX`5cMe%qK07_%yHyx_9Zlh(W zGar6_lZ-L|lvlb)Ug>f1K4WZ_NdnYz#OKPI+a|0!VU0F4AhYc`mi6!8ZZ-_D z@gX4ltgidM`T@WJ_9!$q?dQfovmN3wgF9WKE()pHg871dC3(sky|z*U^pOKiSE;4P zRAZB3A;%B~U=2%oP$k7V&-k%*@s|ndo%x=5LQ4+=9gJ+#vA?GN9o8SG|K%3k@ggBy zQOXRJXNM`de155^v9pEflusM;6k-0O3DSCT@7Qz1TpB!NUA-n-X5DK@Rv%(clw+eas45$Fp0&ml{Fvny}FQdR~;j+ zCa8^5s$*yueGUVR(E7GOef374g|9&sXLA@6R~TRfrL)VO^mt{{M-7k#jPXl!h+?({ z>(gv#C*X{`Yl~4~kVzUnc?#4fw%0v|m6*+_6FoM2qZ|@ngAxSd41t&nZTZWRNY;O> z?y=SlMm;7w_x*=E93Wtu8nGIoO#NsTC*&5D;F->R5TU7pw?;&?;#w0V_hICLe3Pp7 zM(zr)sHIJzwbqJ~LKzjyn5dYTTu#t@{S6IuZP*F^pQa5?eSmFpfQv()=b;^FovRu9iwW;cam{{%_}hwK`iKaq3V&* zLMWB|D%S8~R-OLV^y5~Tj$9zYiSGz_m*-T zzPP$^I~qw@{U@jtOqluxK5}_2e|<;&Q(9s(vg_ueBpg@km2wkK?(6c%+_iX5VRnP8d47gBqCSqDAg(P(0Ui~=Z>J#iii1xsC+=Xqq z(N&s(Xhu>A=ihDu-5_Id*2nH4hxywt+6Q~&CUhdn0Na&w!zl=HQFSg@kA_S#2{e-F zG`suAymxMx5tLD1Wgwk5iR1jn|qdtg>bq`B2n0_iAa5;B0B4l3_dvgZu<4ns~T z{^BP|kUBoxz1!8?`OcEE(z8*XFe_Inf&b$LPkDRYz{~+{JM^9!dU_htPINaTww-g; zQXMZKCKj&@%{jnZ5{5&1OYTE}{xaVZJL*?T62VJpU2P3i+M>q{m_z-7sx!M`sb(x2*DPKOCs zeQ!p%t^^jqW2!e~I8EmEf=QTD5$^d0Z=D<0r*1J>q#_v4>SQ8KX;5LBk=>9a<(829@o@Vm&cPML_ z7}OB>`nJLTVe9vxC}6gCHS(s@?mVJm*xX03L|B zi&dQAH`G;{M<3s-wgdId%;_ z04hp90KFB1UQg05Lvum4#i@a>X>aP$A0^JDrlL~GN_vR-4Z();H9BxqC`SFdsf<&t zOJ!sK+`yhKmukf=mhn!+N>Q7?sm6&X={_oQBwr6wY@r{@a1CExr1Cq;B592iYh;`v z%&^s>3)mC%9>?$9`vS>i7#cdo5x@U#2b>}hKt?G33-Rt}Z)GXLqq-&mA+-}wv_^R6 zvEMHu^nI-Fk*lw*E#cZBCt(xjsO1)NM4ES21%kIUh%nUDR3M{AsT>VjGU4I&l`t*z z#uWzRkeag`3xa^U+}eKwAU=i8W!0chU%!q-{{P2U9+6l|{%q!Jh*C);CB3#oCIKCJ ze?;W;Q`%hCdDuFA^j86KT<{Ao{5oTXm=KLP)?bRMJ|UHw0D8$3K(VF2ByZ69@vzle z%NQy$RxVkj4gAq|?DNZkBQ%>K2pFWa&E66^oug@%LLe2k+JdGpmE+_D%dP-PrvM)} z=ftsNYWReudOj^tOn&xZ`^&zrS`1l0cjJ4ro*GVwg(-itU5MY)aot>fY~U7_k@oHk z0Vu()NewAjYr|b3ffK98DF1P7x{Of+BUBEh)80Q!mu83(@=YjKZgpw+ongtYV0C5{Cm(RS43Fu1_r1&eZ(ikAMol2kW< z^Ox^%Tm4jiwWZ>Liqx{$ylM|N@w3DL{2vjFlP9t;|aA`w0o{pv+P@N>)GQQ%>{kdwOLj zu)4t09r+^hE&CR1-g$NI661i5jL8e8MQ<|>Vlqrd2#+M3h&?wqovb~{8uXWc50Z1+ 
zHV0T&Jk9`>$&m~Dqp7Q#yD5VsP}~W7)ew?ZKfAIos1O?!eM++Rzl4GFEG>4) zC~w`1JU&xUyEo^+S<6`|?!%89J}fz&0B!-h#nCS_gKpBWu|mfcXKFTW_WgseD}h=& zFR^KGGv%zD23P`J^z90Wb_f%kt^VWkRpOoM?TUPHkpHmtY7L;<$V=eI2CTeZ1HTc} z^Eii)M{r5Aiw`I)eW*0EqDK$k=*LWr5Rur{wyj%BybPJL z_OsR?ihu)1uwTW%5=hwB&q?SLpPh<7%znJh?AHI@73Bh->T-m~}QZ}ZB zJ*qMNO5IrY@})9tqzW>6kL@*q*ih3vIl2qAwqEm+CH3IJ?&tTOJ>%A@PKt2>pF-)d zP+fdf_y$XMvmH$b4e~ab07p9lK7A3fhg!6XE|r$i%bj|B5X`1^Yv1Rd9=Rq#>1blm zL?sOCXcutQDIyIx-rxoS?xBEAnkUA_TLqhLboN0^cBeRo5=8huFaigI7}vz8eoqno ziB0mZ_ZgnfG$per-Os=gN3ZKWpK@lb_OTCGdBsE?yqby!h0>cUr+bUFB99z7BGU%k zD&^3Pn=U$s;L*7W$gW#2T9ty9dS zUED++HflUac{+W}vmb!ssLgkj-sFb)vRM626_g^`6>j@%5le`5^s@~ckSBBRAVv*c zVAozon8qY^I=F@_*R1q!-1yR&UN%L-BOC1AmE@;|6p-_}Gw=#eMozMeb)G52~-E#kHUlI<7`|6V zw0}D^e*p&yg~?dEc5|>!=8?O!VD4+p^7J-OKLL$7tMsZIqhz}<{7HdD_v{KnAs@0- zYKH?X$78UnSqxJs(nn^iUOuz6uxSF+g zJ3Me40wcm)P3Ns3jU3D}`Bn3*np-K$pIxf!dc%6fOHGpzv%9prxzn$QP57o+k)fju z91Wd~cXcy2JRG>Y+pbaDUV9(aFxQRp%pVk9@%_iAKMT)ZeC~hp`SY5tlmGZ{|AFPt zpEvZYUjKeU&HDF+2Lo4wxM}L^=dYy_bQ^9r`=;)|!GmRcg!B^K&Z$i}6NvGeI}RQ{ zoKp4U^ww=A7N>R-AMcNbStttXw=A{d+FSFz)>0&ZA6Fx+<#WHlj4MKP2-kw^lmul#mOB}1<_Ken}&zxBf( zFp|tq>LKE_bFa|ikeO`kvkL13Wp=x};DFvE-q5C^P@M_dqdW|nkJ?bhExrp68v8`--KgBQes(PTX9Yhj@Z zFjKv1>Y2G(VDs4qXGyHcg$R>9)NR832Ws6;OGXK{>)2dAso3r}EQT-wQ$6LvL)s(} z>^UfVypa4Ee*$gTx^ri4Pz~xBY2`+yvh_fi179;e*yGn8v&0ud%9VF=wu?Kq**v0TXuyW-;D1O# zn%!?VCum1A6e5z{#hKGTQXnO$FTVm1O|dE197eI8fA{X33C@(kPo6wsdeO*p{mEd0 z?Nq+W`Er9Nb)kb0^PEqYrjcZ!05+i3J(;)B$|$^Vq{&9Q;k!NeFb!z$FAHQOHZf1> zgXSX&j3;EMFrFfi$&wa_>O~|~x{pncHtyczn@lUGld`R%)mzth+e~XKW*oh#tQ@Ie z7NYve-2OTZ*~g|IYct~DP;a{n!%6)D(xB1Qb?EP*l$r+~W6Fmz&6sD6d625C@e0Cl zy!ZNMQFe7SJSq*`(8A1>M)+0aK_X&j1U9@+rC=XO7i*l`TC?Hhd^O+Y{wd6fEz7 zV(!EK|4ASfH!0?f?mp1Xox2YOA!z&T*)v(1Nda^{`2KeMcz^FmJ9eg{(GbQK#B>}G zS6GxXvfZEC^7Z=g2B3PMWO6x`(aD&(@oJfSap!D@+${=;VAqEy-;%wwZ{O&msl?0x zf;o&Bu_+UjZO8bOKqeWmpAfdIt5|;f_DuxYk5?nUk&tG^AmniP`)-%r#e_x+eCCpg zx@Dv2Cd4$}$%^Sv^MBi%#b*8>F_BGQ%5z$!DY8`M1Kfg72&EL3L1RARVv~YSAO+El 
zPk77_Z{o`B_pT;=5(;V7#65U~?|lP;euSucb2*fzGK_uNE{IC8G=co zqEfI_dqwOS!AmZgd^_7S$vQIz<}-&*tJh}#Np$f@EnQa=Md!$!V3Hv&6$C@76UD{J zD*RH3I>jwjuW7(&OKu-fug^bI9Y7%ZwVBPptX8dBWhnEb*mqUJQb<{0ZfN$ce^B=} zD<$&Kn)+&%ew*UjQZ0hhXUw3VrbRMM`6!pi<2x6JwiW`;JTS1ikFbR2j_BJb$h9W& zgO|15L@OIwmmxLmT|~e;Ba285+64RYY{!Wcy;#gkJ3HO&Uf-ts8rx zePy>ti_6#W7njjJ4cnn#!IKjqA+mxYOJD*w0aO$|WXYJRIDs#cxe^=H4>*r;(8|`- zHK<5ks1EPBfNRO2nDQ?iiU=*ni`a`fzlNu&)XTYx+&jvoI66lSBXJ1NgFB969Q!0+)qa0hiO z>qusb7lyhvM6EqbkAw5PE2q$l3tdQ-hP24f6Ky5U=-~ z+Prq8eDX4WJ=#?vzgGZ$=k_r&ctG+w* z-d>9pjW{`Eo>A7Gg9o<&cx_v604FAiMU>z+Z)iRwz-q;g57To^{znTyW;ieD@k@P% zgNxk9J%Usgn5q7EXn4hYu?b_oF(}uX>5S5A19Ka)or2ai{{{9)xw7ecO&36#Mq=yH zPCbrwu>c9IQp>-ISUiJY2MieCgQ$pv&%K4 z2cMo%Es3xz!-IJ+S={31SBJFhFy=3*!a!Kn>=vYT^`M&L<2RVk^WK4 zi0Wr6md)UwI1ldx_rf9J8_Y8-XW0zdL9vY+e7mYI?meSZbu8O6f#BYfGhej;h$Cuv zQ~jpLrrD0_|3pZ|$o0CEDB}%WRb{L+jiy}N;STl^u19C5t$0|~58sB%h2hJ_aixo? z=b0il3Sf#)w>N@wP>I7j`W>5uos*+?O(AAHDXTY(rLzbP4PE+_qU96`ciY{VA}3+0 zrwbip9sIuYuE+hSPVG%8pu&^B0VMnXJ8yw040sma_lhLcKAL#KVGB_ab!?|>wSxo$ z>a1_4|CcPwEypw~dEi_q5v1bXf9Y-e(Wze?R0H~vyotLJ{h;c6WM*QvfmPxaRKt8% z*np@DS7`Ce!@MabB|14s%EJD7iNn?#f=)|1a{kI?%gP@OEx#|7fq&KhjE)#DWxZ5Z zb~oN*YWpy^{NBTd;^p|MgQmSbv0y(P4p7b!VSC+`Bz((mFVP-u>4<(*{P zWl?%nO~e2i3IeDyiPed9vR^9~&dk%pPM_|x2t8?V3`ix?#4P^|GKZj*v%m7Dam9_w z@r14BHE3(g`1DuZ>*G54DTu0Ja23oa_6*LIe!KrSO|HB-yR zB{_e_K+k1`VuItO`_w4m#2|zz=U&r`nCE`K9(nKXyj;pR8&*LmgZkz*GFd{z%}VDf zHl>*ftH$|GCd#lCzP`!1`crr-@QtBCAGZu|%3>D|%2BJW)m<-|D%)0~QYmn_*ry>F9+vkH%rO5q9GMFFl~?|u7re*XZuQ;2Dy z1Szvy_3l9%%6ty@^5-Ch$`tcY_92QahaT0K%;smfh>Q1ez{aUu%=1-Pv}lpMgF5?|@$Hk!shaD*|JQTTC2Te5V(rTw zNY2%}hg-7TSpM9by$*{+3);r54@YBXp33WfQeUBT;ajv?=^D|dV>q9zpTd6YS>d7W zh$}r}aTR?%m)D?p69RiVaX0b*Pk1NV8)OF#)iCoitMbxNkl;opv}vDxo?PsqsYi_k z!#Z>CcVIyIRGUgEZe`++SpvzH>(;HSzrg7v*3=ToMO)#T#g%i4fgk+TvT3McwSeVm zL3f+04(~haf-t%cBLJ$7o;-OSu>C@v;Ee2-*++BhXE?YD+6#p-Yz0UDKmdtli5RIF z1?hbI0io1bK73eQx%NOR%oJ?@em-UyvOPu*%BQ}%_UKXkthgml&P%{uNe3K)6MU5y z@=wjzeMZkFmWAzZoKj-#M~k|AHXVl5i^!p6s_4{42?a*1-vx+?kNnylmuG&>>=Yz1F6?B_M 
zY8L>U#FVTV&j*$aIlDe^X3YQF-ts$jivF;51K-6l`Cv^I`V)!sW8}+$eQD4Fa7R%t zmTWdeU>}X>^s~VuPm~~GW~ytBN^7VnnI9JSPH;vI4Zj=vt77ybe!J~jb7D47|0qqM z%Q$$d^xeI{&Zy{baBaTn#nBvUIU#N7#+LeeC;kJCk!gEJJTN118mD(W2U35xk_F5$ z@5oUY`vj#aA?FHkDN%D zpwz4>#_>+ssfbJ^tH-auR4W;_*NXY~fYd>2kz=*w`+dm7h|e0G64!xEk6( zZNoT96^U3&A!0q2{cO04?VF7h_)ZXWr-C>_#AcPw2-#R_iL0-XEHK^d=Ig;QZUQV+ zW;0(dJUm>vgc6VGi6*M#7@490E_8`v@14&3#8Z(B>5u$fwWJ>L0}nl}C#bTF!+vT6 zkHd(G4Xb|*^2|O_Sy`DwKx^A!-MhEqhmN$+@4qnKX$7{vo*(%ICA$#{X>%Ir7R?~y zhODDlHd*ljLivpQ^_$>ZT0(y~(DEcbRrPEftRP{MdZ;uDTZ#BC?dQ`@X}ga_Bxl^a zcPv`y{sC0uZCEPn+AQ1SeCHrSV6jrH-iW|Pc-hxIfd!DD_zE_fBH&P0dX}Gxd2^?5N2|lMnS#G9T92j#F6jIKS|&N4tSq{HkFvjwq6TTB za%Z`hVej7m18q$Ek(*hWR3hlr|b#Jej>kFtx@(M|Q#f0w%_dygjZ7US>KwWOs- z`4H>dM$8aH(L`_jMC$nqWu63s;ybg9%@Ryi;v%~l9f~Uo?BQFv_Z}0DMd^9fSB5|D z*;AQt(0Y~%R+VfO$Kn&crZ2&L4(Da&Ab{n}obckVQfhFTp+r9BH zKSuTOX2X{h7mP1iktkW-{4=YDHPBsXLq0#_PQq-Y%_F~Ug!@v7`ug$$d`k<{?i3Up z>YsjopJTgekG3FylNpJBRq`Svh#`S$##pSYX)ef?NoDvX!#k*LTkw^KY{CsJdVqpM zcw8{RZ+Bo4>kVY4YuXVdAn1)v8A6>ua;;i|XhE#lJnCm2#sFPtZPV4AmFP*E9w0y{FT-<79hf-0$ZG_6AOPc zZ2*RqXqhwInu%6p?U`EP#I5*I6E0PYCj`%uvZ9d?1dADm|N3hS9sf)!)rCi8%pAOn zS^PvEhl`gy3a;7gNi@%LK&1VuTcxgIKTvWbxB8AF;}s)8a0GWV5Q_KirMwskmD+YC zzR_gA%(E5a%j;;INWca~^=b(nw9L=_{dWatlCp;Mmvmsu$o&fGOQV>U;ykL9ag7WA z<>QO&>FAi$_qhqWDf6$-*18>R(P|lDTDHgI-h;uH8b9CaD-*?=|2RKY_s13-G@NL= z^b6j=1xIw&(P=?nk!FbeNkr_&HL(91LVEbc&l4Q=57WSxvq%dLlU&6X&4J0;_!7%NU4Z)j15uR0EfZ%zPy&LZ?kR3}0J#7_Zmu+I8GM z3qvA}@BYzt!NuQ<7&rN8j!Hk*mWi!%t28DF?9zO+!Pe8QR)efovHIQhV5M{Bx0DPOyy6xVvg4GM3B!*aNO*%)a%=*6jYTp+Dk>t-t8> zePdCQgMes1YKhdBR_)I(67DqrpB}#IpE?O4Jo6nuCklFlofP_`jyB85w>6T&fwonQ z44>VUCn@cZ3RX|PKpyxGF(&V`g)sWa5MmahO;2mQko$+O-QDIEtrC3hocwscaI?m^QEfZK0 zt-SWmK2y%QJN~}t{!tHo+qqkv9*0x299cnZaTX2S)m$v|Nq&n-SRc zEB!{SrpVsPfy`vgBI@Q(*PVA7_g~4Xs5%{8U^*CJY$YNe8HXgHbe1w9_OvO<3mqsmbLk%v z`td`rA>2LVBh zj5TqYp1(P&U&6k9>Qu`ilQ{_hZLD8(0cR6PKLYo;OG*Gx2Z=03W@b*ww;I`q&~d5D zv`^n{xrP`~JLFty$j>iDpy)k5K+H{4a+V2v=(x?g&mAuwXDDKIih?ri3zv%N`CE95 
zF;R#Lq6=~%nRRq81Tc?yH@^qs!3VoI9NY!4oGW2&7L4{XB4M7b2T_vY1RMK~Qp}x} zN%SO0+vgbs#253>14KS~pPiMJCYid8Uwuo)`+FbbeG7XMEDiy)kOxS$fNqQa zjQ-Ml*aRLMcju^&A6~r*V?~a#dV`ch!xyqcmBo)#Aj07!*Dhw4z^kjOA1MM;*;w%p zT_{3>m!gQoG+!|P-^Y&+fs>l4(|EN5rwb{emd@sS`fshH-Iyou>F*d=eqdH`nENDA zOvlq2P%7z675EtOsj;TSHlg?QO7nrVMd6`zliyd);=J5|BHD%THhc$q49WlKbqnm+ z+qYYMAC%9~Kidm*?ea|&t`KmBxZU~SJjTXTj%#aU+!BKK-jfi;nrd~T6fZmk`2l4M zhL_9UMK2E2Z)P2Lfg7#1LdL>?ft(V{oP_e0@_YPI4yNBejLcJcucLFyib29 z)<(wLOxPfnfLfgwm7)SnOAGhj%A=89y8m+ap=8c`RRd?0NoB5MvbBQ(KIaP8QKL-y z_5I+2^rShqs<)P%#K4l2eP|1^@tH0UDNTO==h{KF<@P>lNKJfLQGQ23-ES^Srn2Ct z9WkQankzzctp_IR8O+4%g|_jR8_7FehHQd^gj^q~SxQhH4Wagg1T=n};6Ib_j_f{$ zga{EkX?oW+Dk(O_6G$WU`TsI`_J8KlHn$5RBH4Cv7LEcfMrwA4by#)x*Y?k;PJ4#8 z>w34P0kI6=**AV4%{_d#^a`XO$x?t2*8;qOT&`DRx{d(;5({RB+XPQ=z@9KAZ2j=- z_@p0U-!mU66X-=3Zynb7`PFFf2yNAauyZ1ROdzNbB+e{95s46Y?0LzNLCt?er5MY2UA`Wxe*7{OhYLXDTH+Zy(&enpHoc z27EK0q1GIh_h#_9Rn^oERoD@giHX^3mT9;7YGt2EKj*`g*muy?#b^?p^dA76WMxby zYJcx90hGWwJQVNIl2yoq7qOw4S!mKV#z{I}@_n(j=*my60z?vwPyJn(*B3H0&i@8Y|FXIg%MDSzGWbi`h5}DoVvA|$ zQK*&b-APHPfB*M$i$AZj2GoojNN&wy^<;G)HmnO68~^ylWSZptO{`V(*Ub4k|I?Lg zOvsttL2B-@+BTf-~d!Ab+U2heka#d6-@55NT zTz?n<7c3AU+G?OqnY;2@1j%{eBx;`A1&0tLop}MGYVCiwlS&izu3$tpPA%f+s_Hb5 z0u%$ik^W%J+%wTxk3kxsf8B4CGC&9#L*X-UX|MIvurgE!`Bc^U{fUVQ=djbybc_2;U zIeU`yn(JI}YD0)~k3olD99;SGrE9GjEv5-G(^e5WyB36Q_M%iq4=bl@&9+Ncna+$8 z;E;en88Qp5@)FP3fC1EMrpCs>4v`9R$z!ceOq9@Uov>_g@1QxDu7K`8*(p{ekOsLh zVKR=*+!8%6%A$&XJWvgcClv{_*+ZL{h!(7Gz2RSNu6mJm+0-xl-C!hxQEhB_CXAR`mk&!ZsQs%mAzxgWDwQx}_ z{4k9w8TWv>Z4r;{ZsA9|!DMd56CS&u*#IupnutGaL01x$-4jAg22Dq0u^r5HCB|sP zohQ`mqNoP$^RXhdsNmQVUU@jpo~Rledl?xia4udpO`bCCzK8l?j;NfZ2gGa2_pG#K zP8KkPLnl4{^Xo&jMlv87=xj;2ug48e$ zrZqHu>HOl;Ce?mid-Tqa8lV2XWCjGrii-~*3WeV--N(lS@0*mYEU4eB&R|a67nhRW z4-}_Hh!b~dtL6em9ggwkHsgZLZ!=-y#Fw=?%iO|r^ujKqLv=vz=(WH4<;zeiNu`5? 
zx4B52%opFfRO07HASm-bj^(8j!?D$|Ir)jy_j7C9J)vA(kkOCUyx=0|hF5w@9t>5K z>-{Q+{3#_ zGy-jXe4g^MjGpF+x^cU1hKV%9W_CKlxG>Gc{xKsdz_PxS(SDfl20@?#Y&qGdHuLp_ zABL&$Gq`j6cKxsy4V=1ui9oMrb#)tCaAIc4keZnK37($lK|@6#FqDUE>9IprE(>|z z-80PV%^Zf5xvZ(7XL8HW+-jvL}o?@=oe7{T2?w7DxDUNOmTHGWUGl?KB!S-5)zzrQ5Bi~1A$q#Hc=6jw>EuPo^GAyP zt&6CkH&HrC0^r3U@cR5equT~5PUQ9_cBNqM8i^e9@R*r?ZuzTGqKy4o!q_76zbpJT zNN}!o#q9Fo*_F+pL#a!9WZ4DFWfc_YkvqZJ8#h3ZzkpV%^yR_SmXZCUz&IQdO7FUZ7)ug3M4J- zM3n_>ZJi~jAg?5Jdmf0-W9KKN7D$aTW>E6$NeY{Y7zWUEMBMUA0mmXiw@LkU<2>%~ zpX}x3YCD?h(g;XYaUr8Pa0CW6aWI#IQ|-0ktU>2)M9_jmHE#t{b7aF8uD_^;Z$BDV zKVhO&of(S(yi56k@4`iu!EF-@7kYW=LuR=3F*7$e20KG?_VpW(5c!kc(HOPXWfdx_u0iBiLB)?Kou5jEu`++ghqiVWxJXh0o zR+M?#-wa(y!iq(q`(cRvetKd-1*gxGhKrEQtsYP z9K2_(0XX)Ye%l--UktFU$srq4qRHgFo*P1l1C8;J93XW8#9qR|9JEmDc6G3wo!NX% z2K;z^naZ?!MfH$eyf2Pa$jcmf`>d2TAE+BF&K#OPaxw>I52Jk zd=x+cH|wjJ^sq99+I&A_KsM6~e@OzeETel6Faj#&ihauh}5)m@8tGcccR1N?oP0W%#Dxx6;vKF^8exM z&BJ=$+xFk2QiS@JBrHlLWGKTjWGF?EgtRiJs1QQtOlgiJp-d%&C6r>Zgl4f&88bJS zTBZ^T(SBX`{XEA$etRE#KY!fM8m#($KcDye8qV{)&g)Ga?s@JW9yYQCCXl+RucB4! z)}hRs5voex86^j+f6c>q0)<|H>Gnl>^WNibU0+{c?92%gA+qo3dPSB)!(V>>1U}kT z!9nU9Sv2W^$p77_cPRK}E4$?lVGdyvS~E1%mrz}pd@0~tOvf9j$OM-e+k#!fqQE22 zYh|*ctb^FpuX)e?kcCa+Q(%QcZZjuI#|q}{AezW`qmlW`*0_;h#1M4t*DVFkv3=FA zeqjJSe0yTx1ntBxlp{zRMq{PZG1b0f#MmBLTin%Uza1p`;D1oTiqJ*n2;guCbzhSs z_TwU2STSWTvVhkPTXwWDABk#k?jV(DZSN7WD+LjT9hyFkEsV;4WbZTW+EwR?3`%5bbWD9rNtEHdk|#&*&m^vCvpaVgQPoC z`V0_$7*;d^La`GM$Faw6BsV&`BkOoatvI695D5l9xhN=h!;7$2*~}4CGZ)YRCiCII z8j~|arJw*4GMU8-hD#WoSc@T*cu!x_iEo$I+T+uUKDOu4E~#{OiZhbHUQ$t9y_HRI1^-^hu%j~ zYKMk4d%u+kEj@l$qh#IXYEGPofC6GpxXWrvVrpfLiTn5M(?Yv;FASQ3ye>0mO?
-ZJdkXXt7TPE)&iE~vpk)|}$oAR01qjeY`Xor54Vd1OjrZu-n zzGLIzfJo6EbAeQP@y$k3$s9W;Mk&JiWX}MP5%Y*ZU5hsMhvOg}WTGk11zIF(`3>|> zJRx+Gk;KKNR{(umg3_qD$~yKZ7nV7EhW+Y-<6;qwz{mlAoI88=AYjm36$*i)HX(&! zTxUiC_kSJNFZ(hcZ@&(a{Hc~ebLZ-M^ae5r2Aa|K-PUo)u5OKD+zPY+tf=+yZ`$ib z4n~8fOovfHy?iRg8c8swA1bF{UL)f&=T~@)IZzmZ7@)OuyxHjOc013QaWnB8oDiqz z&^RGD`rs@P=uAyKkcIF*GMUJdhf-tS4BJ^Xf#*tkAr9;Y0>c&KsRbED;hS1aUTJ@iFWV~Z^R0qR2*)B@@)oR9O((PCiEnljNLPgzDUvug3=NX5Aw4B7Th z3CN3v(gRciVzobZlKr3l2g-XkU!HIN3TiF9s;vZMJW=O6^m`RHI9E~!>uSF&ry?AU z!$QlQyf2~KTBNfq21f1E#K_)F^oa?6!1LD!hQ)|;`rxaaVe#y_J=*4n^Yk!YX$$(2 zho$DGTVzF7 z?~}*UbmwXkOC;JJSwtaf;SvOLwS zYAyXHq5Ac*$a${HsiG#soXWf%iq;VN_>WsD31pexl{$7{v9NmYZFUE-X4b}8hB?LW zW2-tTD z>qjoc_L!KQwJuzWFYl(0pNeiXdI~onqq?!zQ>{&pcuwbcgM-;3k}Z9s=+I#cPh3$i za0xpS+!>f#i@r5^+0E6~aSl+{3A&3xJQiZeaBt83nK4E_^)$T*_DdbVZ?@8Mo zN?T0W)#aTX$s<1}e;rqF?y_+Z-*^G8o=L1ON9#rG|D?%h7&&$7o;`Uvvm^T+2al&E z+tqm}37X9PS?nXSuVDd;p` zu+g>}bS_3!pmLmPqnKTO^e@k}f_m2YaL~TQ)Nm4)%nduVp(<9co3;L%DiDk{$`f@P z`r+1~#|jU6$E)C38Ut0|C=%wO$Bw)xiiOT2CW9fEp;k~Mbq zV@gvZsrosg=Rbe;KlOv8BvV4`Lx|#?EiANoYMB>hIERHm4nIIBwjgUQSxki9YxxIk zo$Q$c8(H;7u-92PZwp7;C+YN7MBTJkE})bYU&&XP2h5iB1B^w*{^BK;p<~$NNgF6o zs;7pzX&`sl6T~K_p9TdRdhBoMW-YafT-X_TK!>Y}@qRAg0sq(7*UQCrJK4n!l)lL> z)OzSuqq!#rS2$ivG9ICb;BRdT90OMV_E4V{Wx(Enw`qTw99m*}KD-xQo z7(Zi1M^pzmc*O2r9Y(6#6CK8LX%V*Bxh7;jO>eru;v+hhJ-{v#!} z_^GKQ6I4_bFnTDUf4f4!e5+N}{~1_4##p~p2~Fq#o|XWKu!O1|53+!usKqMR2eM=j zF_ELzzaKtCp;#YWjpc8+X)Ga|y;dO`C!{`8_QTcXKl(dVIjfRUKMx9V>Nk`Gm}>t6 za7B>apkDuO%AZ@0Z?s6s>J|F>VIW8)sW6QgO6+?(5oIbrn5I6Y^N2ntDhA>1QrZTr zD4bG{A$JdEPUUz5X^9=nV1MXA^ba++SyQ7&*Pb%v-h&6;S?Nq{)8=-de%GCaqbSjH zlkLhAgipHXb~if0_5oLP@MkY<6p#Om+G}#4Y@6Tu9N8}fbFY6FAPCN3-_+9wfp%?1 zS^M;ea39g{$hYs`Z%?@oP6ilvwv-oE8M_btV>T@sx6{9n8tb509dbaan8bM?(F9np zhigi7+!o#T?Z}zOt;dXsaFR+G z?d%{eb_NWyF0fffSL!zsnsHO(jiJX*D>{C! 
z7>ZR@fo+x_WF9LQ(h8n%*vVV3FU1d8*^4Si z!`HTBocl0ZS6m+KT&V$6s1l#FU(zhNl1OGp=Oz;I13;FmaHPzZO;e!sRax{<#u-a- zD{%%xxPKh$Fpve-2_y_}JZcaiGpDZ zKOnA@XP!ymlp*^oMG$yiUdeg(!Y-ZJae?AeO256nBB~<>PRy4{GoEx7wY@_GkL#cq zkgX#Z%Fs-oBZO_Fac6P}Xb9{c*{-cSO^(vEEO4df5(H-CvgRxsa0 zv4Eyt4Cat|n|MNM&f)L1_p#8y&uSj!xY@7XAGTujKo%QqW6tq{U1^wtm$pbPM|fbD z#?SraD`^M)>0Q-zJZw0#C1`C1{1hb#HIVl!S=>vKHErLXz3}Aq7=P`X!8yGn%g*%e zChBRmhm>;B+qcgy!<2pek>;D;((|j}b=+OACo8TwY`z5u*k9MzQR9`vhUHlx49wy- zm3D5^sna&TseZe5+6y2(I#U~LVB5q5>V!&?Mtu7KMA|KvWO@@~3g6B#hGuA+Lux+4_SYsa&5X=#95 zhHY`fc1v3Jjk`>9==u2smb{n-1V)3u2F6p1(+4iuH+TO0?&8EjEgaq&aE%z+_op5f zzl2CudPTqLL>y#T>qdqce2C^^MP~R&W6jjKe+_S+XK)NJnDS(b8AB9jA>iRH;hg$e zaXEz1L(kta6x>0SJQ_`5p;iSdQxXuaZ!e*=DdX8cfxa2Yqqs9qeMI*?Js{XjI?N_- zUA0+XU)=0xB^S9M@MAL8ZKLDH<pc~#u|=z-)L_Y-DbNt!d0dY zTlPv*tWL5m6PV;r#8|Hr(Oh#W;HQ(xX7#y*;!o0+OcXafKdw~E;o~ZOx z7TBD%)AVQuZuY(IV{gOOU6XUZyeVmV<>%M-7r(0Rc*yh+Wz*7={H`sOscJz$MFsf2 z7ZxBYF@z{JE`+yB!;Wdi0QSJOkpJ+X&18xAc@E>phoXytDRa~^pLoK<_^6vd&}XNaed=9K%_g)G7ICl-@J#kMWN!uZYD^)l zYBrBN{@ys&TMz?RUzQ61%gy6#`2WHIKnGahDyDNSez0l|bU!-j!JRvs0od8~7vGCX z=`6tM<8O}=)TkQ*UHF8QZ+&`RUjcqw$pEKvm5i>^Gpal$U3o>Mpg`<2EFe1iu2z>ps_u%!ak?ii;`n;^Zfp^% zHC3b}_jN-;Lvaw%1BC9UNyG-;Z1!NPrk>XF&L9tzD`!>AM49|>I!m&R4MTd**%UNj zKXO}KA1`1$C9PCt`g8V3PmM`ZDT9F}%X>Dzidqd3gQjyzN{YBEjd>hqskWpP`x5|| ztFgDh;iY}Pq^stnZjQTe7dGeBW)c^KUFQN>=3f+o5>sK-qvA4nXq1KOKpgnNrX998 z(iv8C6b~qb5Cx{PEk-&h%PvU+#_BhaR*ftz71VG?%C)|XsJH-aWl}JkL!mF!z+h5~ zE|a~Ee}N!NOh42eK1G)Q8SG@%8qsviloN`-59X|9Fi%eucS>%s8?ss!=%F~idG@^W z4m=A#&iSca8WxSL<^KMu?5~cCi}O1z_g5OrI5L(&Oa~LgZ(ksN?hk?# z%X#qN4-d5lO2{eCFX9#_BROxmWP=AdhJAZT-8uK)TIhTp-Zg5QFC;q`~Hi--wLj5w7 zX9CqMiSjt?`8w+%g)>l2zfVc~M#UZr3o)25KI^j}d@%VsZQu&5c8D7vhhp9jSgs}%IR}mcXS0OM$>zX3>O0?a8ZBDn zQU||D5ZgIZgPN0Ed!LZlQ9sUZ}c*ZH?>Ko8RM&}s@_Z=#a4ny z%nB5G44fU>b)3rwk7p7x6V8<-wvpZg;W6PSSk+tEPPfYeReR>+kG#18p$m&AHeI{S z|05!}y6yGT^~9sm`2&9cUB@Ntb{bTRxH+3|Qq&Ehy9^4173TX*QLBU7L($eZ?!@WS zSAhYlh6H77Prak8t5NoZ4;_BD9!RE-2sX5g?Au&nZ)1OvNKj|gI!R5736*ckZzLhv 
zMp@mT7Rmqpqn&R(e0;({a+h>u~;_x>jR z9jt9Q0Vi-wlMi-l*7Nv$BP?0-0kBUIaLwQ=G+GuW%IAB-VA4}Qc}y*viSh4iwoEr6 z`x}pgaM>hcJh-SvZQrABTH%D;+3YDgCRrSd;{X2N?ez7_NGRt0Xt#$+`Wt-?DK`oL zt+H-7q5>ziC?>Bm$r{qJ+GFO)V=bz19$~LQFN-g7U;9{E8aWf@H$jQC#Xnskjn1>TlPxa#L5t{5;KvS3$S)Qh)oTL^fTh>S;s=o88)WQ@HELW6>Y zEGK==2}c;AK9FBl6z-U$H<4iooPUg>;}Mhr2xyEUyaqxG)I5(=_OI!Qlh5Q6t`q)n z;-b@+8!9p5Yy<0fIpAB4lZ)Z8=Ytd-nRS1gK+>AMXwj{#9vIGD*J2kZKz0@dzy&86 zs-WU+H)H(X0WAT?j^@5gse6pl3D92$<8YDIOE7m7R#-n9g)MzJaQ-VS5eJWRZ9|SNXiih&IyOUoxO13%?ATn zhb_!(az0cWRqpmFq~YeGHFVrY(BuPNy(H|CxP_+t_*}J_XI0KjioZF{;I8w{0G>q* zu0*-wjU38BQ|lU_?o6o3EG7jz6DxL)+s_|LiEo=QrJtvIG=@;!53W#Bg3J->bW@nD zQs`Z>ss>xfGpBi~Lhi05IxUIWL8{$*+;-zqkWltD+>Ro>w`b=u+R5@uVazTNRX^_0 zKi66RqHu3mTz?)f$hf29i%XPQ9bkg&+oT!S(6ycSxmpr6sjc*WMJi>Lx)Z{MuTjqzZ20FoKTH@{Fub5jl+g92TI-=JWRPvWD!~ zTyBqetVl2KatwSa*)iV8`T|WI!0t}90jS()B43(h!Y)D$Rk#~T&)oAf|wansCc%M{&r@A8m zyZtsNT+l4;onrz;q3opJ(vR5F5@GqNTyO;O)IMh$J5OC}Ev+VVPc=Vn@1DX0?@TbR zY_l<%++y|mM*L>Tcf}TI2XRFk{`FMD{O$|2MXx*6|8hU|Pk&F}k9DVtyHCJx)Uql-mM~Rv44eKB-!#kruvBj=AW$96IUqoI%x@sF8rl<#K zVMrTHmk3WI<6-`vsLFD=bsE*s#qKbikALsq-;ToIL>G@j=hBVf7%0=)ajw6}YNhYZ z#eEaPw!a8UNi!h(DFNZ(Gs{1Ne@5@ycVo^?Amp(_7-rl=yEYQX{<=v3Z-4^(`pG~n z8oh4@`O({swVLMUCS+!IKiFQZ5@d?5$6$Vsh|{WpN%hb}Rdq#2mw5d6HRA!`&pWdy zwwGxK>)$7y%Ch^3$Zif}*a7zR>GMsYCD zUWFG@s4w@2I;DJY}#6{?o zv6r|a8=M^?oR4Uo6TnW&OvM=TOBlP{alNCkO5Zc_4LB2ShoUBG*QvBn|DO2xEHQfXio9Q@# zRX~pE>GVJ2ax61nlViD&6;$q@(}ng~x$^fSDH9>)lysIcO;L}b+@a+pFTLMo{Ux#h zJZ;kbj7^WzpxsD8=GYf5 z(|D`ZH#IK~I>XmPPhEZvn2zc9Z-=&@;UJwA#ndp0!eP zG{tTsg8tAxB_5a67yB<0Oy@qOr%@v&iGk{{dyL4@apJu=xwqMS)gzY@Gup;}4ZJ9Ljk*EiP#P@8$qD8$Z{Pg|z?A{>|o7nS}-m?RjCOfSscvBci9kU4n~$u!<&jq@xTIZF_Ysy8);tcm|L9J%I3O# zM#spA2uQRL8Y$B@l;l@o_`Vs)o+LTg*Pg&yvvR=v;BpAj(FQlgpMeoNQ}v{tUChjR zQZfJ$EKBm>u6%c5`Ra+Ooa}U7U1F2qth8GSKeSMowtq zgT3@sWb~ZJgTIEvHRG>#;~&~^gBYOmOOoW)385Y{pc4b zR`d_QciQ8;MgRUg!%KnlWSHhwTJdFF-OX?~J9SO7MPnIRN zrh7ZjYMIBU8OK-Tx=<2{=l2dTiw2{2fQITQ2^?b*hOQ0aGvDcCVrq))uC?q>-DIs| 
zwOwDbAtp2`ErDQU454FdoP9Zu`kF{JMS=iMCktSK&&A0%@;^c;sVbj6q%>3iyuDX|Vzx zQg|Xax{JW2Tj92HL@v$9dFCMsII8nURkEiMh4Fz+#4E@R@dbJ`ftrJaA3h7;j^~Zr z58`Orf5$$NXh0R39BYZmH;`c5z}0Kk7}2%X_GB5Hgs)Yn_{?sX0|qhGSe-ygJwp1R%Ga0UNppWcdP%QkbKC`(qOn0UnqhiThe zxw#`fPM@czWBj^MW3lK-7!AjYY}6poOOyifqmG|Exs~lA(S6eZIRf2lmId}OI&v;D9hQSFREe#2fkz2dqn_NBqs^r@FJe*@&QxYjVKs=OCYV~o!%L5`VB z-q{`4*2|vHERuUXMaH>{ra;yWOnL<#wd2^WZasQYz2oSGCZ}wccQ1%qG zI@NP(hB_3c*K!V_9<^#edqg3;6u;XSGBR{wBvNdJP?*BFa%W}{aKW)tvRL=spSmW* zeIcPxM$)kh^I0IqiwFwbX6&W&Y5N;XGuslmHj&q|ACYI(=d%gB?Csk~4sT4uB1R^m z@?u(lc>A`;K_yDqO#r;r>m!y4tv@RljCGJzTZJMJIM z`D~U=^2Gi@$Ej4M%_|zWaomJKkz2L|K^BOX*_ACG=CZRrosDIi(d!MG6`0%OgBUZB zlCHfzn~{-+;OtGbYG*Btmvlhl9scX6eU#moxm*|G9wnRViF9~WD%`T-uvBmDFfyOy zE*qFXYBZ3KY-;g`{k8WlP;WWpt0eIhE@Ez&0j_1eY&;QdC zJn5nucWe}SnTF7AT&e875*%_o2k=Fi8#OsNX#B>jd3l|Atx&$Yl|?sh1Tlpw|GZCF z#GdL-JkmIN@*c1MJGhs0?jBEQD9&V$W6gs;t>?CvLVY3#~ z!}G>^+$P){F1%d-rfQO1+az!Ax+|*R(EVuuM<`F+-l4f4E5@e_EwE&ki$*@>VKWYw zej`HNaLW9e*ZE&hL-q72NA8@<(TN+#!nM}V18Q4__Qm8Z4bKBTvplbH<+ap>!n5Sf zXUa>~Vd)$v*@?dPX&{Y*2Yld`4dBVaz}CqB<9s56O_W0e*mpMhGl-1r1*dE@&*Mu} z@Ep!gT+ju*U%PtGYK2Tp6d=Rlxs$r=#Xbkt$m`KSAr&e`T+M+Ja2gX&9^>3^Yvr#&G*S~@7Wc9$&RNfq0plb5B zVP+^ZT#ms5{m}ZV)e9|~6Qq$Dz$DqhN#BqJT9Cg2*pi;9UvV%}b1Wt;)S`!%QS9$L z?<EQ7h)5ag`xkdkwp``95_J`UA3gSa zjqkSaK;l=fjBb9LQL`W!(dC^}mHHEg!K8JCpWgjPI%# z{w!V&#xrw?kENsbV8tdKOjx`DQ9$^u^T#8++5v@yDIt<=P8tJv_-~$u0;Kk@(VZn9E$ygu~4k(mOTPDIzQEVU(YiEH%M#B zWeo%bPMiGe@!~79u`~8vPJ>Ax4ZK`Nra{4H@z{xHOj{*9)#DnHG{9ujpMpG~tmrb~ zf-sXkhM@!yil^!MflZs3k)5_sR6o0dm<7F%pVn-$42X2u1vEE^FuMe~$irupEWlBu6hqAP&D z8+FmVS1+$%+xiT=E1+;Ks}U`>1w~YRj^enF(fs@Tc^dLi-DCt{mB3Cas{jU3%6Kd_ zi|JyRU&q)T7@{G7zXm2y+Nfb3&SRWXmfh*6vbAEixA$njmpo~J-|C_2*P~O{;j|#D zBvm6}0X=atRCI64r&8GnT`rld$3)sqNeyBlWxSk>AX}-FextV*2Wz839(pNpmwJVLP=tJKY=c!t#pNIL$_Th!DsIVUMqTK6T_fvw5#o(Ivv z_0%60FmTomE}rBF<5y5sFb0Wv%Asg&Eq){M32~nBl)sQ&oOk;edX_FrBJ-37N}^e{W8f{8}6X>5}WD#Q z#s4=1@V~e~W3xn;jOQS*7OV&dPw!jEOOSy%fp(j2C6saYuW4)v3llZe>IuwBNK^b_ 
zBAO@0EdOcJx`|6()A;{dcMS3o<9N87WlFYe?OUp9q=cqn*;(;wp=K0k&QBr0=VHre zuh~-Yk<9JlM~10j&k-oX1nN0idhecz;Uuwc0?v^z;ihizaz{cV(|!k&=0@ zJ@sq25A33~9{u+HdwSPJ(PrzJFST0V5H|`(zAa=_<;#Fc+Hr*QD`*azGVdwPjK;|v z>clfD&u`z(nS2<`M`81my@vSr%O2P_KCGOU9nNaL)_b*jwl6Wz%fuDfG~&y!wO=(U zZ)6)3aXL?LCek1-xyC6zqWx!F^EdOym;yIX^L$Jc-gJ$Yi*S~CNXESQy$SEp>`Av9 zlcW0k8oGekIn43rLsQJlv*|L?uU^HT1;Kz_)b~?tj2tIV-Z^*^wN1lQkChiLUmj*+ z736a^(fWTjjWEj+tAAf7QbjPE`SZt2cuxoJAukP9qSWp~bnYA%@YeiYaJjt! zuCta+!vZ1yU~t22#xXQHD>rSroqgnFpJ7Ef=z*&BJxHMx?e^mhoUOF9Uzt0`S!8te zFz`XyA(%Lzd-pLm6(&7;v;cN|v0s+QGL%IDJIAKmiN@>gELTGp6$d@ zf~2~z`U}hSU6`hZr`&E+ClJ0tp3R-R>;6!nffs#!_Tr z{Nz4l17x@)18Y09*s@bJUEUclrcpM;7GokgKdS6ml*Sx%=$5qx^Kt-^dSApfQt1A>GQxNut4@ZE({xvNmxfmy zFjoSoC?mu4wl*3(c&be@3Uf`+YsXBPo8t26*{Q^7D@{VJ&R0@74oHIz@TEJu9U?6P z>iwjQi;boQ73F%-81gJu9hseM`nv6tW$g^#y;+Prs1@?4Pn+K&yFyFVeH4PvZ*_t) zll6euG&t89PG?YV?iiYO-No;0(f%Q!xS;9vm2nx!joY7g;jTm6Er{T%cNsV^E}#gd zM!4aT|8gYaWzn}8tlW7#^E{t%kIWV`yRsEL627LHl~@p=#{Cq9ny-TD=I+f~*REyr zsntCI(na2)CuSuWVrF^w-c|c~`i$>4e}7)a1@*SlZdznh;8Zu$oh{(292k zL$Mi#|M<*`XJPgINa7$i7Q<#>P6mH=xb4#`8#;kN%8C6|OIf1r?}#%GRYKRcEQZ!X z0Gb!1q2spG3JnEgE!V~YPIdwt&F^-Fp_cI3@t`rEy26pkY7ee~!IQ%dJ5t#eqT$KS ze6-t}XX2W`_R>b`ZTTPau6JAV1uim=ViXf(VQIFWP@!Rw{zf-!Q ztuY$myLjECH#Tfut0dMukC6|rIu!_79!qgM)7CEwrzXuTtfcjmlj;FqNO$d zLi+W(SF3QrM^#kJRT5HDhGmr?_4y8BxahlNGoIvo`eqNE&d z+`y6^_GN*syL<>Nv&NZBR*VPz#Lm(k#94r>>=>wG#>zXVZ8+cc;X1)C|Iu+hHC==A zYw!h&y@C4xU^nxWh249|x|Mbm;!pls#I4BQahYW`r)FM?0MOgM+w5yeWOp&TC7{n+ zE6^`dROdKIqbGYK`>mL;T_@@WC>`>U2F4Ty%Q~As#q$Vd&$S7{D`B9$k%B$2m;exX zl{QbX{(PI3w$0Q{5o-k$c{P}@=u%KPi`Qiui zqeveZ&4Mi9jG9R9~p#V8)%r>;l4n-x(;gDYaqjUp42&Cy;+vcv#z=wsyYLJmW~ zbP2m>KZpKg|p1aX+7qYvPZg$v3Jg6~} z%ej3!=3W#}dj!&^m8I0jHXD|M-4oo)*U94BwwozFbAY!XSXGWF-whiRPq$6Gb9I?` zDsYmn{jq`R_M-v$!wCe!Jz~4&r5|TegO^vb?Zn%6@7fOY2F##E zb=!(yNAw`y-a#BzvtLkoKk7wNyFN(NtNml`?MGm3&_a{NR9T?l4>w`!wTS}5`V}30 z2|aV5@U5WXjvrnkffIgBZw_@GlJ;nuE6;D_)gq-eF?YD z|N0JKU#Z*-t{yI4pI7Gi6`2w48#nUa?Vhzh8zpX>Us#=~B827M()3xl9b3jexY&FB 
z)5@%$%sY6~Tu&Lfb#u179qi3c?ofsa`}+XHL?WqFl(M))512~v&rnoFHX}R%f{ALtbA}~GKzXk&g730}kxH{;3`Df= z-(MoyV&@ZsUI4-p0r8fF5T{ zM-;_PRUCnB`Sf4kp1UFn|FN*^lzLm;fbz;t2tH zG#PBiE;yWAlrG;dNzCN_tIrppj~U-(&DGVTofl&OA^sE>z0fxlAXqEM!K9Ins_)x$ z>_^y~b!+|&kpW?xO~Rd+(=KAb3GwV2H*Qc*;IDC6MupXk^-?f3?HvX5%bkf^5`OoK^w1GQ8+;LU-4CiGDGQ{;3Or4ikRo?JTyQ9+ zg|4w!5qGzE19=ey$lzCKujzzwO#JXJd0T_~*)p0ywlF!kulniA=V?0ySeL=@p3i`8 zvf#R-yfN=^W6$D=<%Hg&n{kitdv5XbC=6q3EJWaBj2+0pig)4(&Bm+iw%3^}HMDuVX^tO5t9eYxf@ zG)^2IHBzACM6q^(jvm}d){||EJ6{o)DJAcWl5Lm2{dUXs1PK)N^S~QS^oZ+BEyKe3(0ywrR8N&$3AEjb7It}ZA{m4fux$o zsy%*$d^VA%wwahxru*=uPj=X~xh(2J|5h_BtWAcI7WH=pAt^JLK|KFwsF(rKf1||7 zY3U+q^W+FzFc^rKKFEAn(aoFUWTN4pAA3qEjG*h|>pSPK(G*%Vw~qRBy_*?rHVqX6 zc)s4p-5g%#v7|tCHMzRAeWM%~WIpwi(LrIs zXh+;$-nwxk4`ZloC(vn4GW?q>yjtV#3x+pTH|4KNpS_RQ4b%U|aTVRv=V01e9OBA< zN&`&s*RDP70Q$+IQX1cuB7<4Eq4wtQV*Pk=9Hk!$3QsG58yjRw?@OU4`1pq%QN_D6 z9rPbh)8)=o;#ZbIi{TR(U25Zv?7+zhi80LGH*;17rlR^D_787!Z1GJo0^;~`^8TIe=>~*GZ>Yn8P~l7C~88sw-{E zHb5gfP9!MxS^lq!YnlkgcSX}V9o;(%}|XFOvL5x?%yCw;#nyRtJ35o zQ8sL1vNzbGH&Q6zc=O*h%FLU$qTnCM@@t*N7Vf4YcTrE7r^VXB`%<{SOLTQ{>1=5k z)4n(5`2+_10(Qj36>i;`(;Ep_JHUlX_vMsnRXaW0=A)0w7;^CM+dIB5htm2r?%>~s zYFW0UN7TF579`~SbK|Nlq=sv4U*KU#JaoYI8-4xKHw82%Qg?$tQ-Gukyx z8R(jbPE@qm8@s}9Vo>DnOI55`aEW<=AfexO+dysLKz8>esud%+YpvgYbMj`y!!+$< zCL#$6Oe--#tVGNHVmRdmYYFzZ79vj8{ak_YYD(yxbK)Icgour=eI)6ESyd!%E`B+f zy_p@)_&kohHxtmafa0O*E=N|jf~fgh-`0C6R%*hO`d)*vR7X_qyByPw?2?l0BbCK} zm4nbN_9;WDI0NZD$bsZVhyt_%&~{5YAk(0Yu-no)rz zBOEo`eYv~rEu$tMWpw-HqP&l~hc}eFsW>a%rwVwC(WLLXFDmOG#{Xv+tjQcYIfX)@ z{ti!r>IbtnY~OGn%uonLDv}qL>%V961eNt$;Nv4j*d{-QNM)dfD`~GS(*(kjJ)Q=D3KRQHZ0SAu6Ne8YQ~nJ(-^&XW zH@I@rO2}j`WvlOB;X&h79?N_JV5sI6Y6JIaQp3%7=EUHQ*<3!^A5IH!QBPwt*&MJ+`TB>ccohV7D*?hBcO^dmH zo6$5RN)bapKE2qMCIW}q!&g|*qA#%E>|2P*Wc&HXVlFxQ5dQ;!!m>k>tnsZuIB>qn zf%kU-u7&nkTMn5Ufl*_sgzO0LhCYSr5kY>ee|W@Wnfs3$v0bdF7t5kmuBwn^YBY$; z!Y@6;JS0Yay3NWz_U(vXtMmRl_C=7|&IhW%WCk*f*7R?s4mq7V;(tRc{&_WOB(F2( z2Qc5P!*b|aBfO|3GDKS~gzoDA)NT)7y>{&()`R8B*ob(CH-*UU%jYVzFvlPYY%t!K 
zW)3uN)9ne+5$w0NVQcOt5ku5eZX5n-9S&9=Y}SiaVs;}W$bTH;8SD3n2D^Lx${Fl>8}6T9IHo_|nn>R=JJPIum4+w!+_E zx@G(4@Zh`j2P48Le{>JcD0?DWlKmXl@Pn$~lys>Gq>E=ec^j$U-!uLs?Q8mA0CW)D zd5=Y}#$bf~P3?dF$a{B{>Nj=$&ovXD(f><1^1P0D`H6cbn5&}QN!))VIk^Hf7FIOz z=T#`S^XKChj09y6-0Vsf4il(EO!LJc3^n46}&BCsev zhZr*aO6STA)8dVUZ|y~n?d5X)jH%fS}^d0f0d>V)yzhDKOlzhAu$1oox1os}S2UU)Ri8 zP3{vZ!(c#`4z~Sj-b;L5Wjg#k!|z)N%>wFXrVejx&;fc-d5zLUP^S$?&->)~D7Mor2PwZ$V=pF^?nn5>Cz>D&Fjl_lWqwEva-9bG~dKGWNk-9Zt8f3ROT5^?%Vb z(hiN1%6@@(XfT`k(dCJ;#37k)MwOw{6Vnh(5Mu5Cf-0jO32)MpOZQ3I&?r*>Y!+oG znyf@usd7$~QjDYY2!w4>{J6{5R|w6(vu)&mPm~@3Tk$^-gO8$%XV12wUD6ywU36Dl z&jC|R^N;Icz)72O=>l63g6ORkd12SBi&QFsQdR1(Zz+Ie?q+^~Sz^9g^;=3YMdnoi z#V)*L^s9**Xu!l_Dc=4hj&H=AjQ0}CnR+Oag)0I9X0PT6AHcNwR2>EdBr9v@q(g^B zK08kQmY#u7AckhcD?!vrCp#K{=C&Rlefm2lO%p?>)R!Yeg#n+?n(dm+=?ScUPun}w zFONG|jYjy>4|aD!yE{1JVpDWIcGWnE5C^PQ8*MKhy84rC(qne?-1?K91B2l1=l!5K z%Hu$dykfrM??~Isix=B7ewB3c4;Pn2o)D+8bFoBbaVqW9wZ1YZA%*=RCsUym}(&m!9EjsV?962vB*l-ce9IGRj0N53QVLT6&> zwtZ&}(5kMUp5-RW*eu@go4OEKWjvYD(9Pq=)-zNA9Fo!j6YCTTF zKxgynMI+Z($FqkNh@BCEQmG@p8|KanpPbjG^--)!?rPR%Pi}=%q@7Mhe+CTM8a2VA zwuH)71c%QvS@}mo3bkv^H(*b0oWooGb~Z!5jb|vK@%hj`9MQz7d?oFW5O{xjQ)JU7 z`s{{(lalo#bBG3{Et3%4-~oTx4;C1id7R5?giHnqw+}xM9j%0h&r}~M*P1#adjkg@ zi+KG9oF7Sqw?Ck{K{eBad)0tIDL9n0+Jw{@k@{~{l{Hc-NQkcLZF-EfYd?>6LQswU z2eI@6@h&}ZkN)rq-%FO3@i1rJKsiwX5Usd9`svfBH%{g2Z6_7EP8W&}Tq|jnp;yme zRT|D~l_zh{_V)5JGBpj!wxA!NweWO}gN>j$U;mS^0RrMacK}j8N`YN*g_tR5|N8d2 z)8$bX0fx^W5TMM~G$OEON=^0;281ChRZI?qlVd&b=GXps3ZlKPT^iZ41!eE+x0&e4 zVKK5-%%kZg77m>86Pr9M7Ut#tf#6D}AO}=dRdq{S4_wibErS*7q|^iaj~~}!UQY}E z4BW=)fyCOs)o1dynDwif1V#;PyM1!xb#Hnl05+=#9I9vFHo!$10#UdrK=l?oQ+)SmNZ{% z1kd?U3=4R<>@rt^kbzlA;bAWTduJDy!(INSz!EKPG`9^!W%dIs7c@9Y-<5f~JROBQ zH`EX(yYPFEH8TC%Ih~D*=y;_x-aMa0Pt!XR<;JoCLYq%&YzB0YO@Hz501GI(3(U4` z5ZD!^Mrlj=S3HIIuu-E&ds1+xtTE8n7k`nm6W&Kg)hCj6Elazam>5!E@cLg(iJUiW z!lk~bD%st2;tE0qv8aC7woMz^U-epGRRRC5J)?>c^hxaKqIvx?j_hKPW;CIAx{S|w z-OzXP$M0W;zA|e819N5G?nULN+FFeO(WDYFENX5Q6^-UnSr^b($VA(V*RRt*iPdDl 
zuZAf(A6Y>g{!~WV$^pDgQ>UE9z`6$5d`dTV*9;|F zh?U!cxxnz$%W)av$w%Lr@`e(~y*ww_+(&(u(FlJ|XV(wGPZzRRVMa8+p6x;LE?X$N z-5J0!e?sx8xYF>ch0NcK*~!x&l4}IfnQCL~9_vS3h(_wQgLfK*$i5@4ybK_Hd_#vOh_mS+(!^$RRYDz? zFpc@@R!73~d*zHqNPsCo*dBh;UJM0)Q;tb~%$tp;c12+DtR0A&G`LppjJWYATU`dV5_8vQohxthKXkE zOi=GfS`{7Hi4$Y;z7NvT(TVuLy#?TFk2{U~#b2Z;Mb>MiZL(mf z_eHa@k$T(s`Opyq$rPj*M_)m$B+Ba_;mbd?4|>aQt*#e zuby>bX|Qz?f==*(4*D%xwzNY#)U2g|JRFzed9?QxR6UB!{nFncX1r_C=51C`Zck>= zot>leT73k&J@6iO02|wJW3tg7)Z7_ATss!%C>P43X9fE;R;^y0*3x9^l%#P(q5Ifs za_H>You>~{m2fUqJelxPL}@5VI=FrvP2N(iDj8mc>ll=%RDADVFezv7Sco<5*tU^o zeDzQ?oU-K0TV_LeOKJVJ@gSrOFo^rFCu!WOc?%YZGf2aJZl@bNZm;xG^RWGa+lHP* zvzA_H$m>xiYmC9{LM+E-1NQi8po{#^sptqeTgtZbjyD-^7Sj&zhwPu^s~or^G8HTX zo}FgObmDH;!V~^0R#-M+N0SWpY?!He(rUx6eVO@mx@XRwM@>illFhW@!uq7X70`V!?UB?`y7k3CfHnEF=a_rQ@gzf%T~9SVNjdH66Ktn=JK^m`Ek z4!LOXCm2)et8wP+Ss9k@_=~{IW;LhBMNDx4>3_5#EP>0%cR;eR+2~o09-ggOTwnEG zi9Z7KiFd9=Zfa%GFN5-!#ItkLe6E5BF^ckKth_6e9BcFq3>4}=p&rD?LWtfMs%;%J z>bY8J{8vl=2APx{j!V1X#Td@Fy?u1~iWRc>?{JB6*RBFv7Xfb?kVqVz5rvz4-b|3) zirS1Uu(Rg|!*(VOCB&yvqjldtnNUMV5NDJ-qKuMtHZxr&P7DNd(T;82>$3d~nnwvt zrn)(GjP2I08;XOXSU}ngLSX|izGD^5<2MkT5hL_Ec$78w)~yQG)jeG)qs!mc z%hGA&?IQ-+&<_abd_dp{h;?Z*)=j75hANA5W7M@XpYUTkH946njuSSNLM z-pmvd1hbMsemLw!Vx%1S44R>gPNCr&w;3CWwsam_wo@k~{yB7IiNC)sn>{Lb6Z{0T zFYb$z9{=g2;#t*`^6MU#mb!x;9ku|cowW9h$Qu0vJxo0OJv|TQDeEseidbm-?%koH zu}PJ)KtTC4EEUr;yq&mY_CqEz ztA_+-U}*+99e}KwJ-sJP)KnOFZUss4DOpu@jIP+~3EaHCk-ca_+ZVXQ`4Tfk`B}g1 z^y$+k{~nAu*a|onRq&DppY!Y^v%40xyE#jQ!FZe7P{300y><|e-?j+i?3O>}E4XSN90z^q1JyM8~n9H^=hue71ML>VOxGMLa#fY#kZ8{ zO$)boV0e!iATvX&XT&0|z1kT7|8i(Zk0W>C(%CtuxR(q@2ONARMtV15sbv}cpGe7H ztU60p<`XG4{pmgT(1i=_03K4djB#+74L+4(S@rL~IrwW4WmUB4b+HJWeI%!(L|eKp z*QZ3PRG$)~Pe&*{4ziQ2tjC8{MF$UZ_`{n=L-4h{O?2bFnwa69l}*fKN6naNxM6ID z!#>f2>8el!D1Ap8=Jft^iO~ez_AZ$I1wpW`|4H6<*~lAWx=`5Tfl9i>bl%0IECT(t z9=ao3mHIUaQdv2+3hQTEml-DW9e*kW6s6jk^zn*J)S-HGqh$ z&+HtP10&Iq&|#??LGg!hAe46i*Dg_>3Qfd~lC@{qcfhmwyw|Iy@<_y&G5ZE08*qke 
z%W#a(6(ZCrir-Bp0fxWjJ|E6aW+nd7LwW(Dx3rioDcZWawi^L1N8Lymp3tTN!Pe(c^P1i7+qDari~{pD`DqIn!A&J0rLf zN$iYM5~kvsJ=m4dW&El~j*J#UlsXO%pf!MfY0Zq4)xnLCqO~*U1<85E?$)R zf!+8fBE6_CWbXaj?WG3LJY*B$td7Eh(Jfc&o+bI!brhm%I!oZ98ONIw2GrV~w98T9Nh7&(rk- z-eMZ?Q3SxC*i{#QU--y4?dKwv3W`zH&o=}GQ8g+TQl9j=22?E8b8O6UJHC7S_JggK zF!?G*BVZ$mK*wbb0A?NbC}xCn8o#H?9KhVUOAZ{S5`idl{D64Y&|=Fn3VTEX=|4)r zwZLXC7odJh1m76Z9jCBb{fY$nalD>iFZuX_spx2mw08CC5}M+FJy3O|c+i|`0|;f- z$|y90BC{$14XbH84tN4=%j@KfKPwki7(B)L$>z7Trcav?the7F?VzLaIQG!z^uE%N z6J&!_p6=MoYRKwO#*8AzJA--}qDQV7d+Zi5sDbh-koY%*0A=xwhhgwTf>kbcYUF3<`Bx4rSh=-pV!m4}^Q4uV0`c~kEX zWVB=ml*_VR&Py3~vC=D$W>d!ZL=N=W8)gX|!OMde*$rf6)v&?zasCgz_G5#r!@s7U zSFtgl(Xbw%MC9G#c77s=l+<hbR8htpJ?b$1+B_9b5_Q_a49@c8lL5H0l~Yy%*dS!iDrLO`(|JjKQaSZZNZiu6i6N$#-8^y0 zK-&-Bon~T8|4Lw38a57V8X}^~iwg0X&g_)*uaV}hr%z|tSj}=A7vWe7u%~G6Cf27M zs=+VV^EjJ{iVo+P8a&(~MBR$1eUVAdYi3h1QmH-9LfYA7DW^__;Ea|&in(8=w=Mh7 zBC5(--#NHgNvw7KA@fOET3MY9edo%URt2ud9OCSouCf)JUTw8ED=;=wd)OEEr+~U1 zWk;ETByQgC|1)Fx`zMZZ7nnh(7j!uQa25OYB6P)!?%ZFb;M;oifT0LV)gEBoA;Vmq zC(~Urvtf9?6V9Y?cDKr88z`W%asqLMCL-Oklb)U$9U#L3n_G!X73aHb0ck{p>(ibB zE?xcoc|dNmW#4INF3?LAFb9UZqfM5rrEOD18(`5-eF zTsnww?sapIU8+f}rLEr+G;ISGKC->YC_`IdpeY^t;6{YZpsCmvle^!vccLT(URfd6 zwFwFQT!ZZ&V;U|IX~AtZ`D`7^6fr}FJ~+xcnd)y8(@oK_I{p$5Y=|q@@kBsB20H_9 zahZ9GXL=&sv3ABQBbdip(6osXl`Eio_cT3!C{34Gk=c>?Y&)&`@pU0wQ*D$Cnjq3S zx!Br7J71acKR~JKVX}-6EVplVDMv01Q+-_J`e=k>EQ5;j<5(u35Z0}3B{hnNN0RN? zd(WO7Xnz?YyebW&>m}q9&3H$yZ$*yG4vD=2l#{x@iLFm4lO<&cD_`!P#)KYnypi2+ zNf(Wb#dH)oJRkeEfve&Z5=3x#;4zoZq++{s3$2Vj$a-=M3PNJrrj@56Ts^l7V{4So zt&C>SMY?1H2%@=>5sq*aItE7)5pUm^d?RVcB<67>KEI;HOE_Hl^7U)k&fv3nvG?v5 z406yaXOWBb_u3wH)ol^9O;@Rxv9SsW>fwZNL%SN5_bG#G42vXto0fw>h=ADhuX?7F zg0SJ)l*<3KXiwgc8?exTq4arcf%|gkAgj9U?l~=l+NUv1k4uF@#I>Yw z_jb<-igXmQV^=e&i;iV%%z7$ZNLu5xe|}MCYbvNt7$2THZ3sL4&{t~S5-N-`sj*q? 
zGjt#$UIKf_Tud~A^YSwr0S17g6EiYmi=5~%&>5K?69+T;yV4x(&EqPba70O5)~_!T z?nIB>bOZv##NyLRl{^E*;24?+MPQ8(g%EJr4`mty3NcH1SlZ>(8;!VnkPU10G3}d1 z;jg?A)Jc5&W9)%5+@SqQ@5j5$qh4bQL@NV)PB_x2qa=Y%IS7CVtGS%SZ+j3uX>xN0 zhf~+F34KTBE>Mv?4gYF}z-0qrKtO=~eo=*8{_$am8qRxm5-(4VJBz^tKI>`ASMaySNN1fQ4b{2nrrxmq(vK~?2f9Nc=bJ!h zS`I&D^#HY*HC`*3&RhdP**?zT;bLRqun4gtWx(nL%;%L67J=o)Ts+K7nPDcoutY+N zk7U48QQDGqUbq5OA}Wo1e-5n%(4h8{$hrDCQ`W3qJ2vB_h&~uIEExr<#6I%Ng6}9A zE|9^rby2}q%Bsg%Xb3>gO`5HS0jAk8Jm1JAKHk5_`9~CKj!V(EY{U!Y(|QDkdQwqC zGv+ma1JoJQD+BpwtCF6GC3y0%g{Gtbfz{=S9_js8Mn(`it3eyYw1b7;N|X_YpNDVT zHX0(8Nab)xk=2h3{3HWubQeUE**uYju>MBg8KiT{P*e5Qh_LaYGD~|lh(Ay_Ub9x+ zS!@9*#I_IVre>OFa1Tg8P@p%@uUrW_|C75>I?54s=6`(IbP*Y@>Pln~U25Q3iV>Wi zix)HVkWDmw%|hp99Erf&e7l%k#=bXMi6-twM@}Mkfx%avMa?JTR)-X~9o>~Sa*ste zTF@TWzE{U~Lspp6)i))(4_iw~2HxP1GO8o}v-p-7+H#l7r)Gp*%j9fH%<4+Yrc^TW zX=AtNKW7Hr98>z8r;KJHOoD^M2G#a4`$AS|H$+@bW5L@V;F$5l3su1~6{^aM2Q z@P&$9tmtd8$q?)DemH)17ke!8#o_DGqCk&bX{iKH3GhW){wd|_BHAPthPtrDkTVs70{<)Di3E? z>&f}&B%QJOj71zsrx+0!{mtl85R;xK+Ivjfs&vGt@4wxW8|l*nt+Ow^yKvIh{BJ}8yBb>qCgilabpP3TpLbT}mZ5Dy4?YQYq z{A2{ryV`vh1qL6S{BwAPYR8G8Hp7Cg4PuI}{$v9RD#Ss$iQ zkzI66zca-pYZ5MpNX*1wVDbkpW;ro7z!fAx3Wn^dR4(++!pAH>d1ykB*`j2q44D>XNR&!SMF>eP zB$<~fbBG3&A~G#9HB-h)p-Ao5wVwSP$3A|0@8^%_Sl_j%&*%NV?`t@(^E@v)w^li9 zY69PkQ(U}gkqcYHl-fx_M&QoPCL(p*Kpc_siyE1bVcFs)o79BW5c4iV&!+NDH~&ec z8?tv9LTuE_V)L%*)vFvuSsy@1RI3$*xr|cYWGKX(Fn;^)ow+Xi@jQk+Bec2rK={CE z{^;R4nAYM{8#@jtRwNwX5z(|RJ*L{(+ZtO(9*UnGw##7hG4mrQyiQCrxct{B+ue(H*&e<- z>DZBtQzrK^Z@VDYJW{Wu&aZB6S&?7tk6(?yUAoJkGXW}}JRQt)1PiHb&+ZpZ}Mok-#n! 
zU(1Hyzv{j;Gu2)zB4Am8tRZWXXiMe2tXn1PW0JhhhGNdYP^k(1<~vQ*$)V(OJ=9k} zY!iMLcWvNH%Ry7QNDU}}%cvX>Hs1M!=^V=kGJHan?L7j*w16(VIt!r}+<&m?I~MBD z6z=M*p1$39S8%~)7iD?@j2ctYoWZZ;x0#-(Oa&x=J6xl>Lvf>oBw*P=p&-9O;_>jE zJ?`i4Jv=*mDB!To?AZs@;y8)arpjaS?xF>4@h(q<(Cl9*y-nyq(ijY&sNIw4PlU%& z8AcnJ?}et=l*Qh=qw`%H_J&}=>3GG^dRhrW`X5gw~oOJhMQwflAa`0-UA zoa%o=sXCn~93=Qq&6}OSGSM_mfE{Lf_ZNbYPX^k)#}qw;m}h#DF{Zd>R#sNJC}4H5 ziP9_ME>9WzR()^HsFB5wSrXvjc6T*6K{T!=1!s_>vKFc5qx^2HZfLs0cxF&5GRuaC ztjZ*mPjab%Kt#K1>e5L^=iy^(2BG$&63)E2zGf4XA?MFf;lCAL!ZMd8I5?Q5@#lv0t7?>I2G)A_85~FA~|? z{x>*hvdjQ1YOQPt;bvU;O{*4-35`}@wCV2+dRYL~2A1jEeV3(6$GHgVC4;uPZ%a!> z7Gg9Jqo7P3J;Q{H7w7FTxPYluZC*f%j zvFIXCJoUwk82fZm4rhMh(DAsH5rCCOFb9x(wkn?`$6|fP4l%`t1R0}7Zhaw_Po_~; zV#UZ%GQ)xz*Zwu4g0%zj4}=?Oays23-tHNvlyJLXr4*E)v*M`e&dQ`vSZJ#TSwQP> zm;>QZ+-Ruj^4~HUrBC>p%RBE1^!q6%^lYrl9ya6P1kl~}&a^pm9>n*gWxxS(#h4K&ns9nQ{TG6L@F}V- zRPFDR+4DMtZQq^=KdSDmUqgKu6^=!}UocN7I6N;wOk$>4JbWheS&o@XFXk_Sz%dho z`k~$zrPYnU@qv*2A_M$_gMwUW0SxBADOAVT`D756hI4MWQrI{2IwqFzRYTF7UNF_t zdKcK?CiDd9ltwURVsrM`l^w6y(5i@tqZ6OEE) z(5A5x!%#M)3i#(I@MRHXXQ)0n?v#SY$clM29O+@U*=Ve7jJl1IN+cbdd|o-X+M zG_*)9scQ>=arQ#^hA#?0SiI)pMg$Rx)#Te6Ulbaw9K+RN$r~9zs1e+0*Kmf6>7>KD zBI4b>>BmnL+m14Ba<+2+&o{?clg59c*s%V35CZdxmp2wwvs5v>~afe~>PQoq|+yb@kZJM04u5nrQq z&0Dk>SMLWmk5ZvFW+W70p}PLdQ zNS%E8wU)zwIzD2`vv*X29xxug{(!UO$wXtsULv5%i@?fZ+ADYQljm{vOgv^_MD5&v z@*uXskMsV1A2B-CT!)U#$6i?sNJ_TSpEmsb*zeX>^ogQj{XPh_x@^BHI8%97OE(d~ z5c-$vDX1q^l$U~)T!w@G-<;V=pVbIZZxUn!P8Z2ZJ^&-o=vzNcIp=(}HRDj<8P>@? 
zYiQI4Cy@mkx&*OUhUJNRG?{f8Kb}WqmXCA!u>Rw3UV)guzY5 zT{0FrstmdP&dKqGdFQ?y5k8^)Z3RRf5vb+F0Fl)~7}n`6C>Or;+8>bGz>#;eiih;T zKG{dp5t_7Q*d#_1d434e$Lz8x{}#O8VbEOV<}aYU(D+9_0v0(0(W>ku8YF&Z&M0IM z0nh`3z6^(#M-`OQ3|atlAtT3%e^0gNbt(Hw*g;zA6dBRt_XO?tA3t8SnTb6CpwUh% z5cQ*7c4gH;x0I+@J!;94f6_nDk#u`9vkDaKQuxo=`PDvp({bShy7gL)3KSn(lcJ}d ztZehLplzV;3P5~n{zEpEG#mptNuSijAMRgU;y`_@*Z{b?>fKPJdPhy*a)`Us9DQ}H zCnqOtdZVD54E_7g;-}(VvXN0pgknB^1$Z>+>;+uiP9;$ttCD2ob_9DIW&!DLk3gWn z6%7UjZQW&jM9?ds!OAbHnmzjWuW4iayXn}*fX5@_{@?%DP2nhyv2(|VPo5l}U_`Ry zBl7;Opmp}kB2a*{W?qRi4Q#JCQ|ETWt(bD|*#4Azxw#><`mcmMBKY(sYv$4D^;Gh) zZ`wgnw$nMWwUvGH#0$wR(hiyEU8>URm56R#Z#NHY?&{Hd#BIUm(UhuGukzHu=*70n zk%-QrQ7OWmne%%^@+4QKK25w}3R>IP*q6w(pH6MrPy|~=e5z$VN5Yi>ZCa+-kH<<+ zw7+yZ2Ei1Uyx7d?)5vnN#P#k?<|}Zm-Zt}GwTf+Ak+!Fn&{iu&Pz7_p^-Rs#;7Xit z96@OV-{WA5l}xO~GxFRhQ_Uyoq_~a$H?;8b7MmIvKE^LiC7X&{`?WhPxBxx3R(Au6 z1sOX3y8@F}s76i39_plZ=Axd>W*0FJX6U3PyCMF^4~AFI&_1(zKNNj@Fv5~Af+cTc zBR9BlE47Y{uBBw(eV1|r`qzEyrSK$V>p-%Y1PZqiP5Ptu3D7p^=t_SyaEZi6xuU1q zrDmpn@Prrvd2i0RreU*%%pp^S%TqYnM<-N>YR;oj0VffAO9mkm-J3mH2$mhhA!^K# zIWBG5SUbu$fTk=jDz;n?wB|;vs-4&Rc9=hX^+`x;U&Sr*__Jolb#MAjrv76r;b#6C zQ#)dqo$0%eAMgF8i^2)>%5;bWwHC}>M!Nvzgc0t6-8A+)J2|-)r3KIWA|<+TOI(;c zcA*QCE#n)*4js~?1{iE`2`Gizn(*%$!u$(OU6=pm0tmJ1^VCOuwX&xJhzQERwD-`V z&S{ChbPhEaBze$g)6C1R$H2{0==+DR-CnVfh*!>Lu-rJEAkAHSm^9j8m3e(SpeI}_ zt^KM8tq%2Q?_yc5iTYU7&kZw4UBt{i5$d5s``%)on+0;*JU@PcCD)_HUhztx@xQW^ zfz0R)Z()>~0#g0`T*flciPJy_UYI{c+=f!{>VcCubYL5W{Nhl}2PO{)hmTxZ3j8dw zX>cK}5EPE8Hz)w6HZjHJP8fFRpA|sRcQQb+5J?TtrM?|HhIHmYqVm!<6fqIczC!3r z1dCMf3&c`j+4Bjo=rS;PQoiMW|LvkTZk%%hgkn>6yZF)4oZ@0q;0o`j(Qh zwzjJKn(s7LZmtGjmIIoDTSc!eDJ~xLu!b#KvKAHC*>}11j^Do)X7Xz9y1{Eki`!RY zBBKNA1R<|L@mJaL9pH*l>E7rv`a4=_qmV6N1Zq6UCf7mh*8hwh22!?!x`-Up1nZ#sQeuP{@yEAdH z400fwP3ZSV2EiEnq}0hTaOn7}4w4wn%+`~;a42moA*jl_ zYq-MGuZo*9BsnDot#%n?%i4m5SEaM38S(qrKz;n#D2GR?uZ^2qm zPgz>F&KU2LFhHTE3T;*Z0Z?Jp#lw3zF?aE@Wg;|2Gj?~Zfu5csbYs`}Elf3FR5yJ= zg;4nA%Y-W{WKUYy6j(}Pq%`U@eGp|R22o}%u-H3+Xq@(HfG5LdhOMV%V2w|Vawe)$ 
zv*oV`O_(eDA22Ey;>N#`3FN;&WYmO0xvK+}upl}l!ubIpL}NG~#$PL*o$0jZxOFbu zD6#`?EPpd($$e;4xNn%a&ac3(6O*u8K1D-x11*7BAwsnTZN7Dm3_g!fv%ljx-1Q;V zsP>%vd8_JbwuS*^d05&+E2yS0D+z}3W3dzUCU#VTd;S6`$Kqn*GvSg^V2Xv8l{HPC zDP;W*Err^EL`Hjr$X)OYjJZI(CBmtiETJ=}8NKc956|=U^)*}7E-3tCy|+Ketje#m z6gj_lc-hmhaW&rlB6{^6s6w^iX>7IHSoS|ttTn&jdV90IgChdGPLNKYuEP3H9aE6` zJ}iyr%c^l5hFH=Di{AP+J+hgElcuW(-0j#4^ z@#~uYl~5v6lY+}M3B>B=XxX$MqpX3$vOq63CMNs-3gIomcaS2rSQ1b9>uVOdw-a1% zKLl!Toe!`eD3i=~F zbREEACLFx1e0k7-4#86y{u-{{4xB9jQp^*+`=cSFO`S095i|7Ee@TIllKuTZIMVd) zAYN*igtf~oKHIuue`R)(79U*k{A`!?HRY*+e?pyuwY1-}vZh90FRHY>YpQB$>AYB# zA)=dtNvM6q+PPm%G-Y%MPW>g>2ooQI6W=;T=GN`sKXOzKalsRwdF#g<$|6b@rRh@B zL)e(10s1GZ1rnm>=o(Sjdn`odh0>wJJ+82<^Jrc2;^j-(jQpuJ3(&<8F}w$*3ILjJ z2`kBi`1+bhM14aR6_IsP2Py@rg{)<#9H(^Nt%l^^-3OR#>9du3GAb`$1Rn2789~Dw zz351JlmbQCbr*?*FWG_wvKK2)qeFN z(Vo%&ahwu{4#wbsvUXZp--m2t0|hzmclFk-uEeA8et6o5SN^;?+&;M`yF%!9E|o4> zv`Dsf>|H6sIjn+}TeVs+<{}6igVc8iSY;I7^$LMQbYKu{8G}3NuKtv#0kvz`Ncu0S zU#x23Weex*fhwGDbd4cZ_UUd_zxe%S=1tFQ-{LI(VEWjKntcpnvW60Bk^=Q7dy1RD z$9uG5>ak+oJA6YvDFOKLs#SKj?!2>3mOxrphS}?{LtN}5RZxjREm~{ zhMpOAv4-0HL;{cqf)jHZmfp{8vTq31oh^k%?^=FVdV0D?8a4-;_LKrrxGr3{S-^EC zRt!V73;y(%)VGM*88k-pc@I+%EYzTH9mq7)xfXN}4hKgZh?f4gx< z(A6w_`;(fY{^z%?&c{R&A)5VmzGOqEZj`-b$yCZix8M`B%gjq+;+PABf(+Xq9>8ARIL z3E>Itw1OFaeH449O4MbGSQFb| z=6{KqUo<_%L=ljdu^Fs2@xWx(?%lh2dYRRUByd&+NxIdw9M)8$1PNXO)Vlop@Nz%B zxv{tDYFn(8(LXDE`ds{=X5PBHuhRuXS~Xip${O25Re7n&}{wcTg6gHS9z_}&v;c?P$`8F@Xep$n5nO;8{p~78#V3Ue|{`J zNA9lJ4~PxEi1TFm;L&~ZhA_rf4r1C9$`|#_o29Vok;b_bCoE~XZ!97Gi-p(0d*B|D z{~GFOl3_R{*!6|dPHo8Zs%jD0pm@~Z@_n~SKmX5vY-|(onEPy00xT<<#;Vy#-Wnn_ z0FkWKR8szZb9|%E!}d>aJ)8-!(Lyl5wG~~th#}m{^Ie|3e7S{`P((%=rX*8J>R=sT zRyPMy+RILghfZu|V#>Hah7yA;{I}I?=-B-5sDy14D@BG^QRcHCz$#chN_BWT1<<0+ z{#3>^1ntSBM#gn;81M?%CY?Y+Sip}PJF(a6^{QIdyzyX8Y;eYbntA6s<;?9}whH#@ zc#4MuRUy@nCQf$P9qsqYT*CY9f$#^U)yX|X87GZUE=#PWI*JxnOpo)1nJldwaeCFe zoQ*U-XpT<47>xCZtRp^T0PEqU>Vc8|UCCY|(-%xLFo@ZNc$p}SzqDl3zR65Y=sp~_ z)9X>q%=hu4l5k9LQEd81aJzB=l_BI%XirX-)oc_uvfxKwSaq+H{SV%5LsfN~wB@A3 
zRc|HmWPzkMlHMOK1mCCahtpQcHKzQ_YxxRT5B;M~=MdCf8^iGr)5YEraPDcdeQcFx zU;_|hSXh9Yp?Gy~dnTC?MPlqZUaycULSzEIdkSL+xz0xD@aS5{t|aC7@{FfHaxneS@Du-lTa{@SYrO0>&`8~ zc$h!elnDxRux*^*BC;j(%~H$l_$tC*Fz(rb5fZ;KW|1(Msa{6xCCoBYnlxt`@={n~ zla{d1SvB*R4ENZXcu!34D@jrNhppRntP0Fjyt>6wzR~E)!_XVUDA|GStMfUl>~q?( zeudsdjUgW>;ay?vD;<)4ic;3#DXZSOk(XTc)LqK4K>A z=sZ}UCvCGd+c^q@Xc5G)*K8TbM2=PJ^~$~ND%>amWYUN@@Zd zFaVEWSGj!_Acq;x#&0uZYr$DQzhnq5?eB#sHq(enj9Y3JK$$^K75jX2hO+xok>7aB znPbWjTzz$@;uHew*oPP@2NJ`kB;NqR!2Iv)m#d6wxDq+{U~8n(BEPTbrR`nckjorL zrB?J0{6~v-+ZZj8()i>XgMR$QOysB!of*hvYehe5Xwg&OdIcouLXH0Pc#9S-y5nhn zes1~1W`oyY(?=z(^~uNETWA+bKD@S3l#EgU%+l4)QC6#gR|Q9?Dpsl|TovPf?{gfa zr@Vs`Bo1EJ#Cj7dyk`D&DtKK}9E=8Xi#EfqGxw9an@n%;j#njQigmjHP!4h6>gQm@ zE(?(t+}#w+}=@qqZqiP3(Ywzn_2~JYP&{}w)bujc!K6kVX{LM zE`YoJUsUYb^>9}Mc`(FDtufU0Pgbj&ZO{ffpa6?+V9H5F+es-6-FP|<+KzQCbDtE< zh-bEPn65(2AWeO`X&?qRIykg8-SI!3aUEPT5;mPM0k}ytKZhzySOr9A#WYHHgJWA5 zwrz*3wHZsdR_JucSi+%Y<`Az&hblSgVM(pgy)N`>JvLlkk`cEiI{aowVDo#H( zlCG%g9r;w?$`b;a14Ye}% zyvh~4r)MejO$?Ms6{iE49dEsb;O`JE88e7^?#IY3SRwVOG{c7nx~H;R#^!iLx*(6FL4caTXMPM&>2+wFqp3v=|r7UnZ-Sp)-w*KGnm(!1Sa%%a`x4 z@*)^*3QvAm)3aAEL1H!YPsqBZlpQo{U51J9mv&YdAUVD=^Xd4K5w&n>eq2gS)Zyd3 zy}=^B0O;@#?6QxYnR&UTthx53aox;u$UTjSGyPb9uqr{XmNjs05{P6UKH)(mE68(^ zi}W#bT!-kAJY+h7N9oFZPV_4mwU&BjM4Zx{n*PJ2SFc@_4}Rl31-0|eEH`)e5XARY zLl-Yuk_I|fHo07@StTfs>>0}4&1IubmW*mWCgG}e^=Y=BKw=GiEXtQb9s%JVFi$Yd zP|4iUW$0!qd983UqDE``F6Qcm3u)93cA@Z{x-;l!8uc`l-KK?*O=d;uw(MJVfH>OA z^+PO18;J>vpBNDbT?s)s?#!5FWav1|ueMT=^z=#2Ya7OT2I;ojSvj>}qX)$n_w=IH zYlg=ZZC=^3v;_5t*Y4lHf3u(?Z7tJeF%{3;M<=}8vKp8MNRaBNZWgt+{hO+aaf+g^ zjk<=76X;95*!Tv3Ft0f$r4sOjy(wjS3g!rFN8*#nDK{#gMb#!N+y4ipVrUYkz}`wY z?GA3g4m*D79(|w6SCE~mQd$;a*>%;{swsZX)Jc{L75oOVHQXX}WOB2-;MO*V3q}QEt-&9L?Y7(W&0jD`(i$qlhP!(AYpBOzUkve#axyZS(X-4S zA%c-TZcR=ge+hHpQZ2^}MO>j%A41*!Z%Cmatwn_ ztxxE;O?`<5Er$uvNRi%DFul zFYYt3$He5(xw#{+6hq1hryjbn0uO5kWAQ;FLl=Jd(30&xVgGtU;+U@u{QD>`FKgpL z&QjRXrjV;e=F@L~Vo-96jnwrG8|ZfFF?UACegi5lUnZSF*!voZ%~f^W3y 
zx^UfC7hp+)+8$ozva6i?RC?;dIb>)mpOPEG>ZRb-abOTY{V4TLlm71R?s9KOWl*~d zrki?&0~C2?BA1Cm_tE7PW*FpFC1?04!s_iaL?#aq9|O0;m5HZ~@Rh|nA&@-bhQ%Zx z^YkR!2X^P!QF={M?mOVHN=i43zd4=LKZ#u`utf(C$xMv$b~dYEe=z0iouT54h-~2E zfhlw6?jW!ik`OQA2%4Xm3u7pGqsF_%jaIm_=HoLx(#+?7xf4kneofbJesjaG zsI}xZSsoo}r95E#e%JHeoU;@m#a%O-@cJ@OC5}vLV$R%Jwh>(^X>Z~viw&!~LVP() z;0|x*W;Z6lk?<`(aseJ-fI_nRn8ax*=S%*l6#UJiL#cdq^{DmRG$yw@34T`;>+P9I znO61T-`-(XIs7a$Cs;RHWZ`+mFA@JSo2@%5#UwYOx(p){ePAfwZ)a;4uqsNjaD zXI|d2GMqF)p;m1{`8JO(Hph^|bxiRlz%C{-A|Fg%fgl1G$K8(4dms6Ou7*<;08|C1 z>wyIbqM6A5*N#u>Zj}_e%`-mn6+<&}@}15-?G!z4fy^}JgqRFjJ(P;r$XLFjU|l-GbqGM38eC)W)qSQII@q1iqdYKmnGg@}jUeTX>Zmcu@` zX#m8YF1jNuO5l8GBb7cG>FMIB_$d;L$YO*#CgJpvii}syA8@4kX-%tHpxTpRsna6a zwZ(_}lBo(4)2z9NeG5XKeU=&I25HHAV z`mR;KhE!9}^wm*!PlBcqz-lu!S&T*5kiC&O%lvAGXFcxhkuQJ$|Dg$B42_L(HEaE; zQAUeOt&i=a&A;C?PP+6tW85+pw-P;0y!gqLL-Auha{%BuoI-~oXfRAQhmn5KOslGM#NTgw+-k>^_YVr*sn84T=r$d|`#*YkWwg9^oJ>Ww+xd_7#wm8K zWfdXsdgm6t=|I@L4y`^gIx}Os%psd}_%O;HO3bhg&jcboyM{TlurW7Y;<581?|5-P zKVu5gE$|#<*FuE%?}Fd(lDWd`iXcr0L5sdaBN~H|65@LO96IVdv~ee2tc3r@uj=VH znFoNaddPPx%QaZe*Lmjlu z!~$7!x-X9GBCqg;!?c;$d(fF;m?!Jxf=9I#x*O22w>PSO{J2Abq--0MdpyFp6`NAH z>I+_Lj~kP|s;Z{OmY6$uZPAAhTW}3iapoh>sahs|9@xY2FIA*^h8yA+YOJAgeq#9} znaZ-h?9fu!SnqyFO&~jeKH&iwPRsV-PiQH+5Scx10@RW6d>_C>(!3pz+}JyI0UaZ4 z%jil3CcX&Ko8rv~Aw0$7S*aOYFtGu#1MRdEyjEFuuM0zd|e_3#-G;{lc89jt4kT3Xwb z-lXlWcc(MzKN3#?Tjp>>hRpvN@^xm0c^+r| zEl=)*ci6|ZAOxu}Cj5{7{S)xc^?Qe+iGHhcu&UP#wRvnOZrI^Ilj8^EVsgHX zv%|lw;9#cO`RV9!K#Z zzIN9AW+m$CePG|HjPX=k@@H2`V@txLXl}UX>}F9xQfqlt$9c6Nc;z zh7elFvtNCk9UWydxcfbsB!kqDxZp501hmUVK}RuC3(EWmh9h2B{Wfs*X+=Anpt=pA zR-WQ-)XOo|1x&645{QsGoBpGIY^%ju)P2;7n^JpI5>gwN*+LpP9=%$LSY^}#TQd{28g$=L|30`PLJS%Hl1bYK%!r_BdGmxm(^F6y$BR1 z!>(AA6iE?un$#3RUva@v5Pyp7nE9mO7njwW>TuWY&?*g@1TifXl8o0{zDx*+mkrub z#*eW3LpZzNeED>O9c*QZu)DM-!?95kh@mlw|192#)bA0k7?OyxE6)`!mK8<=O6o0J zwK|%aNTZ2fNb^=;V|{g-8;ZQQJB)T5Z9xZ9oI7uMW+5o7$1uE!NgMdxgS4{)9wY*EiIwDELWu%P37^@#<_(FX$p+Ov8FGmB9@v-ztY^DO7%4g 
z_*xEEH*+Ipcx-Iet5-{Ane#_OOM(uY>h8m=j+xaK8$P4UYix(*Q`@Pjus~EZ2u6ljMV{Y4*#aE4Gw<2= zUkfvM>0yK0D~O0;s3Se1rcXKDhwMlb6SUVo`}U2BZm&Mba(^}7`(*R!R`=FPT@t!V^mE!a}is~e3AFt(EWr*RR!@a#zE|~1xr42 zX4`Fb1)bM;$&Yc-|0jirDQjhPZzDl=HVR4{2|KNjVyBhN2h|?4{}&mG@)W-OR?(KB zinabqi6d?u#oG2`5}H62kZlW-lk&>Dcbx>d(IZV{O7tr*sV#K3emfr6b1I-Tql2F1 zGA?6*BdN;UgRdrjP^w}q9Jb^+se5U-_bm1VZJ;3W4f_@oEK+-F;I`J zL0+Gtm5jCbU11WF<=c2Q;|U>Iln-2s!A=CFi=NNpP26=Eojo;NCe!OI z%U|d*4!ow#5+Bq@v6m7O0=VX*w{;v9lZwfaaOlm`(iS->l+cjyf2NA6OGJ)UbbH6RlR47!Sp{P^#n+Q9=0W4~8 z5_H5zCIrmZwZBS{LRW}d%Q|2r)O1I-mqx!*?O@MLi86qbZ57X$KIn)j`5-OGqM^(r ziQ-xK=%nQ2$f8CiJ$6XY*){dghb|^k^jM4Y53Lb#Wb7xO1E1V(Qn+^9DZ5~N&nOrYhbpRYZ#d@49WME8mg4q>1=jM8il%a5eNj#pQeVW38d<38@I9cotrj4#R zPlb2mOoW?XXO@;garDFJBZG*o;BrM=v zZI07a>WuImv{=jv4`09j_q%V!1qIKS+{A~Q+%Qj*MSGbbi#iDeWic}Me?VEm2J-Dg z^dF^ZjC_Ke0zuo6?d6!IXHYAg*bTBV8fgtE)9ou49Q};~k5&$B_cItU7?PSsrAoK^ z1;ejkUTtt`g)|VIUhyNC4-~nNW%n+O325~v|ET)*Edxs^gU#CkCD)yNe%fgh=>GZH zE1`#q*Za>?d<;fRn_6Vhiz2j%94vc|7+m;HAcTz}cTnP#pJzCyO6B*sh8AWNCSWR+ zVxEIJ;mf&@1#t9RHv2w=Q4j+)F=A&c^|2NE~n<#&Q zJ41$EAK+1GHE}z7W=GayKX}>2_D1<`#2lW~>M&)ACdBO(Y$pBV1%(>-jzs=RfMHdGg?-5c> zLW`Zc>%!WKIgF+*yuIr6c_v={W6PJz4q5<{GK|PW!K;=S0&S{eV_mTUYBGx1RnbOh zd2NOgndHC{>o=8A9&$Bu$}~H7&Kadm6~T_y46Z}byLY3<&j8vpx3F+(%bSq&)y#^d z#uBe|FMsxQiDa?!^fNPqNorLNTi<+5o3(-a?Z`Ub-Br>hXle$E#HxM96a|eM5gTEq zo0`Zo#_l+>;wzz5^Ye7xVrvt`er_CBq|ar;*<)bCuW?R?*pP@(sMQxK*9udc8u=CF zM&qS*zkn-NSXiigJ{-Je)zca?$xagKH_q`z7jGQSh~eX+;N*h#90`= zH7qQZ->ke+z<V&{vjDA01S0M0V0~g7mO*OX=aaQW^@nB zvk|soR+fr0$+}?-UcL@xaB|VKUEw;qOd&HItfqtA2~(kx#mvj3eN3UE>-+Heb9Y}r zF>8j>A+j`N3zPDTvwvYhA1f}3cujO=wUJsYB#u&oxjWbyrYDOW2p{gSXdaGO<|iXZ z%7Cb{cpleJeEhV^L-iIs(mZ?W)NP>Ro?V~<+AuW&9?0~m=8C9{yT+-lI^f?>-vLh3 zXfmIwP}1>biu~&{QCAFV*3Sjyj@?J}PB84F+{ZQdU$Da~AA?U%DE$mUQ&zG+n+~?g zItblV7s$UyTX7vbk7##M7X%KZq;Qy&d9au4)l>@RZS2G@Y%zp+-24MkeAX8I2bHvb z(l@`WBD=~iUO}q%1 z;o0b3sY#+v@IUq`W84|MHFyJq2Fd43k!b4T+Th_gA1K;|ZfN_u;73r|o$@_Y5ZSsyVMn%*^^(GaYoIn2% z^|xDX_khl!yRUSoed|nSE?spVsv!k$jIs<=Ahk{&dQFyVz`%w(-(IX+I64ayS&6p; 
zY0HSK{B}C4hj#VkXz!mjJzrSh2*3Pacs^DL2dM+ImLxDuBNG~G9R&N;6P>oSYs|=* zC%U@vX4~*0W2T8_kKSQK2&aZOu-jqD%9ZBgw2~M!Cc#C4$0W4ey{m1dzI^AFhqyAB*M!qis#jcV0$FjFznEUti3 zEW=k5Z}w%d_hhnotb8lE(}o$(ssvka^{w&qaLthcf(LiW22f;cKI~Y8k_|gXI|aPt zcgo=mTZx#DW2@(Q@#@twj{ukX(!AhFHDIp6G|#J$Nekb-%WU!#_9r#5$=|Riq#`l$ zWkdnx_`HqY6@L|lI(T3N{bEx60$(TW)kqlzkM>AAnp;k7F57luyMd43z`SbGd;VB3 zQVA`Q@<=sM-A-LS07dQ2x5CHAHzezC|6ku*LCr~}_;{aPi)$x9#^kx|AC#>pBgA6| z7of@a`nUzS=v9CHs;;ABXL=cBpQDqLnFdx*lik#9O`68A>Tpdc6I#h@Db?S;jUr!d zv?Gjf=zr$)X<*9P^AHEKtkLYbd?KbbQ#GDD2a+Z=kP`TOv(w3^X@S zgHw^$1yx#6jSo}L?5gsNW77Pk@}{68^Amzkaz*o*=~)|A`7% zka1jMQGk@x3ki&q<=*eXc7+3kzdPWzhMn#3r&zaE2Ch&Y{4!~1q@o!$4$!UZFz=N5 zt6%~o!xq4s^DEhODJ5>zV0!hgi00h(K4okUhqLBY+pR|FH>xTtEwSrxZx+{QIbtH1ooqV>pRu&>syL8d8|;Tn&1ViQ2(#bRT%f zX9sdN4N`(yPd71#d!c&6hEpmtI>S64{J7s8rlFb#MJVY86Leg8TpnB`>x}|KJ16zFjQ!2%#MWEcfZ)n0Rn$8sK;4)B=^x6OazmFv^GI) z6l0qoec2*|Dk{8{L_$!Kit*4eIkkh-jY1ER5sOFSJf+*+1Mp2oV``I_lFBrPbyFMv zv1_icLGK5ZIzR!uQH}HE{_P@f54zkg72MHnK|i$W`PYUfM=<0J;$&#l-17O+8jFlf zrZUEQ>Z1$v?J*!JqULef+(ap?MIv8iW|S-@%Gchz0Q6TBaZdJ}RL+E{0~W<+Zz?G(>v29b?#dNy2ob*z{(HJfB=D$AVQyOpMSHl<0IML{ z60MbBqa0=Sl#xKxjP5|Gojm9a)PhIWUl3)|@;0NO4f|sk0#hGwG&!&PZ3kFbY@)>~SitCoOO!f&l+gb$ z^EVGi0UjD2ZcuIbvS;$drR>xz2Dm>RD?8{I|HnQe8(gsGPtWV*JS&d!iu9wd0MZiA zlMWl3P^lk*7icqp0;NM7K|SaHEE>TVIL6o*x~Q%I?t{X)Ao$F=b33r~({P|UD`Kj0 zY3W_8;)xW_zBJ+KyoRiDx*k@(M+{hmbjUH$LvK?$A?3b74wK0m?_nakqNftXXPIuHk>2H;}F5 z?-BvK;296>ji|MdxdP0?f8NwQoWXadSlF6;;5x5q>3Nzu#j)w zM)HhJb0Z*?X-o?dfe=+Hex@WAqNPJ1TAt>_rDY1#T)l); zwDKm66FPxfr}^Q*<7KA&ekMN0t^<}s|5sDuzaSyWNht%#mO0M`jk{{aqMV3Fbn53A z{#g6Z?O?A|5ecgr7>x?QXJ4J|)kcUB*IMIvQj*fPO7_|e>qg@hL?AyE41ybt58Fjg z21uHh+jCmQE^hnYsHn03TxX#5-;&F7&{xFd2mrHH5KSm(YmxipGg=$hG3v=(PdbOX z4fJGFIi0Qw74OS6Se}p*?Twq!Zm6|ZGVWqNKh(fZSlpljbNx0_!1Z1QwM5o043O+2 zRGSaMafP9|ZoLkt^y6s>absU352%un!$PxBfo#{EX|DHxu)F6q_&A?L)1RPyh(>8z zCz;r?&r)|AO#mM#c_d}E`Bc6Yx~GMi)59NXH#Hn$xwwV#XN99(;x09f^a^;~deTe9 zu|wrw{`PM(He->=-ZAS=q7(Z~pfUzk#`%;YGAYWkwOA(_MCQ8YMe{FGD0Gv``T+cE 
zTepe+W2yFK?Phje09GiAMtk%f{aB?0F8)@uMG2q!^T9-8rE8mG)PC+r&;g>TPQs-Y zM~Wu`L^G;$(w~kn0@y{lWmMnUEqQ3M2f;P%ZSKIFM?qsd$(*^CsH(jqs3yuXlBoPe zbZQn$Gj!zpLZ6d|$t8g~a5)h@RS#2XJ#T+w$^UWz;uU=Pkq~P_lot8d*Y1qF2-GS~ zW_1DeqOcxDUnjOlM@Q>ordtt%%`?zv$5L+Vyhrh)Oj4mn&V8JO`?v+%tS7bXWI4<} z%a8vYJsON$TO(&pMSd6fc`h~(`dbny9OsBAz5z+IxaKZM$N53lyuU_tsLRohn>J~< zZCvT+&wY9purTyOObX_gpfO@EVbPJb@rc;o`k1}E`uiXgukN%Dzxu5XYc{5mu)^Ex z9-!r;dv}48lasuM3a8n#yWoiY{vmonHMl}i6>zGR>0p|hFxWXVSK0DlOiUQGrN^A}x>^7(R-HbxvyxKJ;q0)dipUE%;8oNQW1%M_Z1AbpDJZBB7JevNGCw;X)&5`UY|Rnaix58}L}_?)^KEPw-Xa z-+Vz>h)Ge-jjmWIAl?ehA>T{ld0oMYaRItlxb4!xpN$Ft6-WO1J36|MJW_fShh|v}5k2hNgBM;TuZ-`$)LcDKB6C-DcjhWoitbI`%?n z&E(Tma~+H9$m-r6wqug>Y>VoH6;0UYMZzI9BwBJZ1j~%ydeXSqS~mLtwjAw_Lv~jL zUgOkzh3^gB9Gt2#0gF%-nFH!09+E|Z53YmAOc-lh4!~qZJhf2eplQyYLeV8t)rv}7 zDGcBWTd(u+DIsqr)iB^kf*%#?s_HtQdrBUZm0(ErOAGqUyrfL8KRx zB!!4rJ}g6Ry=Njknh*q62}MikPDCNP?-V<*w}?$$Cy{pXw&xTQ%h~of?(g}auU3Wl zo(6I)C@OdM?h)FkKSVq5YvAFFcyRD)rVg7+-3?;9x}_+Z__jZ0BAlWs5!08;Dzx^M zwA~9|Fm?=q;NoS6e6pBwp>q_9rC<~v>OqJ&ONhq(XQi2KJ-z8+zEZ6Z zGF_aT$)6Q3Ij073Upe%$LRPVua_fcT*Xpu~JRO-YPvAsr0Q9JR!Jh`%ts)7A+If{QvSO~%>GVEy4>u6!{UE$N2-W9{5% z0AtCP9l-HDnJ6s$xMbIdJ2}q%M^idKZhb_};S!!Yt%SN-AU*?j6wR9Sl`27K>g$ zbj_OwkNN=kEHFlLHTCc60elzlhWf!oy6iUu&c4hFRE&X1RHoQQy=2n+Yc*RMgzUxw zHlIUeC$Z2N_j%j$UXohy!t(x!41Gr+I2S_zuU9W#I6W?{ zqim2)%Fe+da`kFtE~dNJe|*-53~p-tLQ*3_?}a^aR`0guiEP%POS%oCV$s@BPxcN{ z70DdmH)dEl4Il`KAd)|?Q_+gkZqYc02+aZgEG)Em8Sd?J(5U?4HDq6_4y-td*!C0d zE5=Bm!|1N34y}hkl5pvgSa1G5cH(#N}jV6*_nyYS8+h9Z3107~L6{Xf_om!(L04FPFty+2KFH zu|>q8SOv@w)`*Fw_`GS&Cqe4%o!l~T`0h#KY4-&)>#3F^M?0HZH)!UoheT_en8|Y+nn5hHh0>_ASl}E^PN@YZN5yi6Hxuu?+2p8L%r$=a^5xx?@zDcxV;?cq zf|VDpkIg%*e(_Qmcl{2m-Lx!GKf}B_x|2S681$IzEHS|Vz*tZ!OGyxLjZGI@62qOR zc+~yYGK&)lAYGT)XFMrt8vl5|1n)-^ja;O1hiCDz#vV4ZJh{kN(`gfIOZ&`Mtw35C z73(Vl6o?1$;&S}FH!-XeL|tK0`1!USzQAd~@*u(-j%LbNX&GfE0jcK;8|3boA;hoK0b-f>rcFH;>-TC8M!szSPu4z~tuCMBwP6lAb7nY6AzWJco}+-zx|c)X 
zx($aZpn`i%JStu%LvRFI_bnQjOThE8DenJrN0)8nTgbl_Su`e_0aLm-fdPsvi)?(Y3-I{RvXhgYYp;R64`qgw1^>}jghm6sS@AKtA*7hwE>KI8CLvcBM0S#3% z+D7OdI_+$2-_IE&c?HeV<}y9}=HN;C8XdZ%x1WdI2FHs`pTq_Cw8i7=$09x!KcR)i zH##PiHuK(_z!U}RNnH81eWDS&@ zYcphFRf&`?G|(PU<-c6!!P6%~E6#bm5M7N-1BbFlS_S!wkugt5mb1n#=Kl@&pw?-C z8k+}pM)u*0dh)jee^dQ22sr;j!nSA@Fo)du4{H$j8G)+h>rbWWuWKZ z^pxTgQ|CQvM?&QQ3|)BTJ}-_z%aMP>7H`n^uCA`W4G}6vkzQW7-xVA=Gv5sxc1A~M zZ5~O<%oHM`7{;DCvGDP83Qw7S*|gyG_YBcyM=9eN77duEpoJTaHN~l&Ji0~8m%ppS zH9u}e`_;!08_i1gI}qT|f8|!)G2u7b{t&ob3TDGr!=byoN9=+8RMWwShU=l%o z$~DT-h<*FU4eiXxl@MTLKPSmstlWle`1yar0sjw-ps{J(YUue6RjPhlOjmOn_kZl& zAS*67K1v6OhMVzD>Eg{=DKdJ@_wld;xS$6X(?5Xk05%5u*LTL}L)S*)72kFv zJ+GWMIxEc^aOYL2obFC&1+$(Y=KjJd$Fu$0pJ#4=?0X^$g;-346}rr*&De$cr2!4zg}xJ#chgV-8eFRFD!CUi2@ z)+Cnhu+QhV-_{x+qjX3(B!3XuWezj(=cei%<1TxL#LF*p859?>TN`}TzoD*&n5;HQ zV$uYYWBcAw;WOKii7Cs0_&rf*_H@s8?qG*uSlt!e{DJ4CGE8+yUaSt3(SO`Us${+LW2N0yQ zG*I#c;B$e{uprZH!SlKa7f%WG2Qe1xNBQ{A;o5#TB^)HIW z6m(4VJC|G;xC`0UKLjHO+WStmsF3!K|>(@Yn@V+xZ5FV@`Yrcv0ObY5j zvXxj|Dk3gGr9+P&U^SlcGb+yU9xOcB%C%_n5Y+uY?>{4@gAJUSOaF3HVcnF}wO!gb z{~mhswBMW)h$dDXiVbIZ|74yCKZP4C47=w0`LDdDlKeKfkl|aT+YTX53mNY-jd?yw zZIwpDUDS$noS=(DP&{%3Jy3p&u)HX?z1eQo!& zQ^fvauA%QSu4#ANT`Uu6r8rvklu|<=dfAWNXBVg9a});W`Ita!!&}MF5J?(X*=YQD z&OQsu_?n6Q1?844iz^zLCy>#VBk}!j-l9c|?@6Kg5#P7H_}s{=EToWjnDCo*V&x3J zhphQv8~5_2&sxEbbT9vo{x;XIu9*l~2w!G@a&+MB+5Hf$jBG1^ypf;aLgh5Ym#Tqi zqO432(}o$w6m9x&ng$|BTlQAhV`vuhP3bkA{AgW=OMLB1EkJr z!B#0%MoG5s5zj%9S$J3s2XSM6zYMUwCAVp%GZsPNR-2lO1Sj*;x#Wg4kg%*@F-W(N z2K5XxKs+_nCc?zHppx%Z=n!tLYfrfgacqo zOZwdK{iy@meWr3VMA+g1Y0cl^W$7UaV6CxDE&?yL*v6I(NOh&GtJ}SuQLUSJP9Wtx3PO~`87dsg<&${8Y&3#bM zo+=D8l1hOiCFEL#Q#wkjL5wo`sep1G9)+`Mwya!T4;`rz8=VJ_v$O3V->(I1%?{dw z)%dXWHKF$hf{^K85jc1=+>lHdu!|CdVn6b@*BJ#zCzaRE0TKZ^zWZql@tk1tz7U2D z;+(OM{b>im789=m_>Lx4TAWKeq>_ILj`s1-p}MxO$ESWi3!u)0SreXdr0>q*8a5_M zjy`O2II^*l`KI`EMoi$2i>2jeu(LwQuETEQE~~qDa^>Hn#$55f!aW?@+;tiIZ6)fc z*HKAT0&p}uplA@`Ue1xTFqL>jJ-W??3`-$nICtW-{wUxs*TR&IyB5!ex2t9>P{E9@G{--zHe5*HT*WTFFhU4D9cn-a#RV#l 
zeJcf1899+*)P|5f)QPe=>m@Y4PD~9S&toFJE03jI5u2_rvI9@};L}?mGtRz-4X*6| zmY1al%^FaCN|R~LCTZyE|M8!bPfqy)jQOD4{xGJ(=Z74d{gcHxtk>?4q;hE0p)Z~| z;m|XNxcEac`){KBk+#YM79_~~xb&P+g&nc1qL#gV=T1mcE!`5!zBO$D{pE6hlqJH< z0Dis&D=VbS&xs@z>QQO`$OEqh?n?Bo(oc=xuZcg$jhQTJMBlv94ubI{Fod`xad8T) zG?=yb`=0Fpy(5ESX&GYOi)*~iE*H4!jK9!frz{ty7in$DRy za|==4{BRjCJM5I7QB)H47fREEX=c9c0cqy-hOVW=sKkYq-M2*} zp(*!2rax6rA4F@|v75o0+P%q18^pgYriR`%T0<4#(V_WoJDueZF(k55f&cdDbZ)ZCB<9Is5TK)*zX?A6v!VVlkR`ymAmZ zm#hh@n9I<8{(=Q6Ztd{=Kt;FE5M#XU1o|f~BXPn4Ejs!HnRF`BD|Icc+jdjK1GNrj zm@0Xm4E`t3v_AkZ9d!JmmTlUMVL8vmDPXt}pFYAs7u)6uhQHqKook9EQthu_Ph!w| zn+}~xsX^y1(q*eH1nIuB5>p(W5l0Il(LGL782EYH_C+h9He?csswm`|bfMyQbavJ{ zLgOM2NIrMc12v^_;i;-$~Mj7E4*vCWzwPpj^{a^`~Sa= z`?&6z3;F%N-_Pe<-sk)MK4CKyY2P4N4*57iw-vq@M5?jYKOE-ND81+8dR$o817ONI z^Dq?hD(G;_n@Hs~`E@7Wm z`-_*Ch9-Rs`ur|jxsmvV81>MTNr2eEHJ&=C2NV_NV8A;__5SS55lmmo_AS zWqTR%^PFT6@XjmYhFw-!85_5u^e?FrVj46UrBt7e$5j4;+_8~Fuk-Ws=g(b6ow!0u zr3NS&gZD*Og6&lFYriZPFX7ovsSysZ)=@~whAf*d#DZV&ZVou=nHj2QQq8EV@tTAhB`L?|NJStY#x__2t;_QlcpxH4^-&ff8YDn5St^qV&31x3zBPorbPE>~UoU;n!-Ef9$uf*$MzEMCsHQgbZ;6x~F-_voMoXA50f@VhAs3BUS$E zJxRRScTNUx;X67gbm`Hf9p_Scvdn4UJ&iVcaSt=`CEq|~sc1q2C(%;zrxag+!h>%& z^!)Vvc4*Si&9XWGKVdgbsYP8+%tTFb(m)FAyq1{GT?XC8AP$W}jV&#RY+w#}N-h<+ z=|l(*!M~!B_JE%VBeyMlh3_RF`&p1*IkmN4&x2VHF?_Qxp$e=pJiHAdIAb9I0kBTB ztq`=C(XN6o)RlO^-1h&|=l{Q4fPeqT#uf$Bu`VuEvR?~GV$=X6AIc<1A(`~#TY7Z; zP6Km@!X0GI??DSKF?+LWj{M}}Vn;FE?zWI?=5`0|+X zOp1JfK{L@aGBUF20ow*Ay4|4%Cc;RP*>eWBok)J_zBG(HU*(|p^PI7o$unZ9>vI~} z{|HR+_$}xAurjkT#tvdICWw2Fv0wej-pbx$J{lKSUERVaVsRKFrq2>7-L73%nF)Db zAwUgcoOQj$V$AN4W&NC*!ec^l9zZ{DaodW9G8_!;v3OX~y$ySX2~EJ~cgFU4+$0J0 z6aU?J$zsFJR~WQY4_GIm?iPnVePUAVIzLf`x*{HqNM*SXnu!6;g(KF(4rSDsPlW11YgIzG{zJ=05xpyzm3} z_Z4cL?0N&Ov~j6Czn~j5=wj<6%@>`uhtnkhpB?Qjm6GTx?(B-arR_CipoU7FtcsxI zi@NOU;-WuCJe%q6;#>-%mL9i^@L5psH}BpVpt-)7!=mG0w|0xS5EY#A^pED-7 zY+$A^C|(2E`>!!c?@(6&aR*iQMBtlYQWwNbF1c;_MHwVtaM{^ zboAj??%UZ!x59ITVA{w~?z5+$8}*5Q*&Xy7wEJ$$0W9Ga$Hi=(Mxwd#Nn8SHK#F_! 
zz;cxD={lp4Hz^MJNvsnOX3iS~Nu!nQ%N57p4I)e6x!PZa_thegYuHE%?joYo^Nj92 zdgvv=3ajF%Q>j-EpC*k!|EWb2Pw`iS*jK!)V)X|7NHe|Yuy})1{dJ(_|?$;3#;13?7PaF6Do}lZkU|km0@OXUVxVCxeZeH2!T$hg*0q2rMXt!dCeXu z8dZT1dBl5ATd$B$pFV9pA7}57Wf5~$5|}}$t7l|hx|Gkd^g;LX@)X7T14Os-S1Sq= z|H`!RRTvM>lF`C&q4vmrnVVokNj1=$4#m*=Fwx$wSzz{da3FvfKw6By}CB-O3)Cx?0o+c9ys@-)Dg*6D0Mx# z-8CbL>N=>JQL{G))n84JUSi$uv&V2boFKF*wf0||Kc_owl$CzqW=0nC_=d8WWA%LS zOcnxFJyqjC(NoBpT7@4W6JjC}m-CC${E#LY4}hqKcwXba2(Zw+n|A@pZ&T12Ou-?= z-<5qGHDH}3VFYp(M|WVhQ!-8Mt$(X&f)UKqa;Osv!;+mG`t~i~>etUJ0hZlJQhC4B zeed82$y@IuMTGE{=IzfH(6wD|YI8<@pU9fw{@(GVa_%Jh#g{cpRdep6lHySa~2|7*F#l;Rxmp3FP>h(Cpgjckh zfKaE;(Yf&X9geZACk8x0JAol$9tYa9h|7|MolBAoujN{;cA;bnK;ZIeT*ID%#P!H2 zXqTl<@-t+>D#q_uR#DAj=XhfNUu9(t*@KH;f3AMrLXo-%aaTBhx7iPjvUtKivC|RN zf=cbb&j{Ea{x&M&C)0-qH0TG%5mCg301#c%Qkd10TqXNqbJTL?PFpe&a=_JC-xp+D zYS{(8G3@oDRJ`2stVqJlOW$SRr(<1=R{Vi@3euiudmhGKXeFRd2B9 zHT>V$`1n~>-`UOPW^GJ#mNC8iT30u>d@u^t^Xvo>UB{~ylBf5A1vghrLEVK_{B@Hw zBG4>`^}}W|`;Cx2xiL)4a3x3#o^feqe_?+2_00=@iQ}*mlz{DyI)Y5?+^I~F|HTsV z8Q8PCq+9<|0l%P;i;o)0{X#oF6oS76L*m(#?ayr6GicDN>RYs7hwCg|vT z#!sw0Q~nlDZeUxA>J_D4(|l(xTe?)Y>HJyE$j5zmw|%&N?D0ruPCx-|VBc0>-VHmE z%@4uo=5C*b@12aI{wtrbSR(^!5wMt?^am4e2N~}E=JdhEcQ{;|KJ!9&(q{66)sAyF zX&QsU&B`W|*}DKj&#E#ymtD9l+QhQeQSI+@)?ExJ!Zx@3{{3Z(SRv-cGaI^X$Bs6z zl_vLDv3$9TKgGE>L>7~yAOGxjd+u&VTHp?@>HAa^l}261_6E?z{$xULi-6DcAfzG# z57y4U;TM&>_sVr}5}T-`RK~3T_Tg99n`K9t73hc&rA_B9UGnjf zTUSZor+W)JMx-5M;>}1*jY;e2_K-5gU73gJH~Th4A6KmLS0N^`sx=ZK{~#uJnOh9L zT6}!WI0UrAcjEih+pk;_W+kCWB8h%4_=RYuXdejD&Z;nT*{_($WU2 z7$i4|87Eubl9WyoWsz6auX%)<&cYLgAj6clr+5o|*BeQ25d-1QzcG zv!^YSNOw#X<0E*5(UM&oGZE|f?FHH`iAlE9W_M{G2O}U>7!j;8ao>%BBs6Q|Lt`^0zVC~sdc#HxZxOxE4aRmnUT|{aQm55Q`pXw=#L^3 zx0~d1cqS!vdf>lvGB`4d$HX`U+hr(>vwZP?mgTNpeJ3PqjU7eEWdPCJQbH+%B>Q<} zTwZFxN@h3A=1XqnQs}x~FQ?AS0ToDy>QtFY^^`WxR9|0#6S91oaLjQkNq1owgMN{;oYdJH95pv8N z+L|xoMdt%MU;E@Z-iXYPWLzEm4m;HpKE3h}!m5R(^_U$adP33Kw;0^BjV3%t3?l&>T@8gDyTfY!RkCML=d-x?n>PLR3x>vOTW zI<1^wGTmm4!rwRXbw4w?~*tBJf2x>#W5k=gJw)??b1Y0=kj8;w8t_#PYFfke*I&!@KPv3(WA&xxny 
z7E%n`DTis`%Nlk8(~V6w#obzPb1yicdyw6F{ZhwNbz89q11<4DxnKWRW@`YoJ2%9G zo~M=epb;lc=}-a~QvP3*1)vEnZSCf9$=hWlJnVmc5nqqQ9Y*Dldk?0c;Gx>$L5nM; z+nZuykFb#jG>F|W#2M{(A|C}8kA1BFY=XL-^ue&1$6Ie*3*S!S|L4h?Hy*ag0)&~O z1E#ewIL?Sl>^tli(AhDyO&B0HTFM%_Idi1YW+=fKEsWv}0-8vNIeGG0-S$z{?u~wk zSrdV`tteMM`vL?JhrC_=u;Lv9fzKug}ufm%56(;KcOhjZ6+qoIQb zyW~tExKc3~bu9owcv45-n+v&m+EDyysW>e4mr&b+{m~aWj{88vCJl8@_VSKk%XX6u ztn|MB)W3YG&UQcWhK3p4Asf&m?bxmr4@sa8z@-(U;on{h8c!)$NL`tEAmozC-a7KX z75J^P73T_Tp`^d|RMn|(sp1sNPBCR?rsdBup|OYhU`Eyb;`x8};grk)bvEWD$!dB9 z*@4TH?P<^o0ts@-pByIahI+DMVJT1_BlVC#9*wj=*6+WvRN<-3*u8_}6u*XiIR^(r zOiFg|1(CKRBhLGkt*lXu8{S<8<&ce9mm~obq`n-J1w&K;{>vZYZ!+4C@@H>Ib)cDXJeCy{)vwVEu3FlrqOKjqEDdzFJYH2jOzrj!0RIk9`+^RNh zdV}SI_$d9sBY!fnwQx*;#?&}l%C@{HVF_PWV_g~joQuV>#fvvE$oXnopr?I48(2PV z0_D7*cBnWUXIimF5x>CDR#D_n3XB+V2%8O>Kby6BE+>c*D&#{;OGOQA6|u=*D2)dp z>&g5ttSq;+onr9oXBZq`HsL)IHR3dex1(>WJ!Bj3Cc>@G>F6TI)K$Wfq2yTL!-()m zQP{nsnBRT-8_J%Tr(Ej6B@xqP6T>I{F5&SPBc^bogAD1#VrL$*)h$nQBx0Uoo00(yOf zi=9eWcScQM7?C}oDK*eUeEER=1mn=97KMqO*E1CsnuWxgk-XhQ@0m^?9h?CI#vP?O z+yKOPH-TA{n{73aS$lfPQ5N8>j1JmprP1)vvdnvhONaDDzx&YEfDR;1KVaimk^|JK zI~@^aJ4PzvhGck>r#>i2WrXL~XDUlf3+p%zE?uqhMiw@s`k$ouW=06h4M=7}aY)zX zgw6H$e^NaT?iUmheolfhEu7@oW8(Ad%|}*x9O7k>raTF2ZfKrx<8F+dGnCT-i7{s1ONX*woy6)K z1JAY{rhwdHEBQhbJVyaW+3$BjlbA?sCQy&^pp=0+`ZW9m=9U!xt_kgNAZNsG6Cjuz zqwz#lzD1$*k^Vd;xs8v92x0|qGjS`K(u)Hr-q+`*{_=}+sD;UxKei5hSb7{Q!qE(u zAv(V3h{J%8drXqa3gStoGnKzx$oyZ$m%NnRe-8#Zqb-lb~?K=i<1p#d8Hl+epQs^kFvtZ)5!mG|JoWG6+9YcY}?Y7$Fc-ymR_GRTbWcVaevKC`$9CXCOM8 zwR*{E6E73hAiz%?qR0IH1h8hv5H{^8ONGu2X%_?_LXm%M^{tYUFtFM^%V=YlAV|fr zUuQXQ(amJ44FBn?+}kt6wx2okz)ORSAjJmNbx9s|6!C`$x+N3&og%tj zG_3RzlD)ymX^q|Yj#%3j1Yp~X!O}G6iME`wqDJo%zKq-``x8v!5fWeTxd9?4>!WrA zuE(1IJB9Rq2HlTe+xv{1$w;26bK2D0;!fRbsTlY^R*Nq}WPm-dHQ zj(hZ&cX(oC|BP)=UTQrNTRBi2uteKw{UA6@BEIS)8?Fdgxm58L<#^n!YX0)_tcDNW z9u9`TJvdBUbjwI5W1522{R(ZmCY?=WD)Oq5ony3FF$uU8DHu^lhMpJ zE3%9p*`U&7w14_ZIGg2A#n^93aI~--MGFkwwle1spatXF+v_kp5)b=rubwZ;{ua@y 
zwQ|aD5=R9$4($&{BOZ^gM`3at(pcj*W{hU9UO##LT~QH9!czVULt5!5Cmlt4I7Hsm zP2_7Vg^VXmS5r~3BjQ-?B0x#X5+it#cFBMqS7t2z>Asqij=DbuvD2bSJz}JX>k~tUC3F@g{qU-?)H(sIg@$K zlxTVf1J|rsIwdAS7utY><4XLpG+;;q7SB1q4tpm2HT0)5plv0c--{Et+{7n?-M)nq z7Ui)H*JH+SDyP%a)4j>lc8LUiK-I1d+(2mRF4k`)Uo-vLSBHIB((&Wh+>dGA>+*|M z>V=vT<^a8j@*3lRT<}1NiMMOpMLZ0fVKVk*t!?af#K390pO?v&8aM^l-~GdhmGueA zEudsFA1&or|pb^v;hs$mJ}F;VtCfyg62J*n?9eS?a6KRTHb4vArQP zP`jO5lI&|{cMU{Wa#6^29uh7?aU&Xq3PV~fnfRSWtwLXJ$>z8#S>j?ce$Q$DDe zh%cWfX2-Z?z6mD?W4xcEyr0~hck9*`PXp%iV;mQ-q-Mk@qp`&A>bknC@6i~KUD1)K z4w8>}q!oe4d;u`*Axg#XXqYuf=hKdn3uGai|1c{|rAaNpj8C;&4*-D_NgV!+xie96 zg7WGmh@~W@o1rd}ebQBBK?2*zCPZTtF)RP~hB*|j8WTl4Lm6acGnr+0t|pLWl<9Tm zcpW8%@w^_TL-X;$9(*uT(LEMz_PIAzBDa=#=@@071oyy}Z{NPIGC9D|hfA0n2$H{% z(&I~KD=#Odn##&bftPan_3QKcu{arn*gsh<9X^#}a04?ZnMQQ$g1C#Sb+S)E53NBr zS=z|ZC+j5{SrXN$XxURkh_pfwH>mqhdj_?mt2nowKC?P$x6HH*!Ll`KHK@ zdh8H@m)`%5_Pds(*tiH=6K5opkKum*tvkqHH&tv95XA@Vry+4Up&e+b?U#3q1Xj&& zA9hH5>wrY{TwYhEw}ria@XY*q^X$2UYki;!ahK*^r8>CYH}Zd53jCK_@IAZj5dv~n zG~Y>x+YTJ^r(w$E0>v3$X zOduiGfC_~iS;+qk{xWI6=(#i192;q~#f$uR(Ev1BAF#Sj+v3r zK0)EXT!2bmbqd;*F~jyzr%+ECjc3oWim2b4$qo9sB6Lq)( z5(#T{HxjMmmdYL``shK8

?+e?2B!?yMDHhi7+3U$%1)!~uy7H*_>MY;~pJuP+~S zMsIgSmKTREpS%nvuVXar;&N4>)8#+&bjp|f1w6img){tps>bXB+c^6$hK}ACJCsN6 zvzgOg_8(n-Ou7@fO`Thea%baF@Mmo8y(7aKx0$MU?$pWE?!;n^JOu;>Vj=AH2=5lb z1X`&Un=Kh3k`{+&lpYA{c3kVfx!1^^bTInIHLVART-tu%z*g?*Z?9C_Csv#y!-G&P zu?XwkX>T3@SszowK#PFJ*LCG^;x-&PgI>Ctk%E}9mBwp&cG)8Ac8+`X^D(?3p%ZQJ z$_;5#fo)X%Q?CBtOAiU{RM7&3T_gnwD>)YmY{0KH<*D#e-rZ8#I&rZgTLEc)a&sO6 zAfp=8;Iyu3!?-Bq4Aup*E$@kG7e>@zKN{x>s@gBA3^E7IDzn?Oz7df< z&p(o-FeQokOTWtk0xvJSKw*l~q^@g0wPA@l#T_p^7>vhm<@E{5zcz}&-~N&7x6o-> zl|eAG69DppC@0Y7&SxW72Rf1ct8uL%1*A59W`H7a(UF@(c1mzp-$_H?qqGL%G_Q$-c$Ovnl_*2%|iDH*ClD?Ta()@R>M}I{cg4wZGwO zC9xckN2D=@ODd#qYkgHrLZL>o)W?2U{Jwo-#l%kxF96fxFaF6U5E$NfX23bC0&FY@ zr{D~u^MH#b~qZAG1cFr(8=&x^g5{3gm?OLwbHG{lZTm?=Nv1iW` ziW2XphU%F8M5~REPpbKqVL=Y#XEyvOnoK_Qs4d{pl7xfDlRKd%x^8(v<2mGYZ^YG< z6Jm>SvH9b}MIPDfhPT^Q(Jk&zwm@rw#6>VNy%UTkO;e&q=?WH5WT3=f@f|)Z_6*J` z=f%kl@|oUVCejcSbUbMHEtnLs(`q}Nf8h56luV4?po$s zgk{}wq@V35N(Y`7D%J1WS*yXwqWq4NgfFgmS#(f`j;Bxtn7g_mGJ}h}VhFP#j&xHC zz(>`aH{<%unm=EifcnUW5g{xmT}qsiVYF4s_)u@t$dmcj3ReMCfM`DX)9oUT2tCRt zqoF!*b1%Ouk4HJrFg#!L<&;U23=?1c4fz95EE(3s%>lGqvWe#Y4ALdH-B15-IN$Ql zhgC52ON~23DdGWh=2G&_l9CjN88XgV%^BC<1ch8+XyzP!j|1s(d`nmnAJ7&T?4Q0ct^|!w^ zWxlEZgk#v^>>2O979RTj<@?pyrSAuC<^OEdkd`^&i1=Xz-FWvWLi!Xa9}vxUi*DDh zvh`_|Fa4AV9&)pEt+h{25J-~MO+{aToubB$hHb0eI*Rcv)CyJZ#nmmpzy)Kh+H?hg zAiAk3pmP{w3aOK_?{kho1jnx0#d+t@zWaMEMy|5c+YvqdXl&;#`w+%uFQsueNk76s zq({xVK{PVQ8s!cag2cBDk8V=I`tZ5R+p2d_oM6c*se;p7-f;j9A(X@B;~B?kAu>qn z(XnHJ!>EokDH_O<)n8YwSm8`1S$W~($CV06oj-x7!;#jX&l8WJ1PnPvCp;!2S^R6c zf=1WGZwD(f)%VT(g1BLq5BhG=Z7PZgO5G7JWsMbD`0tH>t zl-GLNGz#dpr3V_`cdl?WPnYATen?pM{EBwc&mc~ZrNMxL;T?B};EZx6_K%KpMn5q- zIOls@9Lp|D9?qOP)$L5$HBZzDVy>kjMup5Bmih5Tgmkwtk`_rYh2!I!rh`oi(k-0omhQI8BRAObet47}gG+b+picEy0F_;}pBT!*-LUjP{*i~a zC@*9d4rW1i;6pIs>fAsnL!lPX(nVmoE&Ip#JD!D8OpYX_SK8trdKHp)dng>^ya_Qn zqhTO>o5g@DDsun^Wcp^ZFL*;k$Hlwe=%j``^R9{(lqM7XJs(*f(aep z)YY{m^Gf;sSa^YSG$!vxvV-N#%S@McE@OR%7wyUpD!2O#@8b-3yAz+3d(@NKt0UKoR#i 
zkWseO-TjXhOtI3caBAq1BpO&gPrTQaW9%xXA%AbfPd`XXcR$8=Vgjh*@aW3gXy$xw zDf4_@&-nG%F0|8(9^K=bs46mVx=mZdpWx)h3h&DS(Ui_u?mQ3q^A+b5?W$pX71^TF zdY4!8`tCvOS22#n<@Mj~ls|Ju3Md4VFdPrTi+6>rzkT?YcmY$mZW*JGP^zldX$nV2 zrqz&kt1s^b4dHSPPQslhipTcOhB-z~etxLyiHjF^Ka6v_Z4j|ZDP!f5CE`GQt!dqt zMpeS0xr69UtEzGeXsB(4Oc?_^$C%UWY*I_ZFC)ag>D=|_hJIO+ zcY(E(1RQkJeJtlKSm4Yr&f4#_)j3G6GK62%dzZ7a%9x2?59i;|J3D`zIXE-2|0MY8 z0z9pMBwpZnm9LIQH}LR5$Op_u3y5%)zof&=;yu^Cq0ES5!(7c906+npT1#HQWy@+5 zUOOUC&^WkwismX4ipx1}1?Jw@E#55T&Tm`}!`{OwvUE9XX@tge`WacJtg_Pe=+6~< zqt+MnoAg!#lbHY$gt6f`J7Xo=_nDpL{*F3Bb`qR>UI{Or-Iq?Elqn5?J(dnO zVF@g8-}efG@n~Xu+D>sU=in)<#bp>=MlNTdXxB6jFeu7;(_zZ>}us(QzwJ>d#XECut*dmC=+r_oWXlW$tvPMIsef3rQn#8zyIrBkCt+f9Ah;~l3h@_c zzKZF)%34I^hZBw*J}g3ROa_J4cX~#6%Mq8Zd~_E#H$xjN?PH+dpD!(VlB&>KhDnf# zPi>${wjVuuqiZo7U9Q0C{e`WClVK)1Wtk=ab{;NqDh|gXVZ2NV&-gCCp)pTD5YXx{ z0SjXbkN$m(jh76)$#Gcx>CN;4JUJ^IgiO=L)iS}%8#86yDPUwrgAtGQIgss(+PG{Dg zb%bf5P}{`MwRm#Mq#6g$DN}r?FqGOs%NY&2n)oNPjB)c;5?omwz~r zl{eUj(si*BBQOi$<`OBS4QJ!LKc{vh{Xyyxn`O`3xtm&%S5b9gXmbE}F*i&WO!B3T zqjr7gJ~Jyoi^3!5cOJ<`h6eWH72o%gmi=(%Vhesuoy>Bd&Gvk*p5n=n4ui+|pdLCA zU8wE}=FXSgPtU>Rec1ik&z?M~g1{d}TMh54i4j<)p|`5>tIT!f4XfFUrowVS(Ar4- zABGR_0V-ea3~Mlc=ndGNAQAlFLxe<}K!?4p3lM*#$VMJ-dO{9uk&M+-bfk>uao5M%GX~tZv>%4W-@f3DE_@ zft>=%+b&|O(I2c)%v*2QXHpHx68q?#6mZ}I(NW_vd?J(Updx9XpOg2`b+o4op5YbD z6mna+<{8)Oy;i`q0z-x6W6)d16@K&!ahZ6OHWdZq=pHBeD)JgyzJJPAijm0_Xp@-P zo=xnDHd)9t5cPf%-nOt z1+XU>ZS%Qe@*%1m-FzR|UtlKwK!c6?$k7uTMa{`y+}z&bM~j?>w!GO4NDGGA`A7%D~RWfAsbLZ04(+ z=KZZt&RQfqcoAI7%fcf8&?M+Sof;6;u|rP(Q9lPW(J3k>)sMOM1EZ(Kj9WV&<|@j% zlcBp=uplA9^97beH1gY)BWmxRQhNP*9{n-iq33 zap|BaO)U(9<*u4tC=^c4caozS4F}Onr-!gF|58(D8h*X_fEQ4BO!dD1&#QUt-etrgh`$pLX&`u-_TWwuG zoB0ZIyW`nYP)OlpFeY;YJUJ%6G6AXY8xb64;{YX?yAp7Lc&MqZoma=ma5K=I%>=?! 
zF1|8VZVJ7$QVKP>lm?DPsJ2bRfRA{HytIFlaEN%+-q^Q+R`|B0&BzGuBn+*+!wjZUbgyQQoRt zA6_sji5b-R{rG#uFbW;fhq6Xc@9cEC0gNKcl_>XXQBB4yQA~)scWmyY=)kAwGy$=# zj^g(y!FGGJ|D!BDq5gBJye83D!qZFKWn*Kb#R`a-B;{Q`VCc;;v}7A0eEt#3e!qNL znx@}BG&ZKpzdzvnH-Qg1UH+lbflWkoMce8#&DmSDYyPIfs-|Hx8jIM=UC))4ZT9NwjF!hO z-@9TSx|jAY-kzL$kXm96BL{Ug0_%tMQ9MPj)y>(X1DodWIgnsBf|`_6JfC3 zXiM02reWh4#xnhdahFEkDTL$0ouo>>b_Jf=x@Hh!0C6G0RpXU zTZi64)L9R9axRhV#_irmegnY$7X_Tu^8Kqt;yx0fV0(Z4!*Sj_U_cd&6TAYIIQ`Vg zh%?lAOiB*4Ah0lu&2}UtM3UAU9B9~kFnp_N5b}>WmQoFBmPM_Y=Ws!U|FRck*C2WX zK!YhhYm@m%r)FEtGu;-ZVo+m)ybrJ?rnnJG80^n5e%1c@hE}Zu$_hs+YB1D7-PBox zfP|GDG8+$0S9w&EH=Lb-46coD41sYeU##i~`~hO`Y~wYouXy$FLI>~^jhzky2k6eE zdL(z-BxGK^=s+*phIXzEeWRc zhF%C~V_E#hGSv?NiP>1J?AJEy)WD{a=)Nc4;@u49kV9)yaVKEp=Z!tB042hBehN+DUY75*aimoi zh4p(}uuW5gpAzP}H)(bHOpk^ChJ$i|m<+IrtL;Hialu6R=cP=4SW9mmmiN)Q`4&qX zXzMnn)>4oSjOg;f%KPMFXgTmVM3}|0zNhKv8ipUs!v908CjgtRqDYIKxD2z0J;1?V z=KTt5@n|En#RhkFwDVDqgMkaOqIVY0e+?q&za&lwk~(5h9m}rUJ`5i|56=$z9KsuE z@7CAU)Q;{&okS-*m`P@Q2oJm(}phQrDF~1=5dCtIREzQN$`Q-A74`w zOD$}}o*ba`N}?nvUxR47Go};fLb{Mo9CZ*a-!I!te3q3_@&vTNK3a@3NXTnpW>r*Z zHobXxVe3@YwLw8_=u#%mbpFa9TXvi+IQjkSBeejAPENcQ{wg#+Z zZuP1?eY$wL?Mt9-!a87^-N(`G8pz~hZI-lhXmiPQ>2nI+2I;oP!;S1c)OJ>p29u#> zKls9zMhGlFl`40cshDK|st*$$2;#BmyYI<4js+6PimuYjY#o>&7q$@FvVnfmpzy)a=B`aZ%+>Yp>PHvvNs1GB2I z@NinrcEe8dBE{%krgu(0J_dTbLF%iRG-!0?f$Fl?;nY0xo)R(u;4SJ zn%iycx`#m7C)Y`#YF_QS`M{$8aseo9j7)wL-(3E3>sef&5U@u3Jm}jZ9#T98tS;bc z(g)(X=x|;v#RUQdu8j)mJs;eZJ8W!*Zf9!?DT&X z2qv|niJS8?vLj{>LBHnRb2--}s%@V<7@WiT_B-ahd<)6(O>)+#jyf7 z{fXB@3S}|(OLGyvGjN+~7=>TI>bH?d z_c#jivvH{?fKjjOBA*?Hul?c(64iOX*Sj_wgvC!gYnB_t@+6TeW<}HUi2rZ@eFV$d zBk1(S&I|z8Q-j~w4YWDNQ*ryIksZ|3w$0TNM;IQXyRA=}XNn5lyX2RKhl{ekVH|m6 zE;R<0(CtRhgo7PHf4i-4%hs(|x$qtdZIzXM=ox)G0d7^no^m)tju8<_WoqpMC4po)7O z?=DX5PkSqFzsGhl05v-t?ii?7jp%3{#zYdD*DDjq6naDC{j-qLx z(D?ZJE+-?~El}L9*5N4?@7uK~0kpY-Ptt!sdv-MHqFJn7(d7|T+*`*Aljvk}m?-67 zba)RCb^xM_zVT>!cMcxuvwKSOr@WK@klBo86xi+4?W(id@!!?86#!!2@_%H31BPZP 
z`z&k^n@v6~yNZCwwuoNs)Kd225U1YRfZyv67|{D^)v_E^gj!E-F7l*@UD=)&vO#7vwq1v$-vWwCc$#mR|gcN)=WU`}w zn3G0RKPemkpa?2ooiOn}#_?O}R~hACwq6|EGmcmm02W2XVXS;nx+9?+i%3qE(j+Yh zK~`zs-fw9T+Ze>o_>26504@cMl((F)*x-IVJ2A%S&3;2|+5ZMi(-5e-AUQ*`&3UH?N1OM*y^{BJM)|i;>4b~Jm1r!ezlLrSirG%NRX7VJ~bFwPTEdi-E zn!Xts4A6%V=&U1?6mQ#ARDaGtU1X@68@^LEXXEluqU z%66!XANhSfHss#hq$#3JvKuehAoH0p{iP7}MSL1uk8oo3?}UN&G{>?zAiSJOy?FE} z``^CP@v-z&b6{8bl#l4pcdr5y!{^iy`08> zy?s>Npt4@d4old7c=YLZaEhKhY!{bo%v@IF{UYp(C$ip#E=N;0dgb1^lSZyF(uV|ll3UAy`)GhlddY{toqjD&6rWREC~5Wi!>`Z_>(V!BB_G5t#% zLZWK$E*QN@&+v8V!3@^FzNLQhNzy5JYZbqy-C8HR`JH;{6Y6{R;1=1K8vPPtE}T0q zRkSY|1@(?~Vi8IVrj}_Dys3A3y)V5L@7aMy5GGds`Fo4xU= z8wA4v^WaAHoeu_DL^f0n0^+b@G@#vz(l9>d&Til%SpyN)#oS{9?4k6sRm+zb@I?~h zk(==pi)?~00mKeXo$_(}UYQGmh?zSS7^`J0fES_h565xUTm;DPKg6SDm{N!otW^2eg6l^>T z{H_9dxM@1bs%y)~SRf^VelZbw>q(K7puU8X+!Cs0RMrAt-!SUr_#xv9es1+hR0rqw-Mu}i4qD{&8 zR{t!5)nycVn%)1M=#>foaj(xyhODx`j~UWG<-!f3rfRT>JlM+W;8ZM_X{z2TO8EjC z0%XanY|Exid_J?*?>L_z2u}O1fuDc}UCZfxbepsNVw5uDu0{WO+-LYnFAG*L9B=K3 z$AZXm`1~&Z(Yoxo58lkpr9r!9D_wJL0N+YDl`xS#m$Z^?1ZHW>Qtg<~6|bPF5r$bA z`8J;@|M^p)(vgNU(d=OkQ`Ip_u%Iy^S2rMa_tnSSWA`5LB5BDsexP5V;F~q!XTAm{ zx>XWvFEJI`<;<0b$gi;<0PWDh%NbHd^n7~v zf$-)RFJJC!u6g(3$ltFHLkxJSHSA5yrF^ZLDD$oRGcK!EE9~{_()N(FNhiH6wWcN) zeC-ZPkrm-ieg;_oh^j0Vb-U30Qs2u!7N$b`hbo3aS~HPd2$Wb~P9tOwp@|(=-B)U5 zyQTCI)DGRRLOc4xVWBiutn*dK&WG2^s&ez(-^IvRbtV!7pB}YrKDMN!Zcl`u+f(>WXG8)K4}GHZe3Qx?ec(lRfFkFpW2|;4}Or8 z|2UP#fnrBURBbmi&o#3u`*q?(hwP_3S3`h2kQwtgZS?eQRX|dKsOu8jv*8QWS$zxL zg$Ej9I#;>6hedCx;~g5E z@2uJ`S$_>>jtUUy{>`w#79NqfI8=M&N(C%}#N#MR_ha@cVG9;4%ppCeGV%B)Np>-i zQoj#N)psaZmm1_!@RtGBh0askusWsSm!XC6Zd(zE65hYz!z#qf!rV$-y#oueGpX%gQV zEKE&ilq!I(s;$r}sGUvP+}^Ks`bmP-ETL2@zaq?ql*+CW5PGG-1P?LogCM$uRT*k) zNlr%o!3EpTC~W4)J%4!NY{AMsy`~-~EyH8B!O2k+W$dI&EvoxrwA-LufE0s@hdwQCS-j|ONC#G0vs3*%V zPCAKk5#X9GTzaKj>SvcAiJiH!j~T^A^iCHB!PH3hy!+Bgw8ixPSA8ZZ z4Caxpj6J{TG2*wyX}4g9i25kLnh^|*@u*$uyios&lwjvK^xd-Q;Mq3F0Z*Piv%U|! znh&FCTMy@h$r8q?N0f67ohfA*S*kSUnzw=9rrfS? 
zhyHqIN*iH_Y05!AwTROUfDuEgeLwVOf>mn@US0Z`r}nhPqv5%#-f|8$8*vDytY@VM zeu`L)>`^#_v+RkJh+wV`5-0Tq*~;y@h7nDcrCt+;6IcKA^h=%Tqh@s}KzPEJEIH*9 z9UJM1-c0-GC;DC_;5B-keB)w&Q&DvF+DJNF{M84p4dSWAxcsXMgLJEah&RL|8Lo$` z#+~nrdVj-*=KE?I14cu7GZ`uiWWCaoh3=MI?PnfZ7o3JY1T630qj0_9%Dg4O(u4`p;8ngOD^bKLiZ#gWXf`i9j!>U4r7vG^O zN7uZ9X6{?LwJa(T-j3S<0|P~lPS7gGe~`!yMa+ldBs_L?@#6?NC=4=?B)}|RgJEzE zCZC;gvtD>qU~J>_PG^CHkiwMa5<*>ET)Y~<1RRk&u9yX_Rw@wnW7NW4oZ9j@nb1Qp z*(GMCs0WgaoZ%J&dL})UMOQR-(TrrREA@1Bs|arH*XS#ury6<)ZveKm-f`q$B8gjh zMy3LaiUB6Fxe{?{ZUocT4ECTlqnY)Qx$*5C$1g#>g(=iCQv}u|M`7rHn2xPV9zjMz*yt=3k ze@gz)_i!JFH}7h_j#RAeU$cvIPgrf*3tFy5cD`;x>TN)InT1^b%t_jLS-ep9olHPO zFw35h7A5{tbFBsrEI@;hki$%a`p>?Hl>cPMn?|W?KKy1rEhAQ66!~Y)^ip>rnVfHg zsGOY}jB^8wi;B33lbnVQDCmbf@uSlvo)e6TR4nx9VNT_Z%9b3M%Z_YY^ensBfM zZrrgNb5+`avi+xsB~szGoztUxcm4kTZzhASU7d2!P{8DsA@5YI; z!`629QX6uV)9BIZf9^)D&KYRm8o*r>_~g+2)QSU40_2YWwtL|-81dzK@@X^P4r0dsIwIT1FPrAgz2;+Z>f#*Cie1FN@EJ4cJZh|4|F zUWK2ad%zY6>R~(L1GbP?m40#fZ8`MGqGPnCl=0#vapBpKv^;eHBcOw||X+lUCMwM3_Ne=h<-uzknq90h5_=i+Y*BBCIQ$d%^ z)4Hx%Fg`A)4W;4EloeCy(x`p7>|jNCs#Yd)j% zMfCM99<#;&fSrzW?%^4LTcdda$86rKUTo-Pz~KM*%0AmEglH*Hxg|O4h2*+xC8ISy z=$>5G0_lK@gv!Aw0Fx8a=5!CHF06NV8IN2%8r{r+WcW~ak=T5NibB} z0p%M%9E8Ibb&HO9F&kb_-DhbC14SW)Xgr5tDyLpO1`oSx(4K)6yhEE0=N%f@wDALk zg;?)IMjb@98~qsa^<;8mlAef^S}PN;O$7I}ASL;~G0G8+%PtT;5#hdyA{HSgZR#qyX!38ZqCC7cVr*=rLhlA9=&(#&6mWGi3}h23v8Q!l&)6nd0TQFS+dLUS#A{(AOzL~(r-=jB=g*J(AW+4VaYPacE1 zq+BYilK~!!F8$`^r2w1apTO3Nii;t_w?7&={Xw!RcOeI+qXGwvPRg$5?AGDU!#};(9NqstM?``ox#M?M) z4n>96)zQ!UiduwzJhipgtXZKlA&5blE8Bcx(vjA*Jx3$50UC%trMcBlpbv;6^>6g| z$n`o1mH-$O(afb3@pc@7sZM0;cAvOaCb|mC0%~TL+Ly;NJz3T(@PaC)$ zosUyu1>>I4?h%(pHjO>fB1egaUEt&&UVzm7_3HfkmUS5!897(4Zmha;@nTnD4=sE| zfe85Aw2P8x zf{@XD80x;dcXnn6vVvL)3vw_)xY{X*Z`OxWt@75QxpVEn+$yUH#Wf^1DPmvDA#>oX zm+~i?jo|F>U!v`KM7>}yH|sGy9(ZH~ zz<>$vuf{u`Zv>s+##kpiEqwv?*rBQX_ygzoU$Q|*Jv~)6BzLtIHWCP^Y5)GSy&Hv! 
zhgI&d4){X}z2a#v5=1x)$4j3u`x=kPc7Q9ljfr&Ldo4&E6nR#kHw`sfcVO<*%ao;s zkfXZO`FYxLTMr@B$Zg=Hicfbu&bi$fAoWT0$7VLdwT>t$I{a42L^Q!6%0T)jOr3f| z@kbsF{W09rXL>p>mpfPE+P#6wByEl?x1idsOw9+l$0E* z*2E0Td2!Yx(nPjACeq4kljz}VQ? zZ#p15`$G@Q!7ohu1m#J@@zW!ip8rkg=n)~q{9+4d9kUvP*x!^mQGINwA zTu`qo7Fw8^CVi-2!h^-%t`^5fhI{AWYO;Yg5O+$=20@|3y&#sE%*6$f>%)S-QP2oi zltRq9{dz2`n6ilDPnVnQh@lOuy^Qf`s8Sir2P3eGAL&R1@V)u{PetCaqq+}W8MAuo z-U+v>amGnLbd99_9BJlXgVVlBK?c#%H2gWnG9t}hu$Fp3)`mv!hHE5QOlcQ*+kF@H zsl$1-igWRohB>p*s*KUey-2XEVavE6-EDR5n6`R8l7TCXsm}d**nHgNoBN>saJ>Wk zP*E>_Rbln)5->$rSXgWjii==A>5u9CJ=2uH$rvR6^}oaf|78rIHb7UvOI}^gs8SIo z=9)b;`+1bM$$uQC2j^`nn|Tj6v^zn2v4S}lN|3{0ie8&3_1 z2@faw=!$;2d<-iNwTW#S>%?$@mMi!jcS1%lQProb^EcX9PG^BYw?Y0n;(G*6OA49T z3&Oh^a7jR5;3TC-7kRMI|ZXJSDC8P%&by(jN6Kc`P>6vz3&Tq>I=1 z0;#e3%T&_hngePZ#NL3?J}n26$3kHU`O9ttN~fH3XpZeii}@cpIVCmKTl)dqWCr3pJW*g{E_mhY`J#Th=c3hk#po#a}s|?a81*|6Ks$H`-s0q z&A=MSoG?MFgcHQhvfWoC&C&w}Wpm?1j6HP^6^R7CKgKaFHtpAMTUj3H&*G+o5D$rQ zhkG5`vu961*IGHnJgwb^?#Bvu&x9EUt5wM5&P$|Q>tS15}0YtOaBn5D>m!# znsD;uB9C`>1!Kok(E4w=uDJ~YkWs5{;Jb?OXv9d7Cvqz<-V9Wg2dtR0O{@XHMd3|h zj;{j)#`;e=;nD;oHfJlPjt3(Qm8Q_kOSd6hx46-G+%)xa-svv~bQVcrxukBOL-)8d z0|hZ^njU5*Mp5

`RF4WRiE(7lN)#>tnj1wiySoq!F$<<5Ji42Evu|Op`-2Qk=Z( zK^wzZ#g413{Fs~YDi859uIQUDw;wprn%>d4iXy2w`2QV$`a4|Y#nXhyCevgqyR`br zT_U>q&T)%K#?$3pcaOYx>sAma*Nn>?E@;)~hHJrCJwVCS4cAT01I|tV`p?auULZ*T z;fyJYsBt|unrQpgA1oaCl1mktkpYv%viW^oAXi;Ju<0bEF84@s@_5DQtE)c;%|0xv z1pXE@&BTqkGb`dP!7WrO;4(uoz+G*m1Q%J-S3`j`sEA!gwY2K#?n%AmhPLwjQ!-mZ z15;5xC7{-eb|c7FEWMIE(SAR3eH9?tJ>wr<^Y<4NxX`+GYnThG1|B|TgNqG4k*Ffn z$V+UBe)GuT#q^&-ugv#CVaC=RZLIpEnCD}p?Sb&Z`NfXCdt0(tOV3klctDt+$wpQY zgtd2>9Y7X8(Blfe8e42ebEj~_?Au8vF8wU+23Oz4Mn?a&z0%N7S1GmISs&VAnNvd- zlOC1VS~cZT%+_|hXLCJLCmMI_7Bf&IdE@Xz1r1_q(pe;L+m@G?Z$>|oI-Vj-E?xXp zx>*}n?N?a|udyS>rYo*tu34{nGEU*|WOCUux$%}T7Cu!CXdgvM_c~8t5LWKzrD!cB z+j@whoZRpcr2gDYSOs1N^d!C!G14&Yy~aAF*?HmRYE})-+~6Kk0Vt zSl~6w>yXURAW93^f7j2RZV3%#E2F4wCT6nvl~5RUnL=0+r()Ov5`CA(4EG;4?J%O3 zp9dYV88V0%*Vl<;)NGi>l@@pk~+>4HjR{+EnyzVPa};C6hJbB zJ^y34uldr1!V`2>%a)R=O~lK~8rHL+)lF&}BjTuuYNk9+Ez?_GINaQ}>u|p;0G5HK z*zALOhM`$s%3h^b)wX-{1DmtN(=Qv0nL(x`xK@;u1TwBrW0HV1zI)JSL~tJIQ)6Yv zkD5Jv>`R0i`V5X%U4x&9AVxq`TB=-B~&*{|3 zH-iURTFP()vA|WH%1cpI9!KlN7JYUPz#@RFA=OK@MGG5<p~;MrRj@ielK)|EXWqP0Dd8gA=Eq zsXQ>^2*=Qf`SZK;ifqPZ&}3RgUBB+%?ffo6 z*DhVQ?CpXNz&-@m@jJ{<$zWlSk=}>V+WVmSKd_l%1#@KZ=QmVLl7w~kB8vD71cM=C zm{S#^KVp;VOI80_+Yd5Fw+&kxT65X>HMEEbln-R(et^3)8;{r!|9(g_>Q{+Kd-IX+ z?m-Wu*>X7!_c3vDMm6k1IwA3)AT+^KIYamD+sDMV8&BduWRcZxIymF}ca;yK3=(s7 z6Gl+lO%Y^Na&k{}w5_=e{gvQXDc2e)DV7-A0!)6Yk8@JqB;o(U z!ZwlUDtYDWXn{mnuWN&QrZ>rh6MgBYI-M_6d*)ZT^S3 zd6bov(jGqH2%J!h_Nu3DS#VAU+3=2(F1k$bMy2;KB7FXkeDMlzg3UO1gM3*Yu-;Whq1ykt>hj=K0G<_- zkj1*v>}g%10BA?)&2+$dfAl5|61qikKQ(LGojc*hi$IOWPn|lyWgbsaZ!T0>C)>7@ z4^z`@Qq#3bJ5SO*p@X3h3y^ojqi;^i;RKX1!)Jr>&29i+u>I=`qo_CVeA0Kq2hrgD z7|OUWSKI_IKO>hxsl5NgFUtBg`Q^+MlL03DPEVNtX1%x7=b8K-12Zc;Dr$@ynEp_* zr_i9f*U4-SdRx!FJXN>(^RK&Yra5ysvHA|m2U}Yu-R86-SB%Z`(%S!~f3T5llupElTK zROdlUmBdldO&rispjJapQ}9JrXxIMuLCfA6$*htA+^9+4zHgRkwzOArlUW38 z?v+}cB@B4Y49etta)G=~dkk=#!Zr7z2?@+d9jx8$$^;5Ie*AWm-ZEU`hine4ra(wa zJ0;!#{CeP50y8K<+T`q}lu(>*-Cwp`%>k-kQhgXWXd-rJ2X85B)Q?QVPDpl6Sw#Pxf>uiulH 
zRC8zM!LC=gm4B;fSa;Pwt*-gdE`FxVE>%AMHS^wudn-;YyN65i z5dT*75DJDDuhE&=V6$r?-kt4x_c|_;F`;OX^4~By#{iL1_zF>|;S5)P&R;jm$tjDD z0tQ)=R=$uZk{UOKpKill zr4mLi%@XJw)gV}x4*U*}swqV0b16mczsu(0cx~AY;8!w-92~geMUwPrn!&HWRk>V` zW{iY6x0LC486x6>Bl|fE;D_?heE&;CMpUZmdIV?zqP}Oo`$B}J z&-M~OAxVhe?auVqx}Qv)N`Frva>SET${-;Pqc;ntMaIPC`Sk9x&}PdB1TjHuf6q=5 z+Xkb>5|wFd4F4iSZ4II?v-48VkW)^2Thia-i&{BcvR*L23hn) z-~k{i)V7#ujmz#=iaT}C%5h#9SrrAGzp4Hmf`(yhhUs z2lNc?bi?t4=-Y9z_o$~~!);h_zQvfMqjG7-GZ7Ndp}F^z(I7H*yf$2X7PN_~p13!| zg#!*xt-p4CZ)b|Fi<;CpVh@V@_UU;?$>h}&=MR9eWFUmLHGG4IoZF|3 zRJcg7=or2nse>tfNw@qLe=#s;xu%dLP{|Y5fr5mDg_Sn@R$VRF7GGw~F1iPHtYh8< zc5-4E2Nb7qa}W`~9M+wvIn{%O2DM0G?46i$)w&Fc$(d47leLXIi(6fiOzuu*70Ox; zTF>JZIaZI|041&n+?*o9ik%cL15m>+R^1`LD4q^~(?58OB`Zd!4jCPFd97d|DUuk8 zB1vHm-4A$0@$@aPU= z2H7u1GYL1=N9?eat%_^;F0eTfAOEv0ayX+s?c(yJ;2f8cBb(#@TsIF;U)zG=Fru54 zB<62>x%V^BFJ+!NQgOQuDa4D{2v?Y!y}O3i-NB&QDzGClCR@G*pUuq9-e~CPR)3ijg&3f_qfJ5ag4iUbYG0f!BDS~Yk6d^u;5{%+!BR}!e=;`cydHbDHn zqS;P6tzeHVok5hHC$v(N}e|pWp^-syDrT2;?qv z&R5FdUusRGJn>$0Zyf0Wz@9Md(cQaZWz9jq5c>icw~v%=Y?11jq*F=M%E{APy9^DZ z$T5A03SWt8BlPMK5exkOF-!y%-lT9#R_M<|WDVIM>CQD&{MmKnKHM0txaWH>d`>bh zP0%#4ABNDyPq=@EH%ZFifut{>v5aKc9;ec>WhSJxt{23%CV;1A(|J6z*u=|}vAL&f z*U;D_U%36qo)?iWZP>MXhEUO^=L8)@3*}0YJ;a-XLqr-w|2OZ(vFi`KzAV$tS&{Vn zwtrotDq<^l;Pi+NL0}`YxFO|y8EKPi?vwosny++(zBz`0kn1JD3|3D!jz4t=FjD(U z)uP|zFmON>vWGu?8!=e9tnU749`d<5j}`%!30Vlb1BENdLEN7h5M(2Fpz)%Bz3%tl{|u*;mUj&^sEVIdpiGCT z1%7h8kl@nh^QDTwzMfh<-VMO1BoV!B$=Jehisyo(-vBBg&&saAN=mNNwCfU~uN$Q^ zNpjPQe)7;{h@6@ib5BFi1Q$J!HNpz!`_!~O_?8e5y1N6-ISq&k&Tr1dAK8GCS)QbF64saH-hm`Ubn!?ia*xUa)oa7~xIfA9E)P22(M zTLdZHvf-D;#`)k?Lc2xHFrw-qq#IX3f}j5C_9uAz?Wn~Kv+nQ%N~_R;iuY2*fh>s# zo>wv1nre#3kYHQT-3M+dF0EOv(KX(n?>~BJDf}!<~b} z5B}^xUOhUB{NJv5?vMJ${X2T8?N~kAEr1-BFbHgW%~I1}Dh@NIPj^v*_gCOV3)Q_y z1ECbnF~psxENw}|ORhqKWyYKg3lCQV&`UQN!iD02FHf#_WT=6Ftx;q%ImK9TuNotuDbY$g{C2^mefK}yP z#xDBnuMr$%`@O0AB$MRE88)-VfShQp;d8SFA6{u{{CP+%@ z_KS{DnH>UPM#$g42v|_!?E3GZjH4sAxYQPF07uc3fa}@W`Vza7#0A7r6Mydy7A|OP 
z?+K{I1mtcaQ>>_IGzipnoy{M>F3w+B0rw46bmUDXRssRJyXVT>#9s;|@kmoibsMR_ zbmhuT;C#B}{H7Njd0+Us4qd*yFIJD@aN+Z;S%gJqavJHrO~zn3a)w)~2z~i$kQwfxvyZ zCyrXQ73w!&zS0OV88!}b8$q05+UewESO5FqNs_FEn$fq9%Q)vrHAvY2_+&invr3NF zI5bJtC#u|nl+eg&;ApsU1qpf@rX$sG35k{8_(M2>0j&^B26?$1<(PB4)H5iBTtOJZ zsA+Q$1T!5%)lyi0j+fA3hxq-6yN)Byl}yNAm+66lt=F{i{q}An?WyUqZi-jf_7lq1 z08GkV@p43Rq(LuA*dQWxR@P7$GIcP*_PR>^9CC|1-J(?XaSn{1G9_nyE$4Dfw$_sy zoVufySIVKo<>C1B7XtubFQRj?0dECGnXuyo3I}NKD22BBm4gq8eUu~|-TsUV)xz34 zb3WZN!;;-;jK3D}MPcC+$JM<(dN8ERnmuj*j~3ux@CZr76zBX(w0?(HWAdnch&m}k zlmspwiaoeT$W}?YxZEBzOBVa$MlNl`Ge00m?NFUsFQnD& z^}}wnqJ|oyuNh^9?)rn?Rw{cVqC@VvU!(kYPi>?M%nuM55=XA>ZA=0ZWlX7j{XwRL zalU(Z%VA!+Nyvj$j0&O(y`EWw>?_feTn19dHq&R>BkD;JJbeFF7AUHi)#K%I0G><0 zIG5m?Z{UzuPun*T#jLfk_!2ZS>G8PXzp|XYBm)h~h~`bD3XQkzf^>q%%m8zQ{eI{J zhQiJ;no&(bTgoQfw^Yy1Z{PjT9N;2=Yk!zc$&tTq6E8%9C^UP-@_VE1pu z3ETabXw?u2<^2{9u^EjEH5Z)a>{nKhB?5icPc(4MRM8ET5NCQKB~=RWqb_qOB{36o zJ01%0N1MU|H6A>bb{ot+HP#o*6TI?9_&(S_w66$t;_et>5&;T45knCCAn>3cv5cS% z0$e=w9D9Ojg{s&p@kfqq?6W>NUZcjCJPZDPrxB75Bo&MnZxgXbFbU|nhmSdEZ%RT( zt*Af`FQXn^uTiMFPuNc72uo#h=m}FyD32->kn3bR$4RA2C&Lka!vyo$rjzbU7AwH7 z#kW{(aB*$@z&qWrNNy!4Muju_@<@Ieagd2VW5$ksbCvBQ#$&p*k{=iGut?dCV+0_S3fs4#B{a2-`gCMH?y0zmF8lrJfZlD$90*q$ z+|rA@RRLg~9u6KfNNmoix5^qQ(a1*$N%GErZXGZwy?={xspw%yGz2%zpaHllUG1HL zq50du;gn{GhX*&D$?Z^hhe0IIE- zJ-^h@11JhJxsIt0+2ma3({@nq;U0`QQ31XtA+9u(iX}V<7^I1c<)1#arh1vO=r1oX zPL7z|N+8p7J`@=;!I`kc-ac*o_vK($-z;Oxb8@n?ONd02c`Biu;eiI~k8!G7=n)F1LLXkI zWRfXH2L|Cop|OgZwR6XgOt6yCqrTGZ;(Js()1hNW0m)PqKX9se+R!elqMJCkjZA4l zpcF%Qru;~g|7Q}u7nB#Tb!S1MzK#_tgxR$umzZsjIahl~tBmJpI7Zr; z`dXi&Con!`d|uD|r$t42kdo;eny>m_AmRT-7k;9hV90gsJOXhBNxwP&3fRJ;qBElt zZhoP1HVjTr z@S2m{=%>Tax;l%KL(p@+>w~6EG6SU-e*1`m2{7q?${033kGtHcQJ^PfMTuEMw4(J< z1`k9x6uz+0x`IeWjU}zw?Vb=uJQnQ{=#8My0`Hz|BwtA^T@TK&cnNL@O!_8PBan}= zcKz>xd0D1^MF}K^9(Nrmws$U)P0HmdOU&4e7U> zd@o(NAb#JI-jo3;GD4LU(UW0ZBzpNPNlt9bzr^*riAqo)4Bq7TxOS&g>{?V2?;$sM zzv8a`^?~4@r#p*Q&~#nx63YmL#^R)*$c!I>!$AmEYdbgvhAxi&C{yP_Xq`r$mAZSL 
zP!JtDWITPk_e-1j8QOtUxm+bWz1n5`_y8egn#+Lj*2~m$CDMyZDY+(TRJRtzgV8h! zEot?HIB71N4_Dq-R&K+enOI9OCZiEXb|Y^`fqP@&LBF4GyR%%EI?aWMlH#7TPrDf* z3~uNQdMCCG43EsF{r3P(cIw8hCqc0%y;;c`%*HK0zx1^O16wApBp3{frh)Hd)mAAw#8y^?`>e*$;TGK6ccd%<_GOt z*~{7d1we5OTRJi}9}t*Ap!Yn@q{&n+s)Gsh&35(1zAJXS&b3nX0E`!G4{g|K;3{pj zB&IE-pIX9*l0+k4M!C{F6rF}6U9#_=Q8zwb%>?WHthVwjB98yny~Z_b@syq z%U-;BsoxF7l*cv&y%rUrAW!l8wYEr+)|{MR=lOkB6UIVi5};2QZ9R4c4FySz#17(A11;@_Qc>-Wm~a36H#hDE(Mr66LXg&p=Xf|0 z)Oh7Stu3s=YkfQinSAtPgIL1-s2Fuj$%#}H$&LlU)MmdGpkZ8iffQf`mlz$8#aC)LjEx8}#Y?Z);fp;H2ZjaVT?bVzsYk!Ngraz^Dmf<*Q z)2B^~-81fUHF+_gMsT5{1*>Px0n zJ29OMhsIBX_sW^yKoTPtOPZ@5noRM$@ERJJQf9vk;fgBtUE=sF_`{K8j6iTOKxQvh zeE4wKnejcFiGXTy0HQQJB4WWYP90jd5sR50p@z}@N3zT?sMz+WTZWP-h$_1;p91cq zG!ft$amRe;kvP=8?L=UcbIkb=YS-J1*DyU}+0n=bi0Di?bK=H6@K&aqjV>q1w@3Qn zdw3%$J0IL*X{G3%IKKou-wvr~`eShiff!l3I&0~vEkaOz{I*?FSB=jUz@6DyCj$1O zc0m8(%7DfB(_#Mk=fd_rK$Q<%S^&kmHSzcN*Y=v`;^NA~yR1hEtNE}-44_Z{>vyOe z@J}3B%D>nw!fYP@xvTdyMU;f8y?{xrxJS5_RXZR$5-g?|t*at9-fSIC1=Wv0{Dvz( z7G)oUHkAPu51O$++weQ>ZgYxv$+qr*#1b%@0(0zkfwWeU0 zpK<_87SCBVn169RBRjquQIv9{_c!2yvTyV+5egUrz_@)Q8B6WAc>C>p%k=KC&95Sp zl_-aznfz|btkervadQdse#4@7Z#5R!@>-kZzG_o}%;)7ExO-X#@-2)bp(ZjqS_uS~ z-1{A&a;0wG+`RG~@1LMC2fYINF*&sN+{fA_Q50J%x!gb*Jxr(0oEc7~H07KK?_`sA zCb^*P@y~X7vbCrNzo;>^Y47wt`C4lc;i~Q1yJwGdxXSx%ER6QuVGko1v;B1-W@h(A z*06i>S$h^R&Cm^uFowx%izC_am{e7IN*JgF5pI;kWiDj>;x?^YZvs}_zg2Ks?-x#{ zkBW-Iz*^K_!FyRzou@;~Sn~Sg0Nv~GGBhVh=3Q&-3+jX}nx|!CDAHPY%Y-x!M>~3; zJGUBjX!S2H8wuaGr~?KOK;AB#W{_W5vIb#%1`vTT=1#YH_~`v@6b6W1<@oW5-bh+U zaB2kp+up9TK}9R1^d{zQe>!NyNT@ptOFOhY@m8g&b+aL)K zU@p)6o2<(odSU777MQrQ)og*cC2~yB!tC?tfcr9p&cPwK+n;>>`D~*g-s7CsTUCGiis_J)~5-n-Md}ume%~sp;tvt2RH$O2RkiPTg zmp|5297Y+JP?UvXdsEzjpj;+hKHI|or;HzdCze1-L23E~*uSmF+0O2yvdKYyWvFAT z@HIs^w30I>9okN7W*tQBUw|z@_W9Pm9`b(^WE%%XHWs*Ne%1*YB z(dmECP~eM^6ykbsPW4Sl+J1$yJfzJH>ww&1K{eL}rtuu#YD`F>Cq7`G5X)-l!g?N< z#g;FW?UF8gXB5Q)!n;;nLLYSW*Ilj3q+{0HKb|FDn{ zD}?{2n;5ugy!zd=Q(K_UZ{DcUXLoRX+dD_-yGHhCDE!q(NiIU}m+QF25VR(E$b1CA 
z|Bz!6HQlAl*ybNADpZhGrH-YVvd0M;r&1nt0nc9Qf zI)ee%tGh@Dpz$KC0;{Wvn-E%a!z>8osJL)^i3?~%D}KR&ZD>tXLgN0{jf*v7L2oQj zxql$DbA5evU_ZRmX)s6USMlT|TySm|DNz(AEPo;ypxsu(@B)-pJI(j>H1#m}XF5A# znxQ*f6yuRP$Yf!p?xx=!A}0e^J>|jom#EocnS}pH86&yzccQ6G=<8JTxck(ZigkrC z%xM%@s=>%IBzk}FMbDd>m~btQacrj3Soe5{AH@y@$K%Q2FWULOM+(J<%pf1Gm_EQR zA+&}tA{lPH+z)n}BgCdo0_G{Z6<6GVjAjK;x*gZT7Q-gRm|u74C|8#s^jB0+-6XPc`0C#3CypQAz>-|=iKdIzFt>OZ z@;^|wnb+~Jh?w+y5=}klqw(#byl*uf(|}ht(jj8LkV)UOmMX=yyNVI7J?5-ezUU3) zj>*!d-jI;i%7uJj=>r#i=a@2E@ov&jp!+(RKAg1#{I*wRI`@?*IOhc`%?AJ89(>qV zsVJdLG7KPfOnPW6?P4~bPDBYC%Ij5o2&aTJ*9~dkuAPjv4!+I>W{PYAGHu2GFMI$V zYch5v4mR2}!&~))BM(eXrYNI!U=5Y8P~Y%NAPT2<9xF5+P7z`Q1nB&Xw&nz!WuGz&#b zmCbrWUm!O1O%^X)7(_*8eG5zlyO-_rNbUgCt7C31{M_8>kNZ2 z=egB`?vqIwhe~_qb^eRe0hX(TdQ?fB&$wGh^08)pkJnN#WibKY^pE2l&U^g$Xj6LB z9z;eih`9 zZ1CavLIuJ$_&tPW%vw87hz7<&Scs=S5sH{tY#QJ;Uk)q5v zlqto`Rli?h!kf$~udHmt32EuYrj|X?q#V18M8mCE$6z2jvSTBbuUaL$wJ8~CHDb|F zJ51Fo%H)M+&!0VG+~7_3;asF>x5Ti}J5ne(4t$rdDHtl`TutNHnvk%1Vqqtr=DZ2% z@#@g$2&WOc(mSDeoYoMjCqNT>yN2V>9DbX}UIL25*=kfvg5s+Kqz9sUZ$=Db*%kHj zkvBGwcMuNP;rR8@FCW7X;GuF(zR;lch zCC2*T+Xk*`nfIymmsEMwe3_T}ba=fbsB6CIoDYi;SOJCI3dn|HkmyJl;R;v(AFtb! zpz-Ab-oxUO66=E(i1C80AD>~Yd@jV|0+|3XBun?hIEO=Xzw0!g3suCMlYl~TomSlw z^X;$Y*)`gS9PEmo+`83*-bhJ>E8sfDf}~2uXp97KelmC&@GUVOah;0UR_N1?oaD;h z7!lFx*E!k-xf1vr5B!zCcdFJ5GQ@woolRiD!YL~z=IGsMs(-wQ6ccxM_uAM5? 
zJ|Gvy(CoYR7!0-8Bp$!KO!x^%C7FrB_;Pp9nGvv+ zHTP&G@H}Tt;wD?-jI*5ndOj_!VS9|tH`3r7JO z-tsll7bW-u_id!IjCyw4Zi}`kx|g;wTRE-vINOa1bLn$V9YG7x2A2*;zg-_EHgl>) zH8(i8x-p?DAW3cb07@hIDB=qG?gQ`Y0!G}8QnO#`Q69P|T=ynRtSK!awz~KRERI2K zRb&D;PkyY5`r3^kvC`~qGWg?1gh$GVe}IdL~)nt>_*VJJKW4C0pD&$jCI`Cgfb` zPzH%CaoPut)zgjdt~EAn;Wf*UCMAg$vPJ#X@676t+lJ2NFb`!D>~NmWYoO8`nI|#J z93eaZ<=Grf?8EoOdO~Ji(+wS1d_<>Bv+0$DEnxOZV9s>DC|8xWx(=bjqnQcf8&f~2ePv)~VSM|6$oL=PQ5-jceZ z#P`v|hZyE}JgiADm||)v&^nQ~vDGy-Bj?UNVmqmRr1hcyFPS)_r)^8<+~s94q^ysfGY21asak_+tC$>+#hacxRx`y*I$bd`R@X{O()azojg+^cLsD9_A%sbjy z{pE`dO{cEfSloGg{*e@CNJbQZ@~27qgeORN=b9RZmFs?JG3zSi$;;lr z#XU-`C-#|^-_|AMUpRl8!rYD$HuemqG{(X>%o2C>KGKZ#y=1g?j#?m>4nl^E6|}*DDe4)U`ura)K#EzY4ww~-+)U;BmoGt_d^JO; zp`iWsjYkRxNLm?&R%(6TqCP1_Nu z%^aWR%KCp)`ZinA&h^95gMJz8sT77ND_bUXPtJRYPJOhe zFIpaHg61719$rZfvtML68H{C)@Mhdx%5A#bse!eU`g((obsh)0spD1ZFi--E5Xld( zg0;Wj?>aeD%vn*oD#>`_I83YL0xGdYB0udST`zq$v#Ntc@V&$AK>KZ zD0uGmr)Np^(^D;83n3?Ape-L!Mv-G<*6yqXNliZ!vUw;?PP_TsaapYM-=d^sPL&ak zj#~n}`OJV^9=+h&U&NubG5DJi5r)Rbl6P}LiO47k7HE|k6JlUnN2Qs~AFI|%R3<1@ zl|``0jDY*RX~|||8>upV@ysYAerA>5T?LumPcvNvp949?5-pg+-) zFhhrKRGj%}Ed*@vWs==QnG>RPMCv;Dq1??1l?LX&oWu0I8lhK5*Uu&)%V$T>8t%>B z{Weg2BJ5kUn#>4Ox)MbhTLTccJHQ)%4B2KL>W3ljq4p2Gc88+*%z+x zFrlLz3kP}_<5gP7U__L>49}l`p;!hIG(P?cqMHv9`l&U){PJ!0>0zdRL)U(-^?zkZ z)J)t-iIZeJS{PD^pi#iwF?;rCs^Df2d79c7J_3D#z3DfB!g#RimzY3Dwt?!I=EM_r z8M-!a`(0+2QZ98jU4@oZW$NbvF!mDk8n_h4K4REgO$?+X1Fw3{NNE!grJHf{VH3nI z&{D(+++v!`!_G}Bt;D%%WdDuNiPpahf|cUwd@mldJ$nP?O56lSLO@;@*u&9E&|B}r z&r>{_^Y7F)+I>q_XqON<$+J1N&yD0?F0MTN^zyMB?p!Q4*Kt*0#Ms{CHO;BSn=8AE zGMSY%ZRa}5Hu1wotMmCp5f>eYMo+w2CyqwW-DkNX?8AE`w2?&3`M!GpzNPSSbAdhT zDcYPOWZ*32?+%k)8Yi>S8;EvH(5rA%X-jV0Ksy0|BG^mOB`yq+E=2cSxU=d_RCu4Qiq^wFaJ*CRR3M=$r)_)AE9i2RIS+s)xmv7Io9J z3N1Ognv)y-pmIG#lPDiH_UYL(C^6AE;yKyPjIka2`I1hP9{Zd~>*bO&{CO0>SWv6O zr>PY;$t~v+6ci*AYJR-K{ik_F1)0B$!mupk*h4~XaaWwLlDRhaH3=`*DLj(?rH09n z_`gpd-o48JH6uE`&-S843l}EjI4ZdKBne?i$=R{7W_Xhivl$``3$3WY3Z#p5)CwRm 
z(Up^X-6xH1_NI2`iwaFy3+aW0G41U!WAlBS#<2TRn;$rD3UXCHw$yx!|H0w@Ic(Zk zo8CQpW}qZ1{Og;19IgbTHy@}KA%()SmrJAEqer}!2WY9&ANOcYh3>-+yK$M1PawXr z^k_yk@r_kZNRT)5s`<%Tq{8y8Bn{lv3%o>tKKF#RaVBLn8_mT*m$O zz`g<7Iat%~Jf#Mg!bksO=vh+9`6#oVE=j^3zv0srg@Dqdlx1LUZ`LrLeIw-GAGsn% z2jc4I-ov|yvy5K_yn1~;zCBmReQ+;-sbo2f3ut=!cn1&~lwQs*B&(?fle` zt~LgG(>qEoFR*#^=UIp~0Fuv4MN?t8AGlCv0P%N;>8c@t2@Rb)cV5>fKEGW$fl0=G zxy-jEPAT_{f|F?O%$+-JoZhD4DoJzbH`f0k z_;}!fiAoB9(qMSq^|2y{@+xk9wY5H;T;_h}Qmw`xLo!-puEU7ZQHF}%MUE*c(aIy z$HtD%kbeA}&x8m#XAyO4Yv)Ax1J(<9S-J`8IPy2ZLOTe$S^JYUAAjY_ey61PczwoQ zE(q9(D)-Q#5#wc&oB(1PGx=t?J7kdC>@SE?gg0*qFQNm>p;txOJrc?Ip{JanbUp_! z9%YqMFFS-V6!zuV{ZFvKq;U6z#WPzaL}1KGm+%CM4E)2k_FcO4b&fcX$3*nL>bPu| ztXxWp1fyHG3hg!a4M$Dw&u_}8^r zFP!6|SG`_H2nxvuM~2PA7lQ65@#;jcnHw9bp|SQENAKFDjQW7tY@Y|?+LX^Cvi)Y_ z-{7Wr@>Du(*i5xxRF~WEsEuExL(76?yTiEn8Jam1IvR4pHQCJ74_>aa6fqDRN<2kX zgBaK)^WuXXsL(`*@!=M>1vhW+apxEe*Ae$ZTv-b>wP4Vf&a1)nlQ9l;_)2NVQJWvo zkL(T({qh%x(FKCq=O%fVoQ@;tIxWrO9ozX^kg1aT7}8A!R19>U&yEXU+Dt*5a*P-| zc0?=Uid}jx71a~{gydv(m{YW@_UHkg3~*l1^Mx;>987zMM+`?-1!`8Yu36ijkBqqe zIfgx~dKmwyss}vnO~}?(wfopv0B}U?KmKVgDb!Mgtp;o*XCZ>S#D-Z*?+xH*X&r`% zo8#Qy&u>+J#AN~onQB4V7yiud>oD524!6m~6FGs}QZ|h!eYx-zXE_(^1%!DS_6v&Q z7~#xrYvQ|lwPf!-G2pLcrtr4jL3t0h_=>^-{CcN?JMKTYrk%$M{ef;#5MB4%3ta^B zov3|^vp~jISt5OtVVt>#p+~znsp?d?W|uEZ22-Gcx*QS*a1@T4i5!z6n*%L2#n6iv5ei= z;hU&`9$7FwC!ThQ+u=;6uJw zW)I>M;jxoz|K*UR=H6YQ>I;FAT92t(H1fPkl|{Dc|0>A*B|++DgO1xEftcTmffW>D zbN{VEyToxE*yI!cAkma7SINi*I*?#`L9^Ry@e|XaMD}=&oD3xIt;JEN*dRWP#g3^{ zrflMu?#M&OB*8IFgQfc{D41xV-j%qX$If2&DS^iv{EkmmfV!3E!MBK1xzjrA$G6f` zb$CF?cdF?N1nRO#D8(~pvdAYh{OJXLw0BPdScG)e0n20@9`oQ(AB?JJ{cf#c+GZET zP3Q5(ZrLmqBA}(R4mA{i#j7iOn68Y`?VhhCOZdeYS_F|gf=_yS)5D(6ZS2vv?>uMt z0d2{eYI+TvLDZ*sOPZJG4sWz&e(FoBfj;T|L%!}=;+Ifib66Y2Ppjt5o6pwHH<@W_ z&qC7b?+c6z7P`8=HpR9il(bgJ#tRDc7h+vC#$#E6LoK{F|1JU{+eieU;pAcVt&;RW zgh}>Zl$JsknqCERq~tfTy-Ewn-q_6g?=TqaIilTA*V|~tkr^wsg0p1y$$Z}};!mK$ z_UtBpJ{D_A>MsslsURNHAvWQP_naE+p{@}c;()w!MDycpTN#NRw1BiO9D$cjOHD&E 
zy_qTa(ywkzg1RIk&bLN**^&I182c9gGs(%n{p7^qTMrdLXS1ju!>k70S%FJ{fpz&P z#xoD@+4I|{c8Yk>-Ck`caA%T}752LB>)Q=%6$`5c%&yuqVc%|UY`OaDXmICYu{4VS z+zFP7_PKu1EfxE6dNkl;y%Ed;4(A7#MIdu`b9J?vvJ6q9Q<;i$9XyR0Fq3^0&oZx| zpv;1LKQk*U_(VX=F~b)VQ9DfDs_0Z-82LOsy$P3%XElo~nVf}C=i}h)YOSdPMF0yW zoocy)l3R15vU|@M)0I*~QHE}~L{V#nRIvO#wHwyhSF?#Dgi#1io55a*h=b`@UEX)r zMcX7>?m6m+8(Vcj;K`F2a+>l+mf`+e)F;!k zObiNephVyb{tB3%9s=Q0Y_reR)tOu)F54}k69o0-(w-NfXE|BL^x*E@*KeAR*rb+D<1;WPg(_jk zVWqNV#Fsz}=KCRbNMmHg${A;tX~s!zKldF}L)56FiDbKkKY9A}bg2u_iTn+4N;mW> zpPU{U)LYD7Xg$2j1ks?ATG|;zi8IW8??q@j8B-#0W|{5$MkwhT98>@6eadq3 zUy`w|F8&h>sftSZJ+M3Ky9ejCWBHNKFP8)gUw_(q=Ix%&r8dq^Z* z9?)LL&sQinHQKiyIbKLG5@4!NaW=|D_zG689Xxc-iYd;{fplFH-_n6fcw=scIATep zfkDgL1~MX(5tZg(kRFJ~-})VgU0z|KiSuD?0Fg?3^S!purFyX=W2fdHh*emXs2j(a z`?)tYQWBy!EDYZEzM{ek;iv9wws%ZytaB7%0yQ*XFFtaUN>*-71*I3-iuh|tzNG1R z;+Nv5cm^=T4MD0LZ|7&}Z*P153a?yJCY6@+UmoB8K%hqo3bX6<(%|cDD1{b#@r!b{ zb?uk_2*7`tHx3xd#i(|fJh>UjI3^;~$cvmf^cFf5*cn|Cygq;}fV>y~a3&iUecPJE z?DLa-+HkpwAky#8Mh5ZZNoAg-u8ibuOtn)^X`~-aF2I3gE5NB(kN{= zl(BUZj}`xrsscTU%(S|SUvcY|%VHivJSzaEG6Xam&(L><)XI8=$plf=yq}SnZnZF$;Y2=wIfDJR`;-9Y!j*2H9?Z$bu4Fml3PJwYB@O7|$rGzgg3fA*Mp zY_biQ2DHdM6R~tF*T`>CwlA(%x(TlE)nhanb9X^{oN{oYVs44Sa zycpZLR5D65H5IuS`0hTH%~lsM_(B2$(niwDbB-)bBQ85-RpsndC%`yz_%~vqvrvRC zlxc4>e&X5DLgDwS0qckxVMdo%v129IsG zoK~%uXr?Xte9cKXzLi)fXjIkcdk2PYLEG@<2LT@v+<;?gn)~!Tq%SIo7|4_P3aS5b z?wXpLmtG9L*Cyj0wT&oEqPs%J6axl(YVF;#N2nq5_g{!uP@J&cN4-z4jn6zNy~z`L zqbqaep;wIFjZxasYu!US3YF==92=#hUeK>kpKKV^QC{332FPP+5>B|)@|0vuLbdM3 z6IPLi9Qm8+5l#adE@X=6{51Ng74-jL+UP?q2j%Gd4TAuAkvKi8p#m1eZwDL$Q%Lo?-yOnIY!J~}ND4FQoE9yjJ>4)6hFlHL zFXJMLFNUh6^M?K}$o2o^$Nc`oUtJZBfUxxMAbB1%o)?X1=|m7o6}14__Hl7Hy_+Lx zs~}%?$Uui8$47?Oy0yKh=V7IYp#W5tok5M|=xA?A`W&9Db(`qHNXzQXem;~hbCyy5 ztO#*m7_G1l54V(=-jd~3#!!_$;5g@-z_f5J z1pv*xJdHzwdn+-29PnokVetfXKTVxRPwhNF27t({k_EQ}p@sdA z_W@E3;-{z|V=~@x5BW9xm>;BHqPMZ>C&pFMuU|LVCiCf_+}^g^eqUQzMjdliIq6E6 z2g0XkGva+U_2#0SC`4_D5%O=8>cnrcd1l#R3kkjT~PCcw{CSoL_Tu17)W%nVc 
zOuS77hp49zgh?tZ%-051#ug>IBXx{Uc;y|&N+gq9aMwAdQ}Es0>-#UU`Fy$9DTG;U z;0WyZji!NXZ}f&CNP=~Abh4NP8gTW&~RjB6*ZtGXy=fLNN4c6=Dcd z$QGoDUn)9UOZzr(cW0S4)Z!?ApUv>I41X6ir%5*Lfp`c)Lx;S0@j|mzu?)B7lGOI0 zYI8#_GQli1^C+oPoVoHI463x_*0QW#COwNR*^l9=1FWVYbkK}2B`zxiT2R}Z0`ItQh$V` zH2RvXoy64_^CbdGMBvAl@1Z+2C%q|a<98>iM z(E!^=`7j6^Sq5%9&w?%?;}sE@@_SBIQPE9|N0`0uvIR>JJS&0LC$JDVirDY;MB6U~ z;jqx4A)gx%+NhAAcvTSBjiEME2O`#0?aVgsNoA;`w5y@(_ZRj5me>1s^&jK4C?Y57 zk4vL^&Imbt`2|ED|qRBoG> zPk&upv+kvTT6y(`uJw63xm|MW%AV!^4)q#4rj>Agi2yHQd^$(Jk^I_UjvxfGLs#`X z_dvDoc5W`?)dx$+!Ra&5`y9_r&wHtBO>9oJsx0va_gRIa+erJ5PM(mJ;&2!1FqPy+ zn)u5XLo(C;j~0Nk(yxXZJDt0AbR-gP4VXZudb@6OIdiNN3ul_tC(`|4a9-ohyVK~> z<=(3o+|A&j;YqBoD21b3Ae3nv8hSy40zMw;$GN_VSdMbf){NH{eHCB^(*PnhDig`j zVvl(WiW)GdnHQ=kBuL( zS!JdOhsG?WcMRsX0G83++)PJXHH1PEYuxC*Y!|SyHc?l@VL3EM+N|yVb3jA6e?xPY zb0%Q07vhuA`(m(SYy-@e*oL$xaLOza!)cf$w=?wg$7VhFWu>Lbl6_5{RIIf1l$RDyhSNIW&8l^R3zx|79g)2Qi0PmQcv9ic&n=&u&7O3d^U=Co9jy-)k*Vhg`mIs!9PQYz(D5gP*+#@oK>lXie87cTQPgE$4x+rO{V z>k4$e$PuXzHevWEuct~4{SELYYZX%2|T)T%qk#O?mGS)|8DX!H}KYYe?6jX>gLfVTH znK*m}o~8vdP2oMamTZenCs83908#0kA&N!N)+mD0Hc}8fTE>dtGUy&joz$X{&kHF2 z5p=2Cv#Z{*WDzU~jNV3=J3cKe4B?+gznMnDqc0W( z328fg{3|=*vc>;|bb0RBPO>oWSLZo%`dw&`r{%#s{0DIax*n9QM9Qq9%Y`$$#Z1b7 z^7B`59ey%p8%kRCN6Mj@%hQSJ3qKS=jK0CNf&uV%hK7cDNvw{J{9iX6YJ?GTyL+L0 z0)8>90)-5OQmB;KC8kX0d?Bnp@sWWa=8yrux4)9gN0?L!>e_00V=lvV&td59csKD~ z#<7|a{JyC{KV%>djQQ`lm07tred%5|0#WUF{;j6wIs*yfpy%@cY9nLi0VBFnO-ptxHG46jeNd;c*$fhc6K3Ahmr5z9;5>4=u?b*q^1BKl!ed<5o^U)t>Nk9b z%PAZ;o?(!%Dg2B9`Z3q_qR608%9HVyoVUNW@PH{vZ|;Nh&3O25r{QgKstEkJ?thSg z9-L%Bkw8h1m>n;ZxBTgKlw>QOpn!oApE>R%s1!yDS31k^kEo5K!d~%;jNY%I5T@?D z>no|{fVas#qwq$;jSrKOC+EXq#Y@8}pa@_@s+u;th9= z&QGS^f7(Z%AtLo$<~0ZNY>XdrNaTR~6b2jij0oDU^=B0kHbIi;>v&G{6W%)j=cqCB zC5!Nh|NlC4e^R|}OI`5q#fu$iYjY50Jn6j-9S{*CyYBo$i$z&PsP zA*v1xQRXf#KaQ@%@Fxk(GM>usC{NbG_yKtm-QbeGPb4;nN0V|byRcA4?g);7#)Fuc z$^cKILi@9KE_Vk83g5wkL^wYbrNx@oxJIlYJ4T*X{rIn*cESDod9=e@!^J0oi?iX{ zCtow;BVctBoG&?k4y295QvqIQS+3E8y0PYMKZzeMPOi6aooCXntWkYh$}Frc=g4y2 
zdQPMCJ31pw+&l6if+V`TJbERDEthG6I_23e ziM5jcjkpU}FRvT+;gYAo8uN5$&k8{Q7kO6z1PK&-lfa{|cz$U>;J1F)>Sf4fhij<& z4yTu+{j-7e6u(QuaFQE1{*HJxa;VDGo4K-wiSmyNurxMS2Kw_U%0t^f$*d5^(U0+|2Vr$>*`sdqD;KkrPVd8VIP(@BOZ&<`Uy zcXG!-(y?O~>4b0FHWEi;ztwCt+(XIe5L00Xz9Z^(YyU4z2#xQkyZ8qI5T+f%DO}O`a6P?$#KPOX>C}Z6vUG*#7rzA4$s&5GnO+i_X;a-WS zX?%=&3cYUkZp%5dRB!j$^UQYhDvD})YnL(HCZZfiQQ`F%>6I0WD0Won{&Is_sT3&7 z^jXfcw*v88ossWqaPKObK8h}bB@mYBTqiDXqyms$r|2?r9${H=<*U~YTV18V$?4e& zgKQv0&YuBE%xqv}_}m;C_tCq3_@w}UZ#&vr1fL%X7if!w`?xbdGK#D#BxG&7gNbr9 zY1kgXJq^jbyT1=)nr1ZgVSAZNrkh1qhg0nIbV=jo%>Eq~hO_kZV3KY$d7ozS!m*rhVvCA_ zaLG`Lky3g%rqg9}(C8#LBFn?@ z%I2B7q2FvU8Hu$S0YHcGA1|5jI0YLawi8M7JK<+jm@8XVm&QDDA5{!!~o82HlgK12h6=J_LGU z4=vf_PCd_OH2*P&UQmD?MNjA}5rF=i zz3R0a?C!kO4z`~+&zKAJhZ=QoH}!VaK@jq*%-#*HnwZ!4a!lk2F~CzW5z_w zW&Fjc%Bso(-tX;$@p3gb+*8owlB!$eqZpbz8?5}3z9d7J<;3| zX2_y~Zqb(sI`3*ZV8GOjM>2VftD*eW2Jit92^e;jH7%-q` z!jPK&&@IVeSY6}3HaGdz#KAw{{{Qoj{tD54!?RWaG5`a<(W{2ha>d{*c=YX$7y~Sr z4l=gnUpj1=3mLV)(4kxS0J;^(+QZAfeg974-EPmCMUZ|AN@$+nt;m|0&6gI?!YYR| z1u7FBLOymhJ}+r-@%PjVtF^FVcQ9 zBpLHm`)jbBiX@?yUXpGu#v70(dYmT)^&(s^ZBRgn%(A>_^%eozszzq)i5=x#B! 
zTvU^rNU`15*eyEe!__Vx#IA?8vcJI>^!aW((`VD^2I!(NXLMv_-i)-bQ9$ zFBF(t_nGaIZ_ojpcq12_J$J5@I!n@)s`%$kv~OphaB@mUWbRTHwM=AM?8iDN{Lj1V zhBOWk0o|RG{n@6#0i*6$JYeX;{mBT@faq6mv-`q0zD$iF(lsC)yCy-K;=bA1PV>9T zts0JA0%2ay8V=n}BM|!x5s)Zy>sv$AEWa-;&J7E?igz*M8s${_+(|erZbS7>oFyfc zB~cS5b60m>!rsA;qVlL&THB=tX8%sDze+2b0K@9^fI28ESM&8*Au}&rh>cL;4cO5u z8Fd4{-=TDy`b0XJMVl6QdtWE2W%YLs++cz~+D200^E@_}8}y&%e)|!Twi@l)ZDhY# zu88m2wnH-g>&@ilpk_iX97>j?hS8^O`6%=*Z#Vz9_~|D6|3w8{bV9qX>h)(G9uwz@ zH{IYpDo zsd(XBvOt!P_0V3ar+Eh*EpmkX*}N{ip9d}5w*dbEEmMWBN$QpX?^yv-YhSKX@3}9ZXvmr`!A4)mA~`YRzpZ-%i5$`B(4mFg({-W{V9r#% zCf9u*6okPWH#%$o0oeAp;dFu_k&;QxWb=U%3Noqb4ukj~S~U{DSmWR3UlLdv+4_j?t57TrvO)f36B!)hFteKXZ(r4h~-dVs@vdmt4JZ znsGCWPr*9Ltc=l4&?T%oldgLy`xph`ki@U+4qudhe>Kjuy?M0H8q9v-^Z%L57^wT+ zp(TxA?%6a!y1O|*#24+_iCxM<3%XAcdn-ZHsmw2>zn!-S69^_Cv(%%;xZL=0l z$jQ){vpo;2L6hN<;=a*y%wbB~CN=<^gJ0mnn!~-{ib~Spef0M2O$aEptm3{|+A?Bw z4}*gC_bTC$WnMwam_7X&+R~zv6K(Dy$~1@mM@pT7PZ?tvwz|m>Z*H7!WhPVXP>e^O zlx*K4M~o~I_)KM3yp4}zBHX}r+~K&w-Io@7GpuOJ8CE>5&j%T&hRMMLf`45NJW>p) zanp{X0*+%*MqLPG&kG}M*x@Rd)^>eXZ6*@6LH^EOksD7E<`j>`qjRZ^O5dQN(g9WB4HaU0MUSF%g?L)DP4?7tos(G~BzTX?GwIpRU_7$m zsO{=Mg+D*-c7h{>gV%|feGV=U)!V7ZZ=UMxT=L9udklDBXMW)asgdD%3j=9{cl3Ee z93!`-&P)y%#$w-Y@`3Gchf;jnKb(M2QJTSFmwM{FB1BO%JSWa89A&vZxJZ9qhP37A zlrf9^+t!o-e66ng-bXhb=tf4NMD%bszg}}(Is2Dphwps;0zW9#4FLU7O<}VEztWIc z#JCC}$7OZ-5*r>G*=(_<0`0Y}0!g=h-5O=!0@q-wOs2Qt#%eN`!kV(bun9p#5iG8o zEgpmWi3jiNC4*4S+&&$aZS304XLSE-D#lX-9{hiFy?0#C`~Uynk|H!n5!zO0kWD32 zva+(1lo3MKG1`lY28xjEVNa_%M4@&drb)8w$#A3$Zu>7nh&@fEqX}b%;WO9WqFWGd@x%u)!5CN35X4|1>s*U53D>CV{wWv&2O)<^P zdCiQoj7$6X?Xyqm3&5x}mye^EsaZ$(McC-+RCPG!>^E$<>eYW72Xpt!Y`&DYKZ(KP z{CrLcoi{2i`O7*bR|J2o(5wK^B)y3QZwJvM-%AFs5RC%FihpamhWNj~VeU*~qg);m zW;iwQFtnowhez)QiWDx^5Hmv~i?;I$*za7~C03g=S8$7UzRBYAsA)&nN5R(=9HZGG zBp7hmIe{lp#(?@iNZu5`UrkU}nP0AVQ{ZJXjI@hncq5f}=Uwgj(=FYXsq_6LQsG)2 zsQq&qD6g3U>BzA)Z=#!<93JWp!w>KDTjd!V?ze~RN(r@}ufjL*b<;BIpRFBbPiGjQ zs-UWl_|`exG|5`=8g{+^be%!IFo$pcI<~{h3=X+l<5d(lB=4G*kLvl&E8zN>NxrKX 
z!)V9eklfq>138k`tXXrq)C?g3_Q+LFH+`I-)w)3r{Q9p^8VK||OSrzqZz_|1%(Oyp zqGpX?Ig)GyrvXc7UG<5yIw4JY@CSCG9j;v0OJ&Nvd_Qw(Nh*MH1&>1CD?jNJfR**> z!HkH6D!&1mp*KIzazpkj0#e+JW1x@hS;KW-*y0Q`AF)+iQ!^{J4)lhi7(y~OrWpu@ z{0cUI;ZwD|fJ#Y^jm6}{Y86F{-XM-H)|YTvi5mV&Q)r7In91@#0Cd81xlcY?U-5GaO$t?!B%aKALwF& z7*Ul~u)YA3&>XNO696Qzh4>A2REiL$tPSvbtlS|>Q|Nc<*aZ_e!&J7WDL7Tj^ z*~tA(*^_wu$**U7s3xf~%+#*|7^p<@I_)^Siw$jTbb!d?`|R`Fwd2BV*-R8zYsWgy zJs8;mt%hg8i*%N8V|Fh^=^Z9VSw2zv4C zwcrJCyM-=c3o=IJmkn~aPM^x?(ssZ5BXzYt^M zH@u^&RJPVxpqFLbVNuBhDv@EN**!9RZ0*TMgwE1=Kl_Wm;=e&@#>lpK#|B+jQnyN4 z+KUnlV**;y^I5Ka!p%psa%=S@q-orBRXXFZwGEKe6FzoG2sIP22}LUBu3hU$?SDA2 z8ZPncdDba`MRu4M>7Q<%+q8ITum-UljnS>mSHp zm*GWsapf?VIAo=Hglv&}<6wPvIl+y5PGPHY^~#A23I~VUz5W0FPhx91XH?=zM5ufO<)@t?Z99f~)V)gGVJLIXLIkQyf#<_HHB;LB{+Jn2!XCSO#r@YenTA z4z9FpQ=*6nMLxrEWT8w8g3(b6$jG4D$a{5E32aSMOk>%juH({)YH&%3o1aA%orE^& zyHa6C3n&*RQMa^PMsE68N>~?$0+FGbSmAFYxAx~urj}JUw$#B@j{u8$>=JNec6V$+46mX|ed1xj}Vs{wY5~U(~OyjAP+V zRL(yw^KqyTvAiDY*u77mHe3;Ny8dFX38;H}s~}4VCEoOw;)#C3`JvLLQN2vfVJX@4 z^VGdja2Y)410*Z~EMSB2=GNl&8o}u#w9TCsa9Yw84U;32;wdEDy+_wzXJiD3%?Xnp zbF>JaARWKO#!p-?0F)*6Ocu?>{-5dvEQ@->XYDmot7wbQHf5&^*J#$>>e`4aJ0Med zbxpAvmj>+~8qg7up+zCCzSPXjZK+>)7*rysy)@6d2Yu%t6f<^WPx49kGx>|7uk-{> zM@t+fDtEu-Yp`IabTy;|vHhW#8Zw8@yad9S!8|JHhl{w=y$e!A0 zUzWl5Y9BvE)dmjrYYcDEom4m=)zyGBL2>gLz0U1X0=U6tpqobqc8ksu^-79|OxiS%+-!^BD_z+}# zkorNmEOXZ+yWLkosF^&~Z|nm~EdIQyC-dkKuoqN5VPqKS%)KqxN0|P*H9dH>u%pJhf!tuhY z=?#=JYQaNSSJxLQ&xwMOdnn@AOH1&AmU87vO6^Sw0J0%*&$vbFG}bYY$5 z_m(soClu6V^rLS!ZC8JQyE z>QLpJc2#5f1nL1hqR6*9tfw<=b;Zq)7UU{jY2f|Z7GXW(k zTrQDLqVAo6acOg1-I@U>@m)qO=|Ps|G|8}e|K$7@o0f7zA>2i|>f)Zpv*WO_e!`}U zFIp;d*H8$)I9YJx#se+R$yO*iQ$mj6T;*6;b!JnRg~FUkliEL6n2&~M z4MK#9*U{+bYKB=V9dwj7$@M;>f zic`@*mFT$TxEYL?!1hNm3J>J(3HrORVGG@Yu+3aSXl-He_VesS<$G=R#5}cc-JfFR|Mp0l*nY)lzwqhi5MH0(;3xAX1amfSal_S zSydePoB$=gxLxDYXTb&cd7%`bw&6&U8mHq18X(JkVmkm94f;k2ZF<`-Z|deyTdERj znp`lJ{&S8nkdZ_7$`sM0;Yrh3nLfyAqB8vr<;d{m9!zJIW@PRq_;Yu6cgv`#|4AiY z^HJ$Q^Wgo|e%pF$jh%dIRt&|1_DTS@V9L0ar%{ec3TSOx%uCQmdyaWr$-mrol6Gx} 
zlVBlONd4Nma)M*!3lxJ-_m?O$D`dTX<3C{ zQQ%zm<{V1;9N~msz8Zz!Nm<(9QG0gmh~V7&QUQivg_$hA$5?^aS?DpnFnUQrd=J38 z5DR+R{OSI=o10U>M*?|R+tPnuqDWke>O(fWRQ zxwFSPcyVOAaQ1;Q7HQ@7fnNZRgazxp#OkqAVhvqPGeAGt>htUZBpBGy zbjEQBxVRLFiV0+2Z6Qd|GLps~`*D&;Y0R(kcp`KPT~e}wsBi(z<40VgqNe^_e;sV(=^vl}{b!0`aDi>*^=+XZ0qcD-EVG$vDei4dkQ1-+~)t(i=8w^IU8+ z3e>eHN5vqYKsiwk+o#lWZ3UK33A{$|a_L26SNJMp9W#Nhp5F zifu@M(3+_e>-6jdrMgHy9}GlgPc4+MV4$mOK6Sh3Q1PQ}&ANdS5zU3R`QPZT3_U+S zz39$_qp62LT97V` zY`1GU#mu@u+XNAQ3@qMn;zm|K6Hlu<$l}qN_Pp=rO`9}3bt=B~jKE$?lAoO?`~&y< zHUg@CGhD9KM+9*yln7ob=ny0eI9XI0&#rB_{rw&;Oau!u!K~;aC#B4i$m<4zLU-b; zKvCDHt)4{d5cHkn_UytkWf0ygQ}~DI7JSlry-=w7ls~{js}tSktpbZUwW&-8r=0BY z3&Fn(uFp=s@I81v0kk7HrlJhM=A1LZdYZE;*N3y^?sx8l2L{+ZpS!AdXyccmDc#Tn zgt0$@Z&AlGJ()Ww`iSG~Jlnk_5rH0mF_69!q|^F4n2nIY>Tw)P zfKFAwW3J1Qs4IY;-TIS~|hcMCb6Y z|2>HC-~ZdVVcY=B;YzMlSuzDy!4o4Xh~GZV^VI1=`T)HtZ{aT(itF^*1H%Zxx;_Is z_r?%71|wq97-MAt`qJ1Djj#7<4Rv+oOo}Z;Prx65W{K1IS9ehnURX(PpiOjnm~ipJ zh0~?LL*1s{XEIlRWz=7hDNH@iIiGnz*w5y*a?GV1{z{>_iN%*XCPa# z(I<3O+Y(36W<>L@3%^q&8UF6k+epdq(&ft;7G&6LBGrI2Ts^8Wisd7nPwL5ceB5HAurKFZnPz9rX_A1T& zZoHwkBHxEf0scNV>k%1ALJQ6Z3V*qEuGN8ZEZXXkYqozHIWDHtP@YRiUkib@h@E(w zw*%i~@}D`^_{~Qjt|Sl(e*AQ4CYJ#s%<-L=CZ&cC;PDfV4B;}OM!3#!L(ssPW{BH_ zcBQmu)P0kRzAGiOB6dGb7WJ}5D7c_IbnK`rU>?ETn76yq;fBzoEk1{XoPyG00ot@< z+R3hn;c#V$vC}UTazY%w2d4keSP9h$e8h0*lx!AO`_)gHUx(?5;_Msm=FiX?(M|4Lk={5>A%}Z`f7?y@ z&GYAqTwsM7=gr)u0=QMe>1;<^D+F+_-Wz>N(2offN%oY4W2x z4&Le3976!l+pQ-Z;b!riwFEm?;W}J+k^5Fky&<`rbX}}Un5M*a0-puBe(N=9gz**w zurPDrD_7r!M>V7xUeND%W3O~8Yir{!THQ1AFV`ZSrZXCK7<>^Ym$^QmZxDqSe6wa~ zJ;?zOK*c@Kx9f3o+ zv4URHk-n8=`O#bTH8lgl0_vZ0nj!40d8XDs$mwqA`!OlIKv-yz%svqm9vE6KI*YO7 zt8@afs2j|d-$E}W={96DsH~xi4kh+sLluNh4cGdf&F=aZffPCtp7E9WzQlx1BwCu3MTdb|D>``dz%tI|8`5U1J-Rq6cmWz+O-TzA>VgrB2m zmSuiqFy$)2?WNWv!s`@Lwe7Tdl@}y43X9G2T2FpVPes7ZU6bYZR!8`&MbR}iCb6aM5hvvt(bxzggw zpe5}H*6VG>YzaE02zdKT;+N)FJD%(59z-J`y-?6tPPsHi1KeBqBI-`&4Y~>vVLiPC z9x#qE`~Cby>^5U;yo&;H2ozs~k+i;inmj6?KL0`D9jK}CIMmH|L`pR$-E(H91oDog zAf$$$5L)rpN%^mgSt 
zpgUpjf0$cYjZYgjjou7Hx7n~{6e*JSSS$)ToH_jqD-lmH6;P*q3%f=NObugV7R1z} z=b!hgyJCS9+_*JXpux>zCA_U*)4-vGNv65ga-`^d-?zQE8vQ^`>W}j&ZXl|bkB5@pNgHcmP zsBiHYOzZXF(CGcMmeMJp+dbUr?#-K(vESgpP`ipdCT0yE{j}L(m_Wd{CCOTQ`{NFa z?lg3d?Pa9iY^3yMOpn&&zHmJbD|DLR*F70La0H}v14)!Nb&j~_+5DA12doTAu=E3% z5w{+$q~n%A%+;)IuS_w#K+q0Jz)98C5TUFt7ldq@CHIkUv8<1m3ap}C$SM`g79Gi% zTU5>=@FKSHTn_2Q4l2G7PLlX%x0>Eo{DRddx%eT#)c5d5Wvum2m$74oFc}&E#8v9S z1E@+pCfCt z{Gl_UrDOGGDsBoWz1I{~F&L*LRAI^HgXZx2WdDhEriUG1T^4n}>6hH=d;2W|f0WoX zcP|PMp*0iUfAWdgLwp6_nmCMn)<{{CXEX#r)Ry;k|88&?`wgp5hB+UXBj)#T)`AlMO}pc0V{`9AT0__fq2ld3hRIt(o=pFbG!~x)7BDOf9A> zmO~83G9<}r$iYL0a$phKq+?HS0g$==5vmEXP*IP*{lZ}ow{@%ChVwuyL}`pzC$lbi zPq*mwa38!?Vfo zgOUk^XKtL1#)e3>$giZ!34&YX5ZG5M5RgNni`%_Mda;ax!PYObwnt79R}7Vw{b4Bj zX0R@2#_@M-j>Kpio_jX0M{NLIjK|`|qb+ zcCoBgSzO0Q5KKtBIR|cU&~129Ays^OQ60q1l6WO}E0rXAc=7ov?tOEzP0)PQUJ~|Q z?nGhk(l77UL{_l(SWmwOoS~*K>-n(zL!@}+&#;pbnu0!=3w{p9X>ucg!# zsmiA)4q?jFwQ~~U<29M0yYJ_igA0K!=y|bH{&PI$+N4m`>&IES&rXpTA^ep^%dZp8abr)^+n$tIA_LM9 z=tX|E&u{dA$UIU!Hqsx)-`Y-NOicPk`}feK28C_(^wc;*_~x}w1u=3}ZzPvfgQN)S z0`@AYanBn6P*pUirisFXZhIqgVc>`wp2%)t&GHy! 
zIO2L8z_q~E$L%5)FH9?v(c_REH60}ZmI<8Z3m%a)_Vs;v!q8JNgVgXGIFW2YYRsQMna98t(#3$-g(H(yQZ#IY3V>*+| zAQn+p`ccw!j`a&@7zD(#pi{7&_O>TlN zv_)E?DYh?u4RrrIWM@U}6fE(~ts2y|2v^6_Ng&TF?04FgFWcI zR2KB1_OHI%zx%(fYNjUH;?p$RQBMYtiGRuPsU$p~ZF$}LK*G>HR$&{`B z8-YwDf@vmHN5b(_1Ic0pVN`!ed}RpW*!YroM4|ddFFrXx6J?HjHA@+q!7Pa2^XShiK{+*| zF)lN5#SRczN22+j^YA?<+?RF{NtcW2Ur<;vwhMfGB7okVCl6pq#e_~4?H)P)m{sSu zYzOybEIQLMlQc8`mlhz|Z4ZR6-R#+&pq(t9bAX!+7_hVODO}uzXffBwk;PZ8`xv@d zZDtiGQyd(M9a6TrN>Q-dWEnT(U~~NgS-2WPGhtGf+}SgDm&qx}9Ugbir(U-rxo)F} zqMObKN>g(|ur7J(c~zpmpiWejczx-ScGGDKsp&G5^Of;HkthXp>iETS%FC07EC%n1 z@Bak;UbqXAvdL=FitC ziovoq*#Ea?;2i@`uy6bVDJeCB-AMK>t2%M8m|~zA&Ynk!!)+3DX%RUQd`j)WUCGIE zs7UUjx+-lZDL0?>(c;Lk@5dXs%QIi|hN$*~D2x|><7v&fS;ObgfLR23=xhkoi3zE# z&UO$TtzQtdo|Fc@XLKv;F+_<%AbZFB?X8E5qe-Q;^emBZH+;YGwu2al81-Tq;J;Xt z*Ljk8asS>uyIt3~Xt@n=Gdo?cW320cu{3^?+-{~x)ku9hoG_Ahws)$Zlp>cm~f8G!hwsFE`#hJiTQj8VQj z8rGP*p|ls6oTR+%OaaruiKb&Ted^Tgf`ag(P?U`_T`kqbwm~h`M5V&GY?B>u!RlEh zoJ%rqzWyS`N6WeY(m#^o#OS-R-(CV$h<`RrX5!|v%$;xe;{*r(vaF;-&EWtldW#?c z>My@QpX$aP02~HnrwqG)dUf57J5xMEqSPGe0=igOycor(h%lUL?31+E*ThK4_MgHD0k z@(AyFZp`|jt^s(8A2UZp}f%gaH>Q?wRH ziR)3V8P}B}8|T_6AXV}#ES>eaP{gPk$SDyrK~1W5P$r4@n)WwsT*-n+-@j0$AeQsL z$#2JrR&_m@ox^@{9@p($VAS^tUa9Ra%8)JXvs7;Pr?BTqW~h5Gw}OnvTr5*Z13v%z zugoo0o{FRWG`W4|J{*<>+>J*E!p@^@?LV~$Wx3c!U0cFXgmO8S=%R~d4!YevYoZ*a zKbv_FWfo7f0O`)rZM1NCFrOE8n_^EvZlhw2K$z{n!)xz1HvW$WN*j_DhQ<6u8*K#% zR3FX{b+KEpY842sHS=Ie>M(OYh5<3TmiGpFs^x!o zSKqN`V(uWA0>(6LN(Q1t8@wta(-Sq99?nO_o%o%^LpMXlYrqNx|3D<}R~w};k1eb+ zmR?Lq;j8@GG7M?gP~MabPYW-_j^cqqg--6kmI?HLJTZL(k}AzMx0Lir9;kI5jY2LE zsI${k$T*HO-^6<2=D_o^LTv{Okk@l%H63Zn(qzJwx%|o=KaQUWm{PzKmCz^xIFh3% z=@VN2kQpqXF!fnf>g*ott~BoENQJtUAMac?j-@%d%kUB#o)>Z06R!I#SfEZD2gAPA zDIF$Zc@XHRT!*yo?-OLOq%hoGCsEq*`@O!(IyTuupkMpJIbmDcv;>4(xjWZ#GMm$wywv?79Qd_qVA(@n%SoK?r);>eok)}d|N zru4n$N9ZI&D4li<0~=PNyykNMysjhqOo@eg)p5|8K$yhfQxpq~Hx4fR4bx60)$P?* zIR3fY#&trqrHIb@*>p}2E{c1598Wo+gVkBIFRo20s7_lj%!6B)87?#Yeiw5^Nn-Hi z@Zn~zPlIHDIlIO6EaMf$3R?UG3>1M>|Cu-OR{*vkxiS4<*1LCW>iD!yJ-T*{)11Z{ 
z0O6||w4m0)*m)`i1Sq;*`w4Dtk}UIV6ey#tv%F2&zmQhejb*Sv7u&BE#N^Ss(0k!ak7!93A#P~kDlI-aX??<*rMr&Q>GfX|$?O}P;P!7c| zJ$ss0n_QYeGwkeR3qB<&i^Xq=L!`GnQ-;JrZ^-%!UYQ4>dy`{$+Y<4s(#CV};hXoa zH#OlB8cFE}eA)fSr?gOuLmJdIs$ywY+(0}gt1ZXvFWiT5yLR3FB*PULDs^97`W}-} zLeK-_aLaF;h5YKkgTIiuFVYXoCBP&CZbrxCepD~J!%3Qkb_;|nhHfWgf+ca=*k1Mps<^?Jb#D8pCbo9DIx7 z-#u0gXS5zSpAm>BAm^2z^aqmpl9Wove*O4ydK{5R!Se}duozHu1M>SZ}8-Cg6t*xK|8ght8(opS-PwlrMpsAKm-|1;R$h$9rxg&OUucg z@m%S{=_q)UnX*v4l$nJ=T!fWi&R^GZ)fsm;z$5{9%7JB*MFP%qq@}-N*?c)HoyJz{ zDt=FTS?>RPkaKiF*}AsxXR0_hacph>hn9-tIF*)8ZMJqBt&;f6-fpY;q(tM?F1oE9 z^Pe^E_Ruk=&%)?}_n#t`&Rn(Y+dGd2S zJ-IEF$GGqb^S<2t-r^?w?%9BJH9ONnnqlP8mI)iIW)2IyM*;twEfy{D0eFK=GQ5GI zL4C)BTtIfh)i)mBr`Pdw=X5df`PS{^u9tcAuhFKuOdfM{$K(^!^1pmTZX`3_O|T*i zjG#-ue-avb4xa=Q_GQ{n8qhU;n7c@W&M4MhJ{IWhK`vbN#NUJsdU7IZ@acRTa|Kx2 z`M$nh@uYT%CCqxGK%-nu*%$k5sk*|6VuC_ShH*9i8kvGA%zX{={xW=`8U$~7t$5AZ zEM95z5O{94=^qUY47#lT2b07x1LIoVH6u*#aBhXut;_GNk_8%%n_T52&qNPf*0N+; zc&lQpkhlO_vj5Dta$fnCeW4_!vjyFA&|S|m6o~!z=J}top1xH*_$hrJyImvJV(AZQ-{RD0Jnht*zA}y0_aOI@yhekG*wRb&^J|ZV*Dg z7CZwL6?OpH(*25@neEYQ`go8d5e6kf9N2t6&(~M_IU#W(M#V_;49>zKwPd3aU(|*= z##dV=n~I_ukHjmcd%u4E?d#VU^_O^MoO32n{pfTPdjRK*dOKve`si_QJQXt37)NI< zLu_{M>M}E6Xxzl|c+fv%99OUZb7ujb5d>ev42IN@X z1x2Okrfyk5x@op;+quu-Z|j_IQD++A=6C{lI~)&#InzJSrT zHBlE2R29(;$;jK36Wza^*!*J5@xazuk0=khpc6j}T{@~Q)!fC?cKz!3)g>tIO4Dc0 zp3S&x==fcZDh}N>r(PE&htMuh7%J1mz&}oReFH3;GMK_5*2oQbr&<1 z2Iu4FsjUq9X(ES6Te1vcEJC3OYNOw|WUu(4m0jr6SpQEistv!eXQJLk%=_+uunp6u z-Lh#QYnAV>mBNU1J<5YC1#BwORloiV+~()4u=j>UUv1>(-E8~s?^!i`ZPg|)U@pl# z5{VZqKg;=#nNB1C8~$W70i+-B-rAG2|AP<@1f3!gN{sE)2l^`0TJUHEsHOi$YmweBW^d_Pf z+AGnb2vOG*K=!fRiXER86>0D^jUxvW&Wr%pQImbP1sw5P%gB_h2yF?m5EN{2ByTK{dTQDK`@hx7BsF%pC zli??&2ixiCWibc-Oen`AlErZK{qDmps{Gk1z%R*`BKG9sUcVi_M@DadK1DD9v`R@p z4=k~as`Jq9@$khr#)u7AW2G`Hu!JUapO830OaO-DX?w0bw#HVV8ycnqC%b}M*ZNXK zQ#ro)Nuom<$Q(;5LFqunKPk*}5{b|D0@MI5QFm#CrH)WgI5?2&i+{r_{xaS2yRBim zQxUmW7o4snZP!#oa+NeGAI_$A(i;DcsQ=txlu zKB!uC0fdd)KK(6ZR111tw>t$KMKb@d!9?>jaVsR?Hm6Sa9%Wp;=HGX1{_Vv{BGO+q 
zs@xnLW;eCryy3<|>LXDCNe<=v_16h?mLjBmJfb!5+2HMHp+tG8(uWJ<2Bl1`C}xeY z?BvmTB!7VPl#FWsES8@;14NR0JogLxGK1V6IHv>N(bVtO)v_W`3E$G7iPONS=d+`d z2U_{hKi&HrWR{j9`?}f9I@Hk#_MDX{gQX5xqPNsA=(n|;nKX;IZ0=^6 zU3wY1A56o6BpY(Tl8tEj=ED4X{SqvSHEX+@lUyw8MUrPE_5kld>>T!-dae;=hm4gT zP(}#fm9+|21YOI6q=tr{OA!&a!Jz5iTVB_gG3FDDQZ^}!o~f`~_A4g{z*u~ujlk+L6BQO^7i?NQDeVWG-pZ6+=HJ@WsXUm| zssC~n?i#Ob1R+G`7Nx>s1(E<~-Wp9xWNd6W=W0T0fClD7j321qp+gJ)wWC*yIhnmK ziEo*upP&vbJIvKJx-@(*J9t@I9&JXsMFKC3PZDv&K<1JZd~nZcNu*d9CBcB}yt@^e ztC5YTd(%2(Cjr-*v&^`SGIEz1iI zlo@R#QGvU#n!4G`8YZLPKQ@C0@2;HSe`5(=7Btt#iw3Q!2^{d506f>-1KJ3#+J#2g za#@vQeK>^{sftP<6^Dzp-XE*kAMpF%wu~6O^@`vXV@7_vE$Zrj$04ET`mhTGo-&v6 z-qy!I7L(-Gvl>vt#_|MVM)dI#Wk=2DP|@qpF2RFqzkYR;+}V%dK}fP(rfWK$9^VO~LgJg~6Wa6U`oL63U34G0L3o&SA^iHR~b zs7eKBg*1Tu`u6S}ilkQGA^rSJg&ck!^g_uGI4)I}$&#@wIvMXLY!I-c>Q<5lekApIG1l(B5;uW>hS7 zF$JI5U(W~g*p(V!wIxvfQa+DDP{QI7rsLA)(Al)53jVrJB7OjL)z5$!jN6ZeD01WP z`h7yF5PaEvDW^(47}C@r=8@ZR>1WS_7$fuib{^y4f*L;X&-nFkfXu#8=D=Q7TQ$7k zJc(-yw{Xb`f6vUEHVOxJwr>3OW$YOJGK7Yd0{T8p8nmuAitw9CT7t=%Qh^K!;Ib97 zt;;_{fxXP_MdX#hgY=4qS#j)jPi}r zd^s&n9he)rg$~~VeYlxJ1Hh3VJ`wMU!WLs+dlSjgNZw`V%oaY44Zr(AqKyBS(@m0h z-VxF}V&U-pZ}zl(xkc_d=0+K;w%$@vT$h3joynWo1sAeq&L%KoYF-RKP~&<`u_EHIy;YkC!Ew8rZo(vj*Dc zUkEyTv-4VRdVvh1h62d|$tb(W>lPPcx6El{qV=`M{EcLf=H34dZ0K;RPxqdDek-gH z7JY7c(a-tM@H7bb8$>hjQ8-(n96m#a$E+Wle)a9oW&NSbRazh-4yFrFKE|3n`guK1 z;P!CxLJ*Y%0ZfF%a%xcl9?;uI56%5(mC?GnS!mpdks}e##EiIvT?;%pBhLo|A`jT6 z?h-Q%xGYqD@>QfGXIN!y2iB#}xgM8#HQBYAbdy*mEwl4b&h*DkUF~ELJDk+knf-Zl zc)j0hJi+S#&gHT?PgnPODt|{D6|3+ef&)fl%%)`4`iB`|fd}nm%ZmR4Y)DP*^5bkEvuanMBeKaeKCq=N*#3;vL$QizQjfT0YsZ0&Bx<^PI>R{J? zAq-<(ym<7z$LE*M*3;M5D#K)E|7W}p%3)UYgToE=|D4k6yTpL1SA8SfYCKsZ@9vQ< zD!HdFMu>w%!FTl4HR{CAgJ=w8pmar7hLVBRRQt4q!$YfKS~Hn{4<_wLg^WQ3b9Wi7 z_4dj6Q>D72C>PzQPPN)6Y4rfwb^)k(g4q+n-zf|U-z|APi6{?(m|pO+wKw=tuXEF1 zP_gs)Qvgd`nu$tF9-mbQuK3ysenGs(Z+FnpMA9*7YYX_qR+<~eo8pO&n7#-99biS2 z4>WB|QIYGOI~1r@fYBVSmHNaZg^Q8%ETDA#w-4R>D2l&gm9CDCNuQ1rsi(nMqmRZznx8whDBk2v)5?9+^#;MPdNV4~l^PNmn^! 
z#nTVq@K&P!1h4P=ohu#AZ)p~l2?;s{C875=MwL9|UO&1tnSNRcfHeWRwJHPz zsA0B+C}C!FmY_{8{MvQQjA}|rzPA7n3b`;TrRAtu-I%8}1> zA2evtnmg=HP*PR<1?X5gRgY%2c8+%{&8xFed`Q0=40$eTU@F#B`fX-Z|7dMCqoJH`aw~;rOKO&udL0m-+e0P!HwZ z_u^SNHfw+W{$3&DMz7svZ~>?I%K7{ZwH7VRE-YNSbR?+F>!lJ4g9WK-HEGmoPaWr5 zGsfl3YL4L1cartmnrLT#>N8)|YswR7ZfKT{%$-cQ-Te7o5?b#w(k;BskDIcEuGa%C zi3pBA#X;D}@X9zZ&#!Tm`ihTvhSI3V7AHebiAUu!7nen!hrU!l{{dxtX*h>t2`Ik7 zvf4-NcIb-dIbqCw_3<3a*#yU+%R%my)nU*YOVQ)?*cIuVcKp}xE*ad$%VN)+Ud+n= zc=8d6sG{=1TQT?|^{2$NsH}#7aE;~dpJouc`F@9UWDAAk6tvPhlbI=m@~A8tG)YwX z_YV*$1G7wE2ZU~NOLz=HQxArnm>sz`HcfH-P0P>@Hwp?+9m;Iz=w28AuzYy_qHL9s z2y+jiXAt4TK%Eu0uJ4#8^-TG}g9l|!pWRwEm`EH&S&eRpE;xRI()#}ld8cN*T>lV; zpU<*3;#&*CZod%G->Q#K3x6);4Ds=#!zLcn17WAz=)tfW^U9w*c%Vbcop@8m-ZA|u zCB4Th-_82Z*bg_T3aotY8fdSS`i@TPsJ(9OguuHrI+8q|SkHB2*r(67GHfUc+-j!v zV)Nzt){btb-GzT(J#b%!?p~vI6PwttWO%WRa%a&cbP&5P5Emkx4O{Nrp?qx(Mc@4RHb#wBHa59P6S`VywU}Gc zw>gmra;*gni2l3l+-6Y(+xw({|<0ejg zv54ZD+96y&nGo5D-~B-+0BRc^?b8grk6%iNAH3MV-mqtD@~xSDT?NmX#gX>)Ac96zJxygg(iYB02ThZFP0w%9QwH)gj2sM^J3E zwgV(GZ?A=7Vcd)vuJ@Oc#&f@5;xb(FuswHLOO6`z1amzj#fLZ+)hi+ymZa%v4F4*m2(P2n6j@_jw3iNe(?a^%$0rr<+WBFXORl6d)U+f%A5t z$IlwkZ;qyC*FdLh2>C?rPz>Mj->~j9{-}%}RO-pX^)eP^DcKw2su`Iaf)>&G0n7bA zp1%n{AIetVLVZ_h7Wp67B~UJ^CGeK?Mufi>R$@0d>e@Bx5zD*V!9s=|Zhh=;8s5A4 zWQX3-YUe)jKvGtAO_@nI2Jca*ltS<|Mv~HP%!z7B6}+@IG=_kHSt_|qJpfHFs?4{0O~Y@U!_Fo3CGBn|kQ!lP`*a^Ye5gzN_N#~! zPS!=wjziW*@;g0HMZ+eCaOGTvNCPjbCi9%CD7xxCzihrspYGjcQV>zu?I}%0KA8;? 
zC4$lSo9_&%`tcgP6#dWfY8l0I^5mGodytEgu5+WM9~diY8BIS1K#H03wlnyy6>*Slvlf8ZZ;Xh)+o&SOJ`m&qIAn{LbA^XF(T<-n80CK{tnP!E~PE zgqPp59ur05yv*HQ510^5cVUG%kmb?X4#ic4t&-1fy-%a46TM{iAABH&?OZ)D zA!QEfg2UMPM6+~!=PzzJd9u6;C$|^8T}r>yZz9y^AOD)gLXyDll}zh9=M@CVOSg_h zZj%J42^GnOwIdX$zbP*y`lS?@sDnRDb8SOLULY#ogq=t_L=0Ou?5kzuo^aq^(Hg0IB?ex>axpl!p~O=%E{UO<_oJ)ObVYXId9~482*wK#*{mIdidAF$+98JDALp& z0+k@%q*(v_@Jc* z+yE-$+ZM|aK}fX34JCvEbHoZlN=EV#9_KK;M`s&YagtoM%C8j;3NVy;KXQTA1oGoGqp?lH@B2lMzh-9eSqZy$=xt;qjIcG z=3w9BQV66ACI7}sle2CPvJw9yXoRHBx(z-Y3vI6y49P5=j{+G9YV1Z8D-dR+qk*t3 z#ie*M-Ct?%KgXEJH&=+1AtHRls^P|QhF!Wu1}sHBib*2)1h67K<%97gnk+x-#QtHa z8O1prP~A4Le}8-9xYG%PVN`q;@jYd|p;;`x;QpY?$B_9c+F24q*kUp#dKr8Sn`zkO zDxRZ(9X{bDxFrbL-JZGfTx=hf{QOb~)%F5eO=3Uier+cfuRyS$Q_q}<&&|!>>B%8eKNXC^JdB%+re73J3rq+DW7#=U2yb}Cp&_#s z>O%nBayh^YuOSwfu3?CbPy}?UKSm?QEc0kv!=0&l2Vz*PD_n%7l#!%QpIZDC)B!~; z576?t0c@7|mFJ7Ne{Mmvk)J*X6Ank`ANNE~Nft(lqsCX=+7l>z-;gNN^z?0p3v#cx z#cZd3lVq1In~90w4qf~=bs^%qvoGO^IhJeNQG^|Ie+$f$i`vsUWB%4Q=K?UP5P}!| z=`d#@kdsd9DQIlDbzE!YxBtIQEm14^rkpSHH(zh4?~YLJ;E2Gf)2Am)lnDnldzh;t4HSYFBJ45`1-}K=)ktCKIz`h_||u-XQ{Y# ziohPzSXORk^X&Z@;6k0C&fcr`+>2*eq|6}Bt_1+!3P6{h5d=YrTr2uA>q62BijPo1 zadO|ZmSjx4;70$P;BY0w@3X`iqd3e8PbIj_6N2pc=S}8@_ak3@`c&L?a9XfF+D@t( z&4Jvhas4D3PQ}T?l4r?pp*3A*%xXu4E}K0_IeXJH+!PRQq6R*kIOF2-CNfxW_sdtW zJgH7hck$xTGmZKTsI3K^6&=Ngu=VF%xri{BJ#n)i!$7nEu2TL$xz-$M2l3q(+b8~i z=0<+_f20VF%Bi@BqYgI1wuPW=Yg#L&xzc4JwtI6n|8(uAh$r8PrT4F6lZeN0fcpL8 zrUp}~d2kcCS4{*M2ClU@pXSUr{R#GY(hqDgM#5RkMZf5}^1!$I@w}U`ruC&jm zn;Uk&70r`RC>eq*stVme?~YWJSmhtg8R_UKk)09d40s19wY0T8)G&{TLXfFJPr<8X zYIfHv9BZ852Xb|^wPhen!_}>=BliN3$mCT2SO{QAX#00EkbsRCopA1GA0TyP7@CrU zHM|KK9jdV%`rHBiK&y2;?55+ELc>sb#VtJanAA=7jeZixKV`mq6;>)eBF~O&(+iCN*vsftxWfT&ATB96qEf$petBE8?zf{I zOb|_Z+IpdO8`Y$R(d4$LV8V{fn~Z#dV;O72oi7lbqk9w+(^U5~G#7oOor6PgqSMnG zHzG`Cs3>SI@_;W9^Vf+zz*D5UD1rVfJpuD-+2uRf(px8$WI82OfmVu?zjo*krpyNG z{3N!UfJXQql|U9cBR+CitYuHXq8Q+~d`=QcVk$SGcr{v1x!Fx+&G|ED91!kRJRmHE z!|2yfj*qNo&-U1Kh|tgv?iBDV&dh;?PXxvIi-Qi{=Cni4cIHaAcK00@{G=-sVC-`k 
z(KJ~f9Vb3YWB@-pU>-g2E%E!C`^t13hjx$NJEQ}=qT47%xS|Mw`JeJd zRFn8Mt9V~h-rB!}!-`8xbPVj$dtcQMnz0r8f)%_eLSA$^32Y~d7YJkL51cL{+B_N8 z14Nm3ydeoQ4GpaO9;8>jmliV0}~M^|SfY5Gqso2|!3U>DJtCTgO`ZfKx%yEr9Q+__iC0EL8&YPMPWzKzhi1iX zMR(flyFa_cIaR)VIecxo1U`{uEw;0{9hjmqqxr2px(yuI9_GaL?2>0!y7sAG$mM9dG0&~jvcV1k zr_$1L3NGQaMDObhFyV32q&uIR+Xbp^6B6B zh7Fn2=Uhmim+o6`ZTm46@(QGWPO#jc9Jf{t7cO6(i-94r{oswi^kn61pv(R*2bnAM z^Hecm2Q>WXZ1sR$MuBO?W_To}J0Nq7}2gd-(8S6Mjt8YB*myR>$&TD7O%U z)cI=b=!g&*$mjV`zOR2QZ|SMoOfq6XrX`0BeW?EoVVoHNpywW{>SmA#Zz>aOv+y!C z1A{NxPJXKlYPVR%4<*Fi@QrBK0~M9)zNcND^4@@{#e8&=X3d)?|65*GCJkMs1&AJ{ zlh<^-ktEXGN{vrWR_EaD>iv}+U!+Y2W;ojTc>7y7Zw7)^+1RG5kMteFZ7E%N`;&S1 z)`J_gODubA8h8?PiTk&_tT(*@PTrIjLp=v3_)S!Lp?m7#^E`O#2M6Ku@%6gf9Ip_c9N z-I5%~vu(+|4XdmK1zKPI_^ID)M5p9322e*QQ`$^~Pg4(3*H5BHpX%{79oN(F#W zJl4@Z7A!?>1FvLwrSiXa!UC`$Ry6{+kDuKm>q1#?v%AB1Q9Q3g!-BlL%AaPfaRq{4 z7$&6CGXj=~$ad-#>NCl~91-+9H)1ZSYNd)Oa^{lprQ%N*0sPt=w-t2#GUmuOJ)m(T zU4M-RrOddIFam=m$A}L2*Sh=1SBiXUy~=6b;)?wICWv8=Tk>u}oVHcK5v#TBIivE& zW6DZ;mXSSpU}oGcYAGc3Y@U-FeqYGA_6$^fZmgOfoehsa?L23#7R|T+EL3!&#>m{} zx&2SII;1K2@J(vg`QLOEQ+ybhIyFHcUarfLA++ z^4kdMLj5QHoNWY3jr+60hTcaO1?KzVF5EaKn$i;hN_?9ZD$_Xjxd^f#Y4u#EoR}7d z^*-*zYOGK!yj_7_^X9SUvrWH4h6~@dbT6Ye7`|8R@;AVrB~*5Brc`3gz>Dqx{}qlD zfXMx%BKa*?Xm*Y&-pg8$qLIZJ@@8lZ0)zMOi%iMFQKp#owKM2FU$fz7MdRmoSX5$R z%)Sf~HXcyTIf1&F8=l05H$UNNNqEh1**w3s_WQ+XB7~ef;mN%;f|E^u7)5*(n>t;+ zEAit%R$iAd6NQt~G~_bYF4Aq2>%a0iq+o*@eep#mRYM15rqhXq(zsPm;WL@d_SPi- z+RE%M)U(^(ziBn}mjaK8OKaTzo}Opk-0p|>VZ*^XbwWZw2CDrrZBu_F^@}Q0H(td( zn?)4Uqa?~G^mLP_vQyF*JTRX;NODZss_|l_rt~!~gH5S=x?=oM^xqDYYJ2k$BwFE? 
ztbfTBhaD~E+(kllNtMjl*~S0wDjOcn-91~OTC*wV(L8uXj~+Yrb+G=NCuo6Gt|Bze zf_f0;e*HVDrz{D`xJ>Ji&7qXkB zuC6Edpj-PZ41T`(>Mk#`iuUiRW#{dR>b^3DQ8d`z;bCF2`p?=>Nl~{Z+oCZLc}RgP ztYuHz`mqQ-udgTFy?p`>TxAXs!X&kL`5p@h`EErKZMOZ>?w>r@Kq`)0>c|!{-CUCC zhDj9hr~}$jDx5S_mF8Kccz9@XTfO33ta(wWBZIS3E8&x5FrD%Q8d`Xxt~)a`GKRqx zEWLzOu{69hS0u|cV}ixt!5!pHZ+#|tXB1EC8`&Jr+mexFaVMz~_G#N)W^4;GQwO}T#G=$J&BY&WN~HzQ(k=^pN&%dyR?+&t9#&Set6IEJ7p?4yrP6k?%xs> zC4p6+P9#+to|$+5FN=@6fT`gIa&u*{DLvZ&|MX0!8DF?A1(kI>rci{A?TV_E>Iko$ zX%kHi{8+YRq|VPvo9di6J-@8ooULp;C=yX%m<_V@=Ini`4rK{)WkX-O;uFH zQVllB)S0-QL!KiSO3k2VmZSdQZKGk~QfyXzqYRfQgmX*q86ol=(w+WU%trS)h>&#m zv3I3fllWWyjhZ3=Mtcsi%BmZk0`h2b-(1Pf$?;>h$o5az&>|Sd@wr|8Z=G#Bz*7;L zOi1nZepFczYGP^c;#UfheRzf169U;}BNRI9#Yh5T>suxRW$4x;;FO+&M9kQ45iA4E zM7*PV2nJ+7a?0p4dWFVGC89PZ>xo@!kC0gv3Ni`F2;%Ru;VJY7I>z_fIfC^gL$FK6kj^V12Zzdeu>#QnL%9YMD9C|s zKdvhyK6o(&Q>{9VzVaG7O25kWfS}T>X81z;LUD>g_(d)L@I(i-sFBc~ih)}<8uVWt zdS(Z&SMIUbvnaf&ZN0A1+ioQc(4zy2E&S-Gr`1r2%*E28a+EWQXnk+>I&jL!sHlA( z=p{`6`dm-BPk}>p&9Vi?mP{Ag|>b;|PRfG;xs>{u8hD(t9YHmn077Fe*Nk@9d32(7CESd1nBz@Im<3W|DK z=@#F>0yXm^nYIRRQeOt3BuDSIy+E3jv`r)!_ZvF!Ai6}WJvb-zFbtTyYS(9BYi!D{ zH78Db=9rL^w&z5 z=@>tOVpFd1TP$61|6CcHW;Mgq8@X>FtNZsA9Po#TMzgsNI`72d^78TzDfvI>59%4Z zNa1lNdNJD!5@XW^7>F(4C{-w0US78$y&3|c)m_km1FH^gA-SC_gy;6Mm&M znoybzSkGS;79!c6YOJgJ1A`tUpAv|%+hd%ry6qC(Uz!;lpWjStXEslXf-K$N%zJH= z_Bhsrq&VAuh4>#wZ2GV-=&A;7s84)c$af(P)3`^Em@_gZg8yBj*t|o8f{|y$`Fjp) z*Q{|m5sm^g0!3d)B(j3Jn4)k{m6eHp#d$neN@*AT?H zC?Ff#B8gOLGz~&d*n5L5)NPEF))K|e$jS+vpd`9<{Kj99B%UcMC0>9YxMgdE2LJ?3 zB^++%wDs$MH8zXItEMspEd2(xrMfl!=N!8H#VK_ijgy+q6f?}xYNVBpH0z_ZXc3T| zzxqMigFVYlM*{f8_TnW|kHHmGt|{BqT=ANZ5Wuo zdYXzkCB0mu^Kt<|qzHdKGH`L6B{le{6IyLnRD@;_jd0*^xE!e20p_ig{+AZO&Sghy z_8BcWQ&KI}89oxXupY;`$HnOi;+@q#!hu~4`>isY-iagRKo9C*R(jL`6DuXh^{;U@ zQ+;MLZP;!kd!6gvTy|$$#@pQryw0=?e&(nT$vep83?0r*i>58@0BCq8mbkByW=v1d z(iq|`WRu0Lph$(Aj-tGJcT`9))H~yMZ6FVt#2mMUmRH?7*MnimbPiy_hQs^zDPCUn z!83IoP&$IWvF)AEH3GnnG(!*M*Kp>-ch47zYO7EniCyL*&3-VzUcy=~km&Y(W82xW z9=@qX_TP<{z4YVem3x#dHh|tv>`F`@$sgWyI 
z*qE7f^3u{Bv|Yy#I2q@lxQ{v#Oy$if^dVjUGCW7y0&6*Wq5U0Rqf^Vs##KygQTmI& zaWlUUFd@ogv@Ku%hRO9~7^sClff_$`C1-^HfDme`aVz8iMwOhPKaXvr^W6t7$kaV= zEFIz6jj9@YhG8A@9#4xq!0D&P(fzAP?ncTn)6J5SfxPpUGIUJpp+f~5cW{{wL*4gB zHR$}Ug?W*;DamCgk3@W-$@QZ0MEn~JV(s~tX@~ahZ-4$R-3xD9yB(>|;%V^2)4*W$ z7TN=OZ8DKWPD0xdA+azv4SGPwB%IvbtA?jSXX~0A4h>+R{a<{&cU;f=|NmckNhLH$ z8AscuWF)f`m8_65Q`tGn%t})wGpi8FK0?Fhs5HnZqeHS%D7(xg^}F7EKHq(je0f*C}{a7S+@+;M5PRjjszU+Kag6G4%KCF_A0+Eg^@GI8chJmercUo zj|rh)StiL(7N_=z1c=DQa8sOxRn_U?#SATzFQN^R_+{o>NW*w)IXoQ=>PEL}a2`?@ z{F03Kb2xh-zSBMT&>V@QtKM6Rj6FBam;{@JJ#f@)*-~?TMVMYQ%8A>|D2%*y@uU>A ze{lJ$R|#y452jdJDZWhSw4SD~X~zuEa5lrU2Gj(9Hg{ApFDqrZ;4@ZXWqi_>D+e}H zXtyr^-KTRS!y_WbPTRU|n}RC5<4j*XFcckmuGq&6rn*eu2@Z)Lu~TIi#SfJd88PFv zS|E6JHS^L(+CSJwBAG8VkVSaidoAk$Mn)og9-ipY;HXGOQMgzV)TG5k&2lBR`FMjkdw$%$OMv zuzHr_fr=-(RxAi~0$WB67x9FkCBM4-@a0RNw;@oRW!o2Qmpj2S<)E!_{SpYqX0Qiq zTU9s*oGp#4oxrSAMSFkANS2AE=3%eH$JMSU-gSD?k{m4?XEN_mi&3_^2H$4lJj+|! zova!-wU~=HL^HnE7FdTu%5wQw5*eo|&rY`cP>i^&fR0Tb%(d!^>_CXB@8e_RJNlY( ztc3CVnhwK(2_UW?9vvTH?t$2H&Pp&O-vDvY27VEAuSt!Zti=_hpS=TW>d%U%MJ~sW zAD8;s;s_^zBvQN(W*q#p^ID~d#h8D|aB1gIxC#bTmOeJZeFPA*aOy@Xf^=n@4ch9@ z!l#f2Kx6mZ8otBQ+MpP{bn8E!_3vqWtWoWf&ho~^#m0K_WNQB+_{#Q<-<9ZF*+Q&3 zbt`{g7?e$3K>wtOyFO2YAp^6i)ra{vIPKp~D|z`+!E-b}NcREa^(Q*6lm-(5(ejzx z?7m&`rIfh_hpr^!W=P>F*gCM@C0)E#3)T zZGP%cXiawHkJWPU#)TY%Hc{`vD^wk++(~!>AC`j$FkCe(Y7uk}>(AS=+E`R{)EGI= zQIuOU^>!b`Z|^eg%AV~FX=uS9x5jV9&J)aO5GQ4~__^*AlP0jyVM=`&D7y_$Z?x-q zedOz-pHi;=NE@^A+gr0kKVS$CLZRs4@9g4|P5g1x6Uc!`=WE@3M8VZ*8?f+_)+<(A ziO>8nKHRqNLVM@-PtgH_Q;hBWi@NOv5V))EFbcFNhdPu69hKbSA3#XoQke>im3Q*t zqu}keKdppJsFz9;E)ddD(O8-fq@ftXdH&}G*EcHf8B-rW=Iu4fOs)LiaAgShb&OBSl$ zt?0D33U4C~dg#4Xyi;Psv`!bSEG>oVTD6a_t#e`{wI)~nR;TjYyN7Ai{yGV;eZ z5R&~k^F5mJU(f2ZoI(y+Pu`6six>NqMX6~3kJv-5Xh6Ac(&0Pdh1ZdyJ=FJrw&w_+ zIWz-hy?DOO7~8!VqMJ3pVe+y6*&J*TAbTP>j~0(_!;hqeXok51Mzhu#5$B;ory9Yl zmcE1%RXV^b@?a@>_ts2BV?XMAjfr9g@JD2;p}Bdq?n;2L#*mcFK-OA0TEg1XC^ zHY_S6NtS0`B5Tp$RkX=b>yR?D7jip9M&|>6+r(#x*uvNMR&IOVqu_Uj?4=VkfQ>ym 
zb?lf4l_bvs2+6rgA6W$sH$2aa4WQxzjn(ihi$#kUzYLlRZvrUZlg#APL?(y3b~#(5 z!=Vsb*$k7E;*kntE}q!0uipe1d9$aNzsR$Pe}YOx#VGM5UN(P%F6bxofyHZKZytsE z(vEEZVAoMQMw{C*X-{P4-z5l8y6w8nVt-+A6tnOa&}TKTNUQ;z0iLanBacEPD_yy+ z%e0{Rz-q#3dHw=|2mpAatwlh8!<9pz!*iG8o$mga_fdG)`g=Ayp@IYE-HprC!)P1Md zuyGgWSM36;m2rSF4q$ZNgbMHaVniC5a751rBBX)$H$-C_W*i~Ddp&FIx7#P3{yjD3 z^KXTyM{#EHqWDw%1;P};^A28*_8Xb3errZUDqyA-OkXr~l#fS&yTVG<_(usZfbY+Dxf(G%hr|*IrE1$Wb!G{QYrVi_WsiAvIDwFF0Et zScI_EI}j-OLZ(x7GsIo2QXf8dK=}{D)4Z)Gn@x9}t!rQ11{4Rgw~l8v%p=yUdRDJ{ zi(+w;(6Y8pW`KJv#Q|)~8m;D|%UlLpAFZ%;O>9etxxA2?%4od1`}G!2cqTaz?nH1@ zQ^$C%lmO=AjgLbL@{WPgjK>yb|A(Zjk}g-~X#W{T*q_m62OM^)K_4TB&}5~ukVu^o8zqJCqtu;qoV~Qm*`&E1rf2e#Q!pstt$X4s z?DCyiq$+)N4ZG{df+1`5gRg?pBGk8j)S%yC(i*lBdr07o9=UZt((7|@eqPw2da}Qlvl{$l(~4G5g#dv@3KaF^ezVQ#IIIQXgtLnO^I>ic(@k{DJff8?H0FR z3D54a&}D^Fe~b3=LMX2#?W^I%oefXagAyip!T->t?xa$J>?TnnyC$FLeZ?Jg?>?rYD1L+2 zdm+-8a8e^^2Ui6wFLkX~>nOiKs=+oZ(;!I^ZMIaMpsj=R*rsjUyt85jL*R0WxU)fH zw|INfQ674*!`#fw2&k+05c7X|#t#|g3l&*aJLTR;>rr?8J7xS5@E?A*N?YJKh6q|} zurn6k{=~aUAzlla@+CuD1NojrDzgq8noPc-fgkii!TgIEw5AEmZsvQ5W+-M4S*Ojs zT{^QFmqzU4Q3b5}E*sZKSBGMM2}g;=_V0@zc8Ckrvz;I$MMXu)2f@Lj0q-o{ux!~3 zpgb>W-M#akM$0}FhXb_TU1r+8y@2*E$Kj+?-vzuL6j&R{Ft(ATx!*aQWIx*eK+)@q zMCHaxGBc;cw)ZQ@gJO+WFqR3y(htQBP2O;xt3EvLbIdjN&fzH+7e~3grz>B6s)?rN z`7`ebj@9o@rEiXpzrEk+#xs@5@e?KpQ*v-I%^1h($f=$9^#V=Zyu>O&#ZKsA=zD{y z7d+$#pbbshiw;I+5_IG~T@jgEq9>ht z0M6nprS|6L_|dh3rBaP%a85lnzUzvSTw<44i{P`rvDy}xHdO(J1Qe#NwgJ~8-FEbX zDPs9FXiIZ!_A7VsEQMP<_Y=nM>nSTdnNukO$eH4g_@M&OQhZNOjamO)C+{(VLvnP_shSKuf=fa@K#4Zm~> zIvIQ2@TCzPI(ULF(k{@10>QbzxV+?@9#=dr07-$44>!dNt6hB@(a62mt3kD?AHoCE zP|U;A@p7qUlY15D-;P}FdL&8qz8fUGb(DuS>u8Xs3jtvr17q>0uFN;y|Tg|lrz=JhxfO>SHVz(7eO^kUOJ z-B?K>P_dqyI7V=2{e9i^XgTrihi37t|0A*_3~$7fZQo6y6$>m z|H+&|JhHNSk>m)Q@rBm@Z@&iIM^c#$PWkf=m!FEt_H7NVs;ox_#@oPy34Ib%NZ)OA zAu(<`h>!2UzK~o2Xf{50;1;5lFsMZ^TLv4^-tP|*16c$Bub1W1=hJyYsXF9*kz&VH zM|n)(DoP(PiEAq>xuvppBS-|CPjZc%<3y6_-s0Cehvhn3#y+PJ%EiINv0{(%ZH3Rfi2GL*+&Zv-#~aEL0CGeH)|Gw0I~I`?EcgJr`q>*3yK8hK;1q3wx8U$ 
z28-!CaBmB!L$2ifG`X4)OzFK4!V+(wp+s4fltGZ3#UzWLD65S9Su{#zQOfl`9m=8z z0!QVoK>j*4%GToaKsG%WzaV+`{jZGoDMb#NoDri-adtpuf@?uY=0{NPCmG+ckhzS! z1m2?Wb?sK3%GDxFo7v9UR0mvS%>BRNbdaKsndt#OUT(&GP%y8}>KYn$48r%s$HyI1 zd$ZlK12tUU-jFc|hGwz}0GYe%%@#m+9vi2z1xxmG8gM&B4|1~@zF_2r4KwbUytaXq zxqEQHfB%eUtT??{sQ&wIfmmZT0qH5e!i|F5JZG51G+_>h(XB?qFIBDNP*1+HD(;Y& zGEhW2Zg-^ssIM>jb>mdh!;_C^rLSz=`u)&Md&|)&4VvrP+L~@_-auh$>M+J`qIyGR zc(kLP%?=H5wRdQka_Q3Gb1U~Yn>Be+Kzcztzx+l&^f#s$l&-EXojC7yuZNT8`TJ*a zP5lrza^oVZu=rBuZC@^+Kv|>{1YCkJQ{4TF9hecKa(}eRa7DB10v^+o^Iyn@$lu#R z^EPcJw0*X_$db?UAzgyClt!V{Bt4sKo|^0#Z0}UZTywKh0y<7#dvtWV#M?WEhPMoH zwX_|gWS21EFNAMqrfgoiinr*SS1t}=(00pRb(Ag$N<=91z#LMn1JTQ-jT0SG$)|4~1kO&w-5PM?Uu4v4YTk=3MP z;kGe}(K#ptT}t!Uy(lQeDlNVHj%AUbR};cEw&`p4#BhZJb*#QFIz2~yIL1^f+9GOp zL|P0o5}NWQSOmYhR^E`uPK+CK)q5dc=&7#?CSlYLGyn`|Xp` znm=;=pP^Wu8Zsy&Y`bWP3JWkMEn#}&I}z{DcWa{du4(vAB{I6CnnmVfL6P}|-ugTO zH3K6GLa8TDN0uA!nS*8k$Wm&|b@wT|l7;wM-~!20T$0p0n2JCEt?-jIv}9pNUxQs( z=JxUtp$OC;HD^h?#GkR7yywncCq*R_ESuL@KU(+eXJz^J>7_oylx2xAJb#_gePUJt zsinsWI=&nfch{Za`FO&D)VZn8?+I8-UKLlo9SdP}Nzk6K;A8Qb#^g9LyYV}QMF~Yr zz<24;m@O7>CyrkF+6%aW5;p3;P*g=idwAUnc`Si_?jZ(~!N3O_!!!VGMvvBleYU&_ zUnCo@UD0*f*A=yWBiLRRS}W@)k#GPA%nXub2$UK8U1)Qd@c3Ykq6u7a+FSbxTorX6 zL~g?VGfeS&Vdt`|XVl<;l8j)0q5XR&_q3@kHbn0}r+2NGN;CLgT*>`MQf}_(zi1IB z&&a95eeZ+T~lIlG~i!n5>p4D6s405*SNHHZFRYOv+gYtrkt?(x4I*_DK z$vj>p^;>1f46Z&62!>@n!#Or3+}qlG9&aX6{ZF!r52CW?wK~w1+rj2?UNzZ;!n@w-xo1KOE`6ElbG%%_+A9|0H zC8jE0KRn)jYrjZA#K=Gr~U>l9JzFED5k9!A5f zY#Jd0CS_wZv)zAu3B)BmT8=F3|4rtm1W0}H4yc+?_+mBa&VjRM zcU+m~yHDr&4=k}_y!q&|1A*8AH7+;%2Kq&`K_=Q(@`U()2fmSLwA!{EVb=*)M`A$f z;5DRsg{tc3^}l}oTC{#YAr&``qC(%GB4^o zd9l_U#4m7d#biD2RwC46ErFq_yV97N&)>ff<>Yt_PiZ`P@xG^5R<}m7Zrp(>V-CD5 zk3dps>r3^7=V%7x0ReJ78$5A?a2o~FZR2kSunrWIX%ET4T(5u?$yVO?|LA&bQCdt|90KvdwwxSDc*@LL$zS33Sa`O>Q3l-8 zXEUES1v~7hp$#FyYJ3M=`dN#27b=VfOc!M4A90S3TIbeGe%ReJ^Cm0}rd1Y;4MWic zBR4a>GX+fyhCcRHHCzHS{dTl6vYL6#g1K|8ljGh04;Mg|TN>jTTGAJ}B%|CJi6)r+ z9Zauetf90CIKdZN)8B$?(yOag= 
zxVv7=b7r0c4ay-U<+))`uOo17HDLgM2=618iu!elRY&A2kVZydjtgCbk0#O*nRE?n z?^e%V^0X&^qg8_mxmW&&W+}DMtoNuWosCrLG~XUA9#I3*sn2czgGDf4+gw7MkfWEi z1tg&+$Gh!*9cxE}z$8X14Z<~m>=f^GoxMkXJo4fk5<={@7A;#IIB}v0bmG$=m~n_f zo>?hhQNX=&mfPr%lV4u?d%=Q6N^r+_eCo%s1ynsWWB|6D(&5ZIPvy;~nd?4M`B3T_ z-EkTyk;gxv zHEe=kL`Ay!13+9ipaeBL@`gVQYL$+es?qyRhsH#iV!*FYToxQ)-&5coQH}J|F&x!@ zo{@)klMpYrLynHJxq;(T(=4-O-AbLdb){Xn-~KE2S(lT_!XUb5E0h>q7KiKGnupAz z&9z_;TF5v!Ci4CFmjagP%rle8As;05bm!jtqpZ4I`~*tcHKliNsBIZoQij4=^BKDf zIGQk+vCL@Ih4TWNNZ02RG&@GO+>B?&EB4K`fdR{S)1yi3fR!R2O@;*#62kP+7_RT1 z-K2CcI$u^aJRoW?^|L?M)RnJ6^3j0T6Vfzg#OQIOMLLXYRgDH%$l?#hEhqs@+;GXd zMjqC0Jo8p;FjFn9ijA#$y-h!R0=<8Q(4a&K-&GyJ)Rp3ha2w7N0Qc3)ww0&lgp1^UZWu`9t-5KX4*8@7q_PVE>5@M`=dP zLW)0UrJUyCIHpw?SX-@4qe@+9tbLBvi240Suu_RU$A*`|R}-+{rJ?s)G@eP4+`~!n zaN0y7{Z9nrMkqMWZRX%(jw5>R9biMZh``W$&-Z`r)8Tg$=QaQTf2<$SA%5U$o8ia7 zZzRFbdV!GvuClFQS9iHa?be0{*L?o-V`JNE#BpR?p02wS##mGrz~aD3Z@2=ud>(Lv zd8dV1^JS5N<%*$gvO!b)yZxgyLuTj*NUq0ZL5S(XARKe5IW`P2dmptQG7Y=vVbHCsby8m#bv=;EJ!2 zGu{vLCE~d4OAUxZxt?kR`H2R zeL%nNT1F*2OFV6MdP`zqHz0UGhUCk#t_rsMxhFRT* zR>n9Y-HP~2dF0|nH5+>i`I!k0P6!WO2LF$fQ?{Yt0+jz>V8+K*CqZ+#?j{O z*Ip(QEHv4c+?h%J+(y(zDZq%_hlTb}=vW2dRPAP2F>%{-8gyCMGw>nhG3otYCR5SY zOzC~R^t82Yl}vO7lDw%Etcnn*$u{p#6QFsb8qnz%*PV%Tc{TM0AzWLDVqA}Fye$W< zg|JFmeV6xkNWu*fX3Tzq2FSkVCL+L`2PRl%;oUBDUGpUAvbqyxP>`RHJ-B-nOshB$ajPshB zpf;HnfMRiHWp{`GdxjSaU5ZTmSt%vf3wgpeB_4YwGx6J2=t&)@87dk;LzRnce;pkN zfn?SjRKZde9XPRoRR~Zr_kR+w2R-zt5Idz$RNTE6x3yWUhGs&cEH7J?pFqvDfp_)g z3dL6h(4JM5UhA^3E1!Vr*zFZ>uRDe@&dm#42Dsne29qm%IXrXvP*}6tvb-15@98+p zxfF7+H_}ht|ISF9Jw9eeq5qsbIn%fAvX5BAhueZG}FJH9za?t}b-|pbGSB4eEJKmvT zSMPEE{(b2WE~ARpWOSIn@HxuAiIXOo4SmcsY5hyr{~5Fg9De%b3G+~g){pQ+D32j@ z?uy!xp-xxxb$p`Hl+Ei@!q)kRlwR38aP^Is7hrHxXwi-tE|)64&7iborc?-(tQ6e##b ze!40mThtuK#RV3kr4^0cRVE0_n^D_jv!3hc9#EiWbWf^E(4hY85~iMrpoYRD;7blb zhyl&$t<8$oo!X2Ati!IyQLE!?Esfg+NJK2)XTAN!0Hdn&)@~|Cm28;14W**Vd;-`h z1{pLw76GG3}JjUiZ_hR}CR6*qkIQ2(spK9YjXJ2o>asLm)4P z+FoZG&xl9SoFhmnpemSFoA7SqSUWLmR>*sbRB!4886<45L8Qm+)CBK-AlFS73f4Az 
zharq_LOiC>Hi0b7)`jE1k$UC#8r72**-%YW(5#Ry$j03L6GKAT zvg&gM-dQM~4k=Y=YnrjUw{)e}_O*o1fxnOnOvkO-#To(%4&yk~(yl`nn3XCU2oZkV zvyDEaSNaL_c+=kNx0A&fhCf`4=AO+8@rDQx1U&`>Z?ME0!KP3o2cemQB;TkNqxscBE8P@pZGposOue9&8f}dRVSj~%)CG-hy~x#rI#!h7juiAJzriz zjX0QLf9G*fd4wzbR9?s9TpVt2W>pbz_B)lkcJt=;gFDWgvu@}y8J}YKofmX;?V*DQ z{nmw7F0q>>iYRK!;!@^wC~j7;WI(^bNv z71__8{RMeXu|^_3c$e^{w8WIC{8Um>?X+EYu|F2y<*hBKWli0k!FgmMQIRJE1cKy@ z{<=5YhSgESJ(B{JkN4<_7_uyW_h7^AF{P~(ZZKMm%M5gNL#5?Ua$d?15#_Qg&vxb&npd#@K-#x5{kAPHR>=hXZ5BG!&8w|G{jM z?g%NLf>VzsMQ#`aU6O6v9Xh)))R4LhDQn-yUul`d^8d8~JP?W}gVq$#QaVO?++(Y= z(bPwTy)!=`t6ZfE?mK-t%`jBPwG5{6h7x+9UHXTJ2lHW;?*1u=!oGd|8k-bBs{HT2 z?Y@_ZLuepuFWslnCE~;t;?l5NcLowaPcJW-x;bg-Lw)dh55X4PRGF!9)xJS>)uAmD zPn_5Q0cXd0=!fDJM8R$0t})Ybn{)PDJO5C|KoXxG4tD}i;#2iJ&k$nxdKG=pZMgjf zI;CJIC2!v3-IGp?4hjO>K%2EZXW5Xk@eNR?&aXK`M1PYzS)7v}C*Iv`4WtW$rkh1FTlZ&I{X1|Sgj$MK%V|ogI z_f9GuN-UJA{w)C^RF$YuZpHU&DW$Zv&fvj5x99+J5j0va9HMEgm)`yRv*@7i4<=~L zf{MIUys_cF-Z8W^V&!fN5&A6S1%zNmDv8$0Y-v*C9V@6ZXv<5N(PoINVZ@YCqndHc zbbO;cCs8?U5>q1F>U@5%J3wrXY_`ZgDZp2q4I`o4>V&?*2COM#OI1%Aap15t^^OeQ zn78QMi(*nYwwv%NqelJ*0niH;m_iMQ9%|g_*G?8)2Xrei&Bp5%-tMY>l$|KOO5(mD zO@mD`Jf3Za5#aW=*Fcr!)!Af8jsD+%;}f%%F$C7MqqYz)qEFGvW&ER}i7?k#HZyFk zpfQ~K7&YJYja_Ab(u8VCJn`${gDv(EJcucft{QbQh{>I^sJpJ^wNzO}Q|LeIJHF>K z`sQ73^qmL9!%fMndyexL1;!!G!vKUN=%fy`+^wiz+oT%@9qq6I4xC7|3fR2!7tcv^ zplq9_6qZ><)EjYvF*f-EuqxnS!3QcLAx?VSrv8y^lXR8lb@bG!`Ts;J*?Y~E%|NYR zgspkRczVqS)z+v}8=!5HO>sFr4r0#(v}NxUH2Zb)11U5aB^Zb}%QAJDn9qg5sv%1} zhsKdGJ=r4%_LX4&V-cK(=ST3NhAXpt8Ua@*oLfr~rwSJ@^8p@0+;rc39n-ar7EFZC z`-sewLT}GMMhD09)g^Ly+z?|eQ`2N)$Xj8&-~0Kf=Ni*dME!RJPlQ^ScjLVN2 zo{8zKp?@06Kakkud*_fM$-V#7jV6Uk;~LeAhnwC8Z{y(F=S>_CNd4KBcZ|G{-Fy#v zc`-#cJpkcL;Mb5l{32|FH|D*RHR;HBwF4Q_)SIc3gToogJQ^})wbr4}XowQepO3Zd zN%zVPII>2Zz!;8H^b4%3&1rs+{5M$r6wlEZ80b zp9-9^q9>rTra_Oevo@5GEH;{$sAB$)f_VYXx+;t@BYOewpzDdGM*h-2GCx3hG43Q$ zJjkuLr?97*29%yzD}~-hg%m*~`oA{$wEp!< z+kmKIy^ZASfMd>m1M4SLELA8?de(3t%E-ivYXBs1B|12DRK@TxZ7d|b=8jpFVv*?llpUSS7?e|TT>1{;QwpXbK 
z<|Nb64d!ax{7H!qH2ta<&b&@uOP*51S^8YpqNn?kgRk`1&{o(#KP@eNRP*y^=8YTc z3~iH!HZPf)bf9^o95^+Q;tNOOe&KYhiSDi>6mq(YC%U`KjtrBYWMKE9qTEUk{4GW+ z3s8iY8f>=>1D8EADw+-W9Nu*Hiv4|FJ~$3YHi(c*@H%0;e^Fb~;dJ;#5j^(FzB_ zJyXUaM9d64E)meE*A8U;t}2UAoM&7nOC@(Vr@LD(%MCpyfe_2+q@lJ{3ao+(9Z6-4 zV_4so9!bw#i1iJg!y-)n`>D;_u7Cn&de*eSFK|I9oXyG$g+xmq+zv88%G7$mMYb*# zS{7r^nHF%~1hMKhm6&&X0{W1(UW-Y}hk9HqPYK~|St>aTM=S0`v+VtYK%)_UR`whnEJMV8Zy00O6@k&GUy(d{GGyB+2VY~j^* zCcm+@GA(x8Xf>@sT*uf(;(SZ9MdvlhTQ*U%^9;7E61$%duBJEaHg)|t_EviHs<|X& zcV?b9Ns0L~e+*8BO2p6adv-46usGwq8QXJ`demd`zNt=E%l6ALF;t{tq9*+KeOocG zVZ_+6oI*$}H%xoJ;Tf2p*UcCeG}&^aQ}qlTrwMk7Iy9v2C0GT>)sanfRI;UFLJ$il zRRM1f)Gu7T7?2C!tg}2l%*q??gKrndJb_R#?cyCKqEM{&h7Emz>wXsGJfB_ zO`Ph=UF6O%3PbaV^nB|U`+q}p0iiTeYq8MLe?;QJ?lH`b9{A1pNGV4u2@%7pS+)z+ z`^F(8Mb#BMnh&Q!f4BY}m`5<#Mffo7s5exO$0Y9nc7o;o@peE2ZpCdwJ*WZhg#YxrR^J@BbN3v zB7X9!srWK-!|3vj@i)!401X79@VNnnPWE3mdB~B5d_A=4GgTyP{7x_0fE`tgBaAno zUy;qMb_*;$-7J~``7^dUpB?DSMy4x;Vm|{H>ba~QWXYw)W4MqJ@DJjw_}5efm43g7 zwHOX@5-*QR7}Gom;d~si&Df8Lo0V9zbt@2eYTlP(6a*3K|Cwvs5vNZ+!EQpV-I34k zFQvQbSx4R$e3~;1gyOlsN6((wXorvXA}(J;%cXXuVXStK&r+FQ45`$OfRz|NGoKI*dO84{39xOkA^ zSY?RoP)q64HUqhBo~?=)t{()J#vG8SzkgDxE&ch|z*YL-d#BSFXo%mw?`laCh!hoG zBtUMbNrx6&n`Rdsd*KP7BpA_Ts?bYVN5g6ZsBc6z>}SAgB-#0X zjP1uFJ%?rOG~h4|AMMwuesM?{MduO*E~Nv4Uzj62QxtzmrzQCkj$J-d>;@5~@3*I< zn$FnuaxQusK^+DRh*yN%$=9gt&$2)nE-OzVb-{}}-8dzwY{K_I*pEI5imZ+q+bb4m zIi>QEMDsp!4v+${NL#8HpNqn(7d_5UW(lQaP?rZNwl{!pB!^zYvjTm7k~_&-pnH$&YnZ)6=<0D=VuR!(rZ2ij2CDlOjpp93o*Z3cwDTRvsdZJFD7WgSEoDU7hm zeldukZK}nCv!h%aK9uQ&aQnb6-?VXPnG$f98l3vUXFng_4otC~J62ENk$JZg8A>-_ zD#oQMDq4DaLuUyR8k}8PAT)9mYpF&w_lBcec%DJg=QRTC^_QvJ&7Z%1ZT%_4DVmgK z`J4(FX|LOgoGlV)K>I)0Y=AFe7v-qeVcd~LB_}W>VhnhFany+6rG}W7nOItGd36?^ zh0r@RU63f1R`2GodSh;!eDw70M>`shdZzq3#+1ba_C-uH7|V z_~k&6iYBzJqS)FHKxmUqiNiA>mr;LjD{>{{IE-G&TySYMR`f>9f&iC@-z7_;bNi0j zCB~hyYy10BEX*Z*?cYk+0Loo{n?h{WS$OGUvxT3eK~J_aiEz~Bv7l9;`od+oAP&Jm zKyNCt)#CpT7vNU5zM=mHczi*0@4+cWoy<=@a{md_*Xg!r72Pkj^{`xzo;TG?bhNY#$F%js 
zUFyIAlhv}HNEUMK-AUdN4Z6Y=EEFd(2Y>&R><%VlRO)5zG}~t&zBHXIe_(`r{cz;B zOk+!DX7u>1KMz+@g#1@q_q!k2E-Rs6)ZYF^X&u{qr|{w9+qIv@as+Ow?m>-X`KOxb zVH9Pgq;+d=tk#@|P-BaW4jbNUUT;&H> zTnwx^jIpuN+G~vCbh{)vTGNMgwcNats<%zeK5qNmot|ezNJB*e7*+E*IoSd{Uu*F? zjbNowq{qQr^SlS&SQ^j8JGe=D0J_@Po?>w*1?eQpYm}|q^-C~gn^3cQ%>0Y&%-gz- z@&=z8t1Ap6MfRhUJp#?F?K>nnW)B94X8@D2j1^2zV+s8G%HUXtgko9=DqKFo#(^u? z^TEUKkjGg2D(Cbri)BLY0I2SVbfFXUVL&orwccpiwQCa;Y0?wv&ZDi?Z`JDaBi?@8?gnHHrWs8@n6$FRW14e1 z>wXz0!t`wWDDxq~Q;J!L#3qcoHSl=Q*?NqDGap2)IBbECxPjL=@SEK&1L48+o;QQ7 zaGcF;hKt|aYe@a%(^VEoU~4)uRz8}4AuTj;!l6vSmy}JN;w&Do=vD31D?@}CZ9VO z0g{;QZbZ%eHipkq`TN(h9g{@6OELas03S@a@pLyn_lv0O1b}KqMAifs&zwizH)n9I zuEd8lWT-p;K5c|-YB%r7nUS>}p$kZT%n4^_&{37`>UXz`_ksmd%PJCWPpXSN5uP~P3i*O-F8Z*YADA(M>sg$^FY;3H5R2~L^x!=pN zWJ8LxYpH4cTJlHM7famq#Lw~7ZwZII#Qq21;!C4en3$5HEshTs2pPQzbeE5SNm zr&96_M33HqKEtEcbCQDaT%U_rjGoM_BO+F@0GQ7lQHO-z<=g|v!W2T2pZ}tEf$k4I zo5S4!#RLPuaW*9jy!Q)@5>Kk$V73H>p9qNk9huja8-SloWk ztqFG?M=jsQ$=*79TN`#zlil;e`~mFUvCp6HF>2JN1~tr9Xj+YxA3*F2L)B|JT_~y0 z19)!PUl4nQAZv&*H8#**`kX%yG}4BZ3S|8)oo`-3WcfM_o?iVK{pq9um&;*a?TTjL zECWCq1wG~SBM2KMpVI5i_rJS$sBxq6Wp4}BMGYW4!ZP|6i+kXi9 zk}w?1R3Gz}tGSNT_i$x=B|-WLw^jp(>Vt-l<|(!P#9@xP#I- zdt&9Z-~{*4QVgr#p2NW*W9R-`$p>O`-6uBk6C0YT3tf~@(?|AQ^m=rkbcPDz)4B8^ zW0ISJ(gI?rtjIKgu8^F;ABF zP!eTf{Zp+&B7wHyFw_qr{+^aQz%%grr*8QC19S1U5}isYKg+n+iz{a3?-6U2deJ;^ zm5%iTGRYQ}4kSmLK?%cJ)F_lEDz3w++Fnr-Z|S`@TM-PKRmREISm37s>8Y+_?t{Cr z>!RW(Pc)&v&CNx328^r(8u+#se0xuP-nt$FDxgk`Yk^|Gf5HOZ4!a}8m@L(o*ii&m z)=HP=QsPd=#34f(fPN{uhJ-@}JrFtVR&Dd9D|niCet2$u1j3n&q2)>X;@ee1t=dM}cy#or!1&Ud zKc-YMffV+oFOy_(bD7)<55`TH>Wm(n1ag<~@II@3U<(8U(%7nFjiVm4dH3#{=9jru zWk?FaSYKS~%MLE)C^t>BA&OGc_fHFHp;v5{vdm+)y==5&)X>A0#XG}b3^#bp99>M- z&Brju>R*uF^y?_rCLg!~h_(f#sugP`qJTt~rczFo($FnFDW%O-dd`eIdDW1{A)aM+A?b(nBVN!{TW{ve-Tb9scmm__vyY8*E0o||=9V*C5 z5Bp^&?*xHuWx@Ea)mVB0B0gP@fMimb$ao79b zr>0y67P-dQ#SgRI?-mq=!X3Rbh#G@YY5z@NPfzr}zjGPMCa#)v{=5B@$eJf1q+bP! 
zs<@9%+-!$4LsmQ}_APx~Q4vXyNv-J#zb$p))oB!7smDGbK9G)iU|_A|W2nEx^a%Uy z?C-*oLs=p;cYtwxT83tfNs&a>WM=69&6@# zjAevBp9ctSSU2mCjv8?E$lgE4U+NS)I-zlkmlCAhOG+l*fw2-pp&a}vmMx|D8`?8%Jsj2q7>J|u{ z%7)8YWIrD+DqZlitt3_K3*xTtV~YmRs0g&x)oEb|JRFf7&h1bzK@}HrI+x=So;}r0r8MRRn?2!wzxR) zmz(uQ?jKduIIpp`(2KHT;{dT{ZlF03R}tIeMVpPAJ`RczDQf9c)K) zi*szhVN?w7!6~Z#;-MRmQ20=Y(l+aV@n@y?Nl#x~6ejF}GKRHaW0+Yxx{YH)1^9>N z2cnZ`G;ek#W9sNCvMbjptLC&bIzN?}A_IVCOp%^V5S}+q%t?U^@hrcr6pH; z$O;+MmT|wQ;yXmm-v6s4Z_=3*m#PH-t|C?T=>bbc)=K()6T`;9+SniH?4oHnpH09r zO!!sJ`Di!q-2(+6YBnZA)Ll_hEv>!4vN+Zwwmtkjea$fJgT)`m)C50M0U1;{RJN3z z4Qk+BNUE?2XCRaaBftC*phd;wbKUoOW1c3emMu4D*nPy}wJ z{nual_76d&+rBEbnml>3NMdr|O<4PiVn&?iB&JO2hQ~Xpv;l%=10kUPEo~D+yuJHG z!~-UxNMVF64~ULNt;FHLw(3uq(8ylH8QlaU!Uo#Cd$(@6qG`L0`=s0^_l@!N-iJrV zJFgv0s)I9<*z8t*{#x*q)$jQZY_f=eQDbh!vz&3i1O^PYhU*1f=#STT2y=zc zHrx@#V+sYYa~IsR1UT-IAf~8<9S;toD@I!3v(7@yM~I*kgyV~F>=hg6Hnw|eqb+uD z?L=&mY)HK}{}J?WlkJKs2@UoGX_w_M*Rwz}8!*UVAGk#r*n9D@$SJhJ2M<=zV*m#> z@ZJsyoohB~<`k-OGWCon8=|9!P;RD|0T$s*x4r1J`?P7JR9^D$rZI2RU{i>}RZJ-3&a2E`QoQ*047th{ zZHkW0qDPHQ+K68PUTfljnbvZ=vTf?4;$q`oy|zaoc5U3}p}bT?6(r%LREg)w$(!%B zNjc0Dr4xe=*mU^toBuCB0X6{x{HR0C!1kiPjia3D5~sFV1hu@FfiD4kq6QoX1%U3k zzgt)IJyae>-dkq7KJ_2gXZ$5%;ZcLqvWxf11cHcIgKy&N$Xe#*z7`jaWa;1ebw7{YQKdkcAVu1oS z{A$Usni{)l)ArSjNa)+5X+IEI-tVCU_wX4H4{z9UDYR>nL2^&_Hm`)ZQMDgTZn)Qs zqp{CznAq;9K>H@KVE#JdHjvwF3qpT5A@kIjr1a*qMk{*w4|VA1pa|#UANcp*_)jCP z_fEUkktbXgC%w+?xmTx?pLduzUJ3lYjw~(SxQA}OfA3peU2Wm-%04O_>36!2wx)x| zbg7Rv*n8CzYZ7fH?X=%I$?PyLg^{HQBai>7%do=^p+;_dn}$n@{X3N`4v^{Z#P*a@ zVpsC&0{_a3rgWPd7=>%IGr(59P}@oJ!Xf;&9gKZ24dm_w5iJ*NTYj`dF0 z^(}QhO~64o2i_SE=xG_#-aH3AI8&Gx$!v&;0C!V93I;;#g}QWYR@WU(BQw6xh^l8) z%Nz~^BWsPIt}(?NOBsL$x5)a>;b%QTRxBOe)WN^N_u$N6rN#S7eG@v3gk&xJhbQN!Gj8} zTcOe}wxp$A4+EeS0?|vy4nWSd<1zMl`5KiM4RdgK!QG@)EdR9zH*%=|Z>|xHWs;@- zX6>B^EW?iT9^{}yr`o!C%a(E{>L0ONI?YZLp}4|(ydcF(Pb93sp+x{AnLVPEQ&j$I ze7eh?prXx2bpgwl%i18#PIhT#U$y8;#HH}tO1mH9Cr)(63h}d#xU9C_4!oER&+8cd zvXhi!X>o*1ad;ikfa>JsVA&il051Hji)p}7M7>JGhWsBWcnp5w-X`20^oy#i3G~Pf 
zEi3u@fA|5(6kKJ1^gf%3OWc0j-{qobhPu*2*NZ*Esu&DJ{k3y4)HKXVoY2)vefYV)2F5{rf@@Kg&QtEmM;; zdhWM9Ve|-NgCzo^)68DC(Xi;Y)+2goxZt3?IF(g*6S`-x6G{h%lnjNv&r~3mEuO<6 zg#|nZE?k_;J=Z*3>LOxR?u6!APBfZd;|l&p71^n5$j!bFbZo&kRd}rLlmF%KG3A)OjLJfm zNJNig3OD|^TZ(QV46cg%N19zKKEkT5xKz8F)KnV~?rom2bkU+LIs>(dpx@-wZa*gyo2xrAZVKv>^eGJfEu5zBHYxgmi4eyD!y|+5GpS&esi^>0U*&@;6UBx zJZwm_t!PfRx%cpAHW1T+d7uN$-&Uw8tN;>HY7bVlB93nA}GuITkH9sQOG$%V7 zY&EFv#J~Txr9ZA;c0iotl*NJoy4m?2oTvB=mUhXzKv^M={I!R`X2=R&<#c$@07sXn z!v$m>z3W!EeoQB&!$k_?HXdNUFou_FIYc}n-0RO~b(^TAmo`OftU)U}BX>KVUGS`D zx|2S0sLmrxky>uqyLV)#i!e?|US@vuxiRkZ_&J@mwGhDN8EcGA0J20Wl$x~$d`!p= zUqYbb1wSBA_I*91=Y6Qtt;J74Z3n;*ebC)8oNigH``&9B1q1;|$RLAl=;%d#Vs?Sa zZUzp0`^A)m@k0M6fC40#hR??%d*)nUUln!lbW!sZ#0I?ittVQ%yQNTS(PiJ~g%(%; z4;O%DI5Q0%oveTK2qiGE-N7Iur?-W2%>M{bY(tm$`DHLt$gAu!WrK6c$tA@$ zTn~BVgzVZ2YwGLtvB)o|=8Im~;m)wQv0j%9U|~7Mb-Rg4PCajm3TEsCtQZjgMI&{v zxLZsw2nsU$G124tjT?#c!PnUdzAvAH*H*-Bi{c<;BH3|pn+|UtUc0JjI}}EShDr<( zUM^=}Jt_&+P;siD|C#*`4p1`4Wh6DHEkg4K4<&KZz4XqFN*No{_X0KnjO8fi20Hc} zO(#9U>v}ymPu&JuwIDSYhC-a--8<@GL*RvGs{aC-f?PsO-niP@pSny%UDlCaHqqLS z9x2{@lqz2Eja{4-D?7$m5hcVvBFPJY(gH)!E1#fF8so*>#7DMs0j*xQi*jnFV#w&; z$+s;{x@@JAMYxYIQf9Y~vTyIt73{o7X6_urbZ=OO7Cn2KmbHZK$7!o+{bG2-!G07} zke3P`^8>}1r$z;BAC*CJ-w}%+pF4AA%OmDu1`|A)SQ#)D&7|532n0b%!BkV-ul}80 zf@mlB*r#uTg|Y=G#Bec%HXY-uOA>hjVIrWxjJengz)+ad^GC4|?oA2BlWd z1w$gkI;dPk(2wwCyHBY8?I2NZy}UXY<-w+b=~xiz`Ke)@ zrIl4EZffe5TvgG#=bvQTQ>aOHrJD9C*$zYX0sF>m>|AEv=!OyrgPK>s^44-qy~F{w z4CaUFM~r(!bEr845kaTUojuqMEm|GD!pX7v=jr~_{}*4y zy833$V3{|XQ7*H%LwoESNOV*@;$(cDJ&0RyHZ-3Pp)y>SC3KCCjg2JQt55Mp!`k*d zL6)zob{3}EnDOJyEZhAe;ua>r!^tQritX4Qb`DL?ki_Y!`bRoy=8#xq^-rq`7^%YH zwEIdI018!kRT^u-?B+3KwdZ4)HHM2Osd=mcZ5BnLNT{0$35i^7R(!p-$Z8x|H_*hv zCy2`|SPRffhZhB#!L_zll$sR#$G&phj3AqN*F%X+z+H@o{`M6yqalpCxxC|{)ssbqxDr=H+2+wo7PGK7cC6xWb^y~OSv5Jv)!>&NZjJ<-(6WwB zxymZsU)OdWA|)PX$4>&oGBRHh%?)Bk&?9pfb%aQt_WR)dL$1@=6RmyHF2c>#HH6y6 zb%?pE6Ww*sQerfNfcE-AOOk+i1}pz8dXeQnnZqE`|q_L(#HOCST>b;xztt4&xJNz2%ZV>hTZybjp*{ 
z9#o4m_aD1kR1+LqCu_>}GMa{&+WCy8pspD!=d*1jM`;-)7KE;Fz@hj9JP7+wH+lPs zq8n+b4;i3VGa*XvEX|w-kGM_Pi;9ZtD*t$-&77VQq}(ROnixFSlzV(TFqRHVp~i@B z2yAN^eoqR-N1ta>wSHt=kA@`ouY*9AWP`(UcFTmHJ9jSZ<0FFK_uPyOCAgp4;8yK6 z0kwMR0AvJ*O3cLny+p0gKdbK+4Nsbj{)t(SSMolPCQ7@Q zQ37}XLY*P~oeak;_|nPG zJcyq^B*cq%s*kqt!v?i~6pp0k94syL1Gtn~c+Bo!44fgv?rTRWY>W*JgYNAF(-4|C zA*3*r0R|pV5m!ps@FZ?Hc?mEVh4rM_A=^g%!f0L&wU62Z?g>=85L_#2M>V(FGjMe) z0i}uWUQDQl5JJ!TKC;`efj7l-%u@rf^Ah(IJK}*XeTq{bGuv1*_0HJH#2}tPDHI+> zE+HlxI>vQlvD&9M5ljE{sD`>N9uf`Vbc1+aPcTcYk0ZuYVul?K@F1nqe$@+urSh#lda;P(GTzV%i10Uwo38BwaY#Ps`L*>c8xXmXa?UyO}u znM0KL2J{`4S{RB9#j(8gNyc6;cpE)S#8zgpDfEPfeH0zvF_hZ^(~Z$CW@}0 zlWbY}VcAIf<9`=2J+h(K5+}92vYQ$8O38A-U|DXEX6QA_Ue5E?YrvG-rcF?o^4|0KuKYMu=m{Nm%sF|5wS6mnE9OB{OA& z7(0L%%H+l=DGli-^12IssQ<)Jv;94`3MaDJ1F)p@A*H)L4O?g-r`55rr#NXMcsjj> zMUwAVzs2PO$cR$z^ItX9)dac7f`eGd(g_w@DeIA}^X^0iO$-`Yb<67n;^U2+hUEIW zfbcvXM^ClZCQ7fE@ygO{y|AeFYuz!|=%tdxL$1j|b@*`vc+sN|KE!+)AyJK_&6Y-o zVSrODm0kiF;OhkX?80%4r!1inE||bO4G9f>GaU7zCQ2fxZjQ6p^JPTe%y+B6rLapqexHeO<3 zREl5zf?BFVo-cjKgU^D>ss9p+i9_P4H~jPLPuXkWhrRycP~f56W!z7ocWwWV$jPYLc+)LNggt+o4K&88rJ~ zci$IO33ARf+gNX2iRWG@cxTOM?dH+DRBe6KOJ{K&WsrO3w3K_4^qDJxl;9=12X%OB zt(e_H;o;_{S9j#lp`5~>9F);8`Nw_wU!IBcUBU(@2kW)=_ph@8X?lW;&FK}j7~jtx z25W}n~-Mbohs#JG=lh3yzdZLoouh#-NL&d(Y4u>8-`>r>X@Lk||CtsgU z^D9)V8}10LT9>_h*V)KPG9Tin>lK1PP+lo+U>n05HA-WQ91NT8|D)@@<9hDjzyC^7 zgqBoR+X|)Z)e9YQEX!#u4@_Nb_=&Sab?SHI0VN1cRmRzG3qrETWRKKZ#&(7`ajHt9)IX}7*jjn z+{NUUT#&kex+F^D03A;UMtoP}fr&iB$5)JmOfSox=9W<7Gj5+X^A|V-%(=~bj`OR5 z7nK(X0f~mTxi1qUF2X!@==xSvzhTGQ)9Y)Ewd01XFd|gx_U3m$!T@MYlntsqIaQk> z0EfNNbDsw;b~o+taEI{D!va2uQ2IoD<0N5IL#)_;5c4usu1WY)%nI)mHGvBv)ekgh ztG2xt#yFPBRtzb9)`cKinZcd)`ETW?N4R(6q?`X4PN6dL@fB|^6*>=p&3pF)9^wKG7|J&8YcPWCs zQ?XenpFE%x&tX0@HND+A6wk_}iU#2qGRXwYJfr6}n2mXAya>arNJ?E6qiT9%-l7RK zlr}?#8R%Fu(>TFQuP=_`K6w3=M4f_Z!kEHDs2pM$$|F@ioS1#cFkHx_J9gYyIBWK7 zbJVg{5Hji`;dJ71>k<{aeY=s>)Q@B&@nf>?^JZGJ@8>}OQK~8D3AvRhF|iWaaYhsz)pW`b2){QRhH)D~{e89OB($0S*F+Qj9MLr?;WaM{ozuHp`F 
z@b4xOoUgjX>6Hrbh7w)AIK&0Zam2yHb!e!PQ9=*xQ(YYA<4P8J-$eaP%)gSkL4nze z-Gc7RW*Wg?R#g8SAom^>SLWrpr@N22p%khX*wyK5_{el9(=-6FTdx4vpmcLiHn)La zJpgcVLay$uKx+)|B z)w!eT62USTCNBgY0udUxzE+1=EbKA}tzwc>zm-TMVRi5h0s-t5L%%|32E0VS*>n|K zSbF3(iDVQe`$UHzv%n5IyZRW|2o;D*pllSRr-{{ih#ZG7$6(UW2^0i)d_zSKCr^5Q@?$j1?;l}P zbr2Pov~ka}IR{MUo%Z#enWIi?GG;f=sxd>lLa{Dm{;@ZYcJLEuO41`Veu32Gm64si z*p4~F8uW~CI(@Yflzg%Ge2mztpPpTaV=T1>QSq~)L#5@Y*ML6k> zX+06U@I*rSOxY>`qH7O099PG?6GBWtM`CA~k2vTs2%I~eDZMPBJR=(`Fw5vyP4r=v zpXVAL=|1Ka?^ff3Dn4J7nw=^kFgS4^+SM>g6As^o5{ShbIyzY^g*<==+^;2Nh6s;)%zp~0m?bCC4zE`=9~R|4~~e{_>N25tTK z-o1#K2j~wKN!s}Q=N~0DwE>+^7(_y7&PEYudwXS%H`PIdH_$sq=V4}p!N#VcPP8=@ zC(7}j2F_s%(6&?GxN1dM)a?`RK8Sj8-ueYV_Q-vZpX6kxZE?@T8+{tz+ z_#vf$cod$WyB9NE=1Qs~P)daGmmaka9sKf5Za3gSRcnmSS4i&`&~CZX^s;N|y&qAM4>HGYS7 zdZ@qi$pv5qAY|pZ(@m1G>6D906&s-XwX@6ST5M+-=-(OmWIGNUCbpb9#VrhMRQcWB z=?>K3qrOY80i8qOgR&adPr#BcwG+_|^WdgqDTbP?JQuiX-Ug@G;; zLW5)ozu0l$Xtt@v&vy>ebeiDb%l#+2 z+-;ZY0dNBPX!tOy)k6DGWcHs)2KHSywb=>-??h=T2SZw}N&@}@x0zZ5>FVf(z_ zvKcD>cTIUCnX9l8c8E132-GECt-&N@r`avjBOCsxju5|8^W5jp+d#5E`9}LuvSYTe zYY;RX8!-8@PiHc#S#3vM2qC}+YgZuR$I5HPDV8C?#>!YxSzaE7dBFU$qG4bX-anPF zJd*@jvGY0prliSywJ>AtFQZ^0P{jrS;41E=osf}}2aJLGjphN1SRMx7i`jmp8^0J~jEQhep?c5s4clWBU-Ob-;Ap;}K)6Yun)3uG z7PRl(0C^+W#!9>BCOX9C@9FiWC6rysuOXz^@`keV{IU0B()qbT#6{$Wuz4Fe8+F$K zo`}#6`7;(QxM!z~_(?vP)a}&4waH@P#kBim6A$7jJoY<3$x=~|^!PH=gn}rZp2_%u z1Rm2%7F*D>ZxV0>M=J+6ZC?G!K+h=7FpDxA4(_J!Bw{{l0mp)!0TCO|vOqm?f+La| z*}zrl&!HvynTG3<-R_ixXV@wq0vj+7?A&t)P*@H$&(%&nrd+f=a*4MsbQH#%^F|~2 zyqnh%QiW{80LYuG4JXNc@?;m?G5!l4tTZz+Qk0#;T!z4h*issTTl-g%)p>Al^Z`J3 zc2ceR@naa-r{M^Q5Ud2%S1`B}_zPC2PEjnpXv5GqETJ=qRtDkV+2I*T^~q`7NU9#w z#czYl5cNz)d%yCgXDskdudj`Wui?`|QuH*O0RMv*mF(nHcir$ApYOl$n!cf31F&+3 z9*-FR(mmad{t5#;8KX7AV9F%a0SVZ%97#dG97NaByavbsm$ab@VgOLb-Vj#@St-cv z)Lc#8L9J|SUV(H`@E8U0|K{9UQY{$#I%%)J{C{%+;BsBObjfPcd0=14;iOgo&B8!7 zUe7Fosjz$E#Lv`Sm+l}j754@?a-C(kt$@LusNbE!Apc+zAT#K(X=DNirXF$oU}Mmg z-q8rJIUHk}1P%r=t>kE>G|I9yKNyaPkztGG&CUG8x(eAgh`|g{{+ltocRT6!M)Z^I 
zq7)sTvdq3pma0-4E}hCw8GCa#vIjMgEE;KJ@F2&rEmPXxsUN2KKNNo`ZnNw9h&O${ zyr8e4^-0X~1Yi<2>-#qBBJ6m5V?$p2EXpoGk{O3siL)R~)n$s?_V#yJ=TNW|VmE&8USj zZ90^BV8E*GNRc6zwtN-HMsUtwewD0J!UXn31ND@HlT&c6Be#r3OwX@5h%6UZ-Iniq zFStw|Z*Fe1=cfhp=U4F+ude&G!~Kfh#*PmM5(qQsV%2WY<^5+H(S^4Hy;?gOqDc`T zzPcAVB!t;#;TdT4VqcE|xJ0GyHOI$l%}o2Ir;5FjYec$O zQSwC|)l;pfBnp!(rq=>p_&}!NI`UYqO{8iD)4Wr{_(Ham;CQKg9h#!393ynMwe1%` zZ{71f7f?vYfd)d7W`mD|`DTk@-p%0pD#OL>8A-m=+WD`eODDzU2zF(?F&)mM@@AJ} zs93`Y5{X2i+UkqbsANrz3y%R?Oq|ik`EuDS}L3kPOPz zdYTUn9I%XkP5>lg-uSCM3=Ub#HBXx-WIE|vSox@cbA#bR0x9~nl<_D)xXMtb%NN&t z>jM94n14sT)iYXS6lp1vx3ecEoC<)GY`1`$&GYZnT6=A$fJ>sc!zQV8C`Fixx6cHV5JFma_Nr0`GZ@Gr(R94oV6)jCQe3oXPu40g1K*U+~ZV5ngF z+0i{A!@>Aw-OPw&I*T!{cwk|uSqis8n`ZpGbNh5Q#jwd1m8ovkB^YnA+5X5|DpNP~ z7A4i3T>03yzsp8M(C(ymEwPjb@OQrPf`#6q$y%bZc2`VHLGuy+KPW?aL!Aem6XYb25@JvMRw|nlaS!l`U)VW_VV^C*={UxBV{R$)X z(6S-(2!ZE~nd``oH5LB5Nv~e52ouUr6aVbU4?S}9=uBp`%5liPLtxUhy9nI1AH(b&m`J@jm7 z^;%Anp1%Hr%!ou&qp$TKk9f0|AA@uYgUzY5KY0UMu*kdCYO{fbU;g5$0d9UDj+k;v#fd0p33>u>kA3vJS?%mPURAV!4PA)nQTuC+YtA7M{EQ6kD2IM%O z<3tnM)3OXMg4vqE-^(5Z9@iy5(?Z{XD)&(R!Pb%f9#qIAx@Aj1-6$w{!g zRddnknAVP%J!_WGw69*VA8n@al5R;>IIjHm<41@{XrGZ33s$Y*aM7Y?!k<$0f)OL; zM7MU49pH_~3dMmfZ`@If65<4%Q1bBMX(_)_dK{;}`XL-?trg{nk%E|akFWyf)ok0= zwaMVM-xo68w+__}>O$mBTMesBHbg$9qs#`Vjy=B{d%>N5n6SafQbqTY_80vETM%}~ zXv^AH13yv>+`4KDJp%)Kid5MYF&!Urd&s_&yalx-;VJ(#R&lN91JnAzDi9Z# z%53nxhS5eYR2uDml7q!5qtFrd3WTnzTm~e|ERrs^hUT(M!H>x#o0ZJeQn9ipKD&RG zp`>(1nT51`QsJDh=#0w*7-ZlDCV0l|Xt_R%C1q5wMb`8~sg}c))!8ugpQhqiREg@K z3jpeeRp>@#UmRN3%mIdzbi*^&^G%knSTQsCXi}0?4f(qx;crr5t?LM-0@aJ6G;HZ3 z`8G^Nj5~e8Co<%`YUeO6irrk$OzIH*PA>sYc&Jw!@$+RVSfg`Xs1eE^C()58>d)@WLWOfVD=+coGTIL~fgLLgE%uo-Sln>v1$tx?mJK|N#|Fl#pB zICFjDg=JjE2P~9bJ7@9Yt;EU|-e;*oUc7vXO?p=E@xnrp2KO`dIqDME->}FxQG1VZ z`7K*t**aF~0}W0*PpACgK^0SK)?8xRDWDC(<4Zag2R&^;{(uz^pZ^SIz#VpwnS&UG z%0}#*1ytb`VXQUZOtT)3Fe>K^%-#wIbfzGvV!a%4im@~gJk|z;6snT8d$X5O)RwWQ zi9YN=iyN{6R-L!x`?gN%XG!{irvGB5K-j(YZEx@lie0wsfl8WJRChUOqFs+@+mdLJ(uYo9-)vx zv@}qRdL-^ihr_hwJ3>+X 
zr6*&;w-n})Q=qa*Kr#_Nt}vcUD`kNv@#39j3tW1&s|;jC3p zL%SfWo7D|BXG~X^Oz6+u-*0EOHSDJlLU`pC^vOtvj=!eE$p)pq_?e_6iyNJin1;iA z5`s@=+T6$<9{!|s5z;BzkuE5eZdJek{CO)WZC6D)d}itYuJ7Veu{To zrGc_W8VptWHJ$fR`A$cmhyW_;J!afzNQd;p5+N3x8=)rLKSa$Z?#OvVgG+AtyU{FU z`*CmuYtE@8#UV$XHQ^9$15_Hyy{SN@O}_5~%8UO;c}w2gF!SM-KB6Lm9vV@~j3fY( zk?$x7LJ|)v=K~FbieCJShD84EvkT<3Ft5|xq8T7(${+b{CySu;il9r@CwBXr@g{Bp zRrh+NTb}rr#zuzp=M0JI#Q6AhaKUWG>BpYp-`<~*pF>G6l=)~_z1L&W63Ou!kz530 zLOs5lg@x=26(Q87-LO1MFr+LRh=83s(WR!}cT};1Qdu=f+wuB13>Mkbe(7oA-6@`7 zvL67#RO~?k_IV6iJC7e2xLOu8=?-RNl9GZ)kxAVqg;23IW~Y~|jNMpO@yy!S;y85) zF{!c~`cV}TdG<&gu(nB82OIh1y=OF-csJd?&tJ1+hI-bM;Zvp*4F7uHjxv%tja?Z> zVFnPCx_S8M#+_QNnYFBv`P(K4m@2pf=E?{Mf`n6@k@?JUYrPH~dblf3$EJg-vuCbx zfo{8Ij!zXgB@kC384TU>cg&hjPsS%d<%0Zt&G|PQlb`rWpm_!?VfgkWphRe0!a+xZ zbjTz%B}L|@&7CQ!7-C$?;F*%DwadRkw*c4)G&Ds`f-i%)|yijF|%OF%# zzXIwo%{FcBHP&9_tvvY(d$X8$Z9VoHLu!a&ht3AYPuEeC0xeR3cruhHwJDDa`s}2E z0=qw#u@+e>XC(~ILjW#1f2BEaQY0%0KZE5GqZHU_wh8Ors)r)Q1XGE3^h97Qn)3Ot zUcc_ZIoKab7`mIb)pI(ha+_VCid8qmzsr=(C?dB(&C~o@$NeNj84VYWp<`hYtrHUL zK;_@8>(K81nwGYG0cZtdQX|C0KmaDHa&jkslfD`q&n>uncYX@bjS|AADZjFlg~hvK z1|!+@J8C1I7ZyrBs)kjp1gEKgDGT3(A_<-jUOYgac_`sM{Gw zPn@_{tc+5B;k0g(TCIf7bm>y;yewI31DVM67N_y-*({0{WH4KyC-+}Uwr7?exGD$Y zi4$}V1MzdgQI6TtDBqC$>jb_0-IdcQ7q^&$@YSiTTRGPE9xsj)N(`LASyC|R+lxt9*!A6 z2+jB#qcmo7b&*`zBGRr`)D+yCt>TblHD%~CMbxS)TE>&dO_}0aWU}{ZLqXwuPJ9_$ z=d<3l2iZ6)>D8TL3L-`D622k6dlM2DjN&(KN~`)|JOFX{CddL`eqiVyF5|6#{@`UJ zqTO|EeBw%KjqE)YW&VBfT!O|;PIVF|X#J_+>)b8Bjv5JUux)|wT-LvEP zKX>kA(whG#a=f{eVi7Q3Yce%ZRMhIapH^fL?5Oo$orUzIvJ?LQix_kg>GD~SyA5mbMdFp)ny}1XQM*=|ND`RDa z!ncpN63RCMgqyFBTJ-Wjskz}&=P zNlSCQEDm{&0M0~h*}3?TkYCXR4&HMy!}w1zP9{)|l%9b=;%XkT$EHV*mY@kXwM5-H zcO4@?wB-ps-@SUx8ZpG^|CBOxYYz+jTA3$r1Gs5I2PUhv+|z+*#Y1K_Axvf`rKP}l zoqF^reXZKGsdyZ$OvaheSXFQ*AboImCqur@@MjS`MlqD+aBvo`8d_?ae97w1{PE`F z$IQ(FKbKcoYbGL=yOz7hZMzQp_0r^V=iUDN$y+{1&AwZbd0FI$7@rv*E zeR@GTcR^euy?bYEPGg+B0kMDzqlRNUchdk<1o^4lyL(sg-Os$LZdmg<5A((8i1Jw5 
zl^9zLz^QCRlA$~`>DnHKIKdv0llzJTNP0+;0Y(HmxwQ{ZlICPpPaa9(3_GLZE6E5m?)HY&LfQ9jD)ikPL2ot>P41zz zN-=u#emrc(cIwh4@t*<0`+IpgJN*~KOmVXu+?%F=*L-M5E>lh14Vd5Rxv}`nlkl9| zg3*O1>)ozK$W;baQQIyRA>wQG<=6q3_Vg66`10xp?zR;Ud^E8e<@f9;-e)-@AnUOM zXE7|c5(<gGkP zdKREPcEg!7XO50+HFczXa!;~%GwEw5Yh6c$K&mvoGlqc#r%I)S3^sT1;*vPc9m<@v z09Rt6RMB%R0QkWg>T*F9ldfLfZ#RL3zJWpNw0OQINXsTVsKEX5 zXEA#8mgOo8W|RmBZ}hDuOnuQn$h4YSOCk|2KSkkNYqf{AmiY=*2v|~e-w9NUht8HLk*inmDyqyaGiXo5iz<2x zX;|#0?z^E;=ZeOaLMxL)`=K2uELJZqr(GmTi}RjqBjyCM6L8@0M6Yz*aT+a3&`w!>urS&)5v(c1CGSL}|%YFE4=k z8N^?HWJ&rL2f_FYYDL8AytWC(#GkbZ-chF=PKw1f2152k7Isy%w0A*2zqU%0LBA!HX~^cEhr>s%YSO{ z4Pbs?+3o^v4*bJ#*IbNDOvo-M}_3Iqa7D#CRKj+fYipU%k;RlZL(PgqWfsyJApmea@;@p)9H+73X!1(Hm zcWa}LOrXYJSnt|hf>`T?Z!MYd>E2=v;w;(**EV7>OGffuG9M&BQ3a?z+04+lPix1xT~>V}#8O-OJ^`NyBq1B#Dx>=T!5wjlyDoI@jKb{xur64)+k^?`9-G(M8d}Mzi^?? z0t2s^GiA$$4J3lW06wNA`M=@KP7VdXZ{|fA$+&M5cwo<^cvYLajvT2eIL~1kmj@>% zs`u6yTt(0ju58Sq8j}P8@j=aY%m!7F!t2Kd&a`ahT}>a$pmKoUfh>(6SvWJ5$m7p= zxb`Kd{4LeHdLCbCD{*+w+G^@l@zDF-Ad^_~@2(1DqXmrl({;6!jhye?zaPS9$1T8Y zOzn`n>wPNe9v<$B1A?z%F}^hGE;+h(T0l<7dH{i=h&rB@JmUvo$gcE1NC3b z@=s!z6i+`Y+Xn8x<1dTYur`g@%KSZ|Xso)5#>cOuHbJpW6VPN8{Bl%?KQp=he{%so z&~)wC$I)O%>Cu)Wot-nOAKV}-G>`zsv;@A`M)_rMTD~T6oXeA7@+^Jhkgi>sg+jy( z2gFz$0F*9Tfe*Lg=7=+Mr&p-?vid3jO)SlyTBOA*QgsHhN^zpH2 zGpN~&G|LRhrVN3TGQbW^*yrf)o|dN>ya?_MYFn?Bd#P}UyvN(D#^!>u(#aEN21Pn& z*~pf+82=_=Uz;R?a(jVAty6JV?5GzDVvw1g9n{(as?Dm;FO1giVDTu#yQ%=X+2&A8 z9uF?3q9G)R@zaVPlg~%yuAn@Jz9t*#r)7W>yV%Tb{m=2CX^=7XNkuVD=D{3<6OrV& zLIjCXUo^Me4%&T~-kbc2b%|M2SY)J#NMg5u-|t%}^HzSIFmgH?*B>c3LF4*{2tDPm zOiMMlwb0!~DV5o}RjV0{tRF7~@47)%d6EKurwAeG-ydCqFqiKSMXOOtN{Z}Ryx&s4 zr!rGq&wLYwtDys_NoDndOXOjI^ZFsXLLvDXx}8r+*&Y*Y%}f~7@rx@oD^Y7Xk>neR z2h<$UNx|)=gkbU3ds`3O>g4K;(uB-~slza;+Bo*bb-{2~1Qh+fA(v8qRb4}%+9ko{ z-u?TxM8mTgxKy0_u6`sWidTQLj!@XuiKhpGh)?_buMCeAt_PD6fBLTFJ;(E!sVOfV zWCSP_r@1=2$^tS|NVFIY`TZE`en=rjE;qJe2d!{VfN0_^r9w;QEU&^KD4^|HZjqqc zHfT%k?c2f?64J|`+29V)Z+@<)4O7_+#-OefG)gKX2+8mFQohCW4_A*;vJ`Lx2^x3w 
za2!Re<&s#olrqdarJ58EhHJU>FUE`l>|1y3IpM~6Tq)Qnw>2|^IPryn<$w*Wc-#?WfBIY(hs+D4s<)69uT|FJ1A z$np$A^pzvN&kM+aaVUHazcVrlV|=1JE!=(x&uoW-ApCqQ=j|W{SX6wyhGxbr<2w$n zw;zTz)c-2&Fn=W;oJ@u9yy*hYAPXfGkHe?k9~Wm<|B5J7R8(a9+vvc^$H4-Z0Bo8z zB5~d-dP_wESfYrtMb@dy`HT^^9#9X2uT=NHU^`%$xwgzJ>#PJZkslJ{}7vZb+2 z8y8*AfBzoOs542ib(1lD{TV258FpmL!ZnEPz`)MQD-x581BVs7ML2GP=;KA35 zb&W;lVP%u1*m5xo3JZ6X{DA64!K>dM?1ELcdqT`N#tND@rTtQOK z0+U5%JZ0ZDu=(k!!W7_pI)l8b$Sp2AeDtU^^DX{R=KrMRc($7uA@!pD`Ls0I2zQ*_ zdjF}Rr4ee32Em`UcUE&WJzA%t=&=D-T{POXZry~svA;PJV+z4X`qOEHXxnY(!kQLh zMq}AI2+(SO^V|D-t#!jf|LCqL9@-elrYB>8`=^y?BX~1!SF3>~5IfsljM47aB_XJ- zox(xznGtEL`XhNoq^_T_h3PcRo3^c3|Kulb&kRz{29%<7qh+a=g$Q92%xGEV?Nx5+ zq@cCYLB}s_R~kXD|GYGm*Nl##%%+Dc25x2ivHpsBLbFK1FM{II(--k zC*QtVSh#nPaZt(@(h>~I%%|9Q$j;*dsc7uvZxZ~}Hv`LQV7PbgDQle(F{tiltR)Jg z)C-&m@%T}Yb<4hH^Oh`;EoQQM%yxR(BF#wYHgL&l#=}cJ2cTn7Tgyv;8xTw&vmRu= zKR}@;tFN-){-_#&1B4L`3Nx4m1DQ70YTT7eHm9i{;I!661E>(Zj|8Zg3vctxmU5I+ zU4^c7Zb57PVo^Q;J72wmt~N`e$hvEw`23cmi=e*Aj8A_y1T;X%o-0-{evqwXQ1AA) z84OjDqFv)Qwz_Yqkd!M))l6-SJecKST-N`#my@HTtxz8{mK{<4=mStO<=UEEjoTvc z7o(#}TS7b|6HP6x8!M?5vuNKuvW)t72>ns_cH0o~<=*Ywx2nU~3`&W&)3|x9sN}qh zCa{0}j{og4IGe%sOM!;TcbSxm6BW{sh>=5PkLj(CG4y9Hg$-5S{oOzhpztzaTUj5oWeed&ZAY@q*CCz5uYXPl~K?6X8z9N$B6eeedvoyv<6c3+4f<} zDQvo<+vv@6IA|XEty;GZ;hC@K5wvcoatMbBR=U@2F>H_($p}Imbw5hvi?c5@$xBlW zcl3XT)(7-MYn8}SJ3ZlK;^(KEBBnOdf_q&SMHf_XK6>TcCXCP91@vwW;h9+3H{SH}yeh$UU?8}a6_cZ;Ta)-1E znUec&tJb2B5*-_HcI->6O;pv@y$^QZzF0?R4FDHE+s_U#R*3!`jkC&>#^K;HqDFN* zq&^C{<3?axLm9QU%qClu>tsy|>uqB{)1+if#IGD`2O)rd&Sg6r32OEv*=noh@1|g> z=*3mXetOi%c#j57{MR{z4T1JFj6e&G>d*;`JIz=%@eJVyoB7O#!J?e=CPVep2(ln| zSnVo=P)NI@HR{kEA4$m$(mFmT1k0*SSEh}PuS4#XVv*sVRMlR=T;DoK*<_4t-|S2 z^aUx4+s5YIy8|er zPQPsfuJQ+NL#8IK(&>Iw!w6xji zn7|a0o<8K8A1P03{k-gx3ybT-ISFv{$hkoS2S&xkjr_d~gjA*}*?)d)mM`xV%iFjq9!4l@@9IkHPce9RkmJ}j ziuE0_sf9-QN)uP;-o~xR?C0{Bz~lSq^Wkx4abG=8q-ul8Y+(c&S2EtTdT^_X67%g^ zS(gWvTe0FY3LD+i(v85T+c|%;?piK}xHh~Vo7<;#x3VnP#frvk77P%U`=+Zv-n+X` zy}R}y#+RhmBTwLjM-b&TRjG_Lxi156a<-4?xUQ2)1+lPK|Ap>Fotrw!jsY(X#kBgm 
z>e013$b_11%|5*Xw^klDp6i)&cey;h5{0hs=gCyJrzoLm#mOv)j@4AmTV<->UE5gK1gU%QDbEj-y7k z04O+qr`4ld+~Kk%gutc>HSOs41NlRr{-qgE2CaId1f^2;5-UpO#D`FXZ}>*aTfAw% zT-&@&=SLZDPwUf8(Nmx8YFp0mWA5)E?5`zdRmHx(x;n6Bhx#GkU)6QJHE2zTjRUsc zT;_T!Hz8MFJJ!Ch{toq_cQtl(KAhNTWT<)bXqza(kd(E|~tiaP5V$h2tkqRHIdrVg2Am!i@9v zExAvbcmoXC`4pn-UjLF?WikX6A#<80HG5tn42ld+oGedQ6Q4D+9R`F5pUPKpTim!G z@07HRu1x|Gl!`lR*uqeqr)cTJsH>{3gHE0uw~>wE5Pe42xIZr_0FK)FrM&czyZtokn|C|r(~cNpxk9Qby`yoC!Lfq_LxRDuFr69903v(5#r9u(nuVj!x)xI7?* zU1eQbl1|}7T5B@}MU<{R{2nIk!~pLo+#_VTzwi?EF_ad(U%e(=m~~GVxPEzya?YJu zOfY4yp*e1#m*K<7mRW^|&z|YiI4k!j&4{+JvGYkN%v#F*+7I;e2fp}P$+DPK#SxZ! zyh@IFb^+@Xx?cPja3HYJD(mpf(tug;36mU4UlUa^9?3L4#=Mw7I5Tz3|8Sc~HdfXc zPIcoe_Pw{fzV4TpZ1uWBo4zoh7J`eAhZr4fSH#mgy3FzPnKQ%IuDz0#d!9W0VTWMC zB;W&ksLce~s<_c=IR4vWNyu2$-)g{DG6cl5GFLeW$DaJR#wMgoVal|!CMbMTDzN+@iIHE zzW)8=)0X&46tCClIwbpScNR4!(Q-yR!k;ohtayfWjHZwMG$&6ELRz4J1-v0C%-N1*+ zOL?YngChZ6Q_aYXeOG;Md&ghAlIg(LR6(m`dtv@lGQyu8>x-yX)XvjGiUCND66GGb z;86=0`FB2Tuy=sZx`ofxb0U?}p;w=X{U|4FiE?(c_`i_PQ0Q5*&;(-%ErSQZBJP1* z&g+L;zj*d642V8JXf$J=|EZq;oHBF%;09*46amTMT=_dHd!;3V%fJe~L+xfmN93p1 zslz@RO4FmZ1&!dcK_7V5TR=S$xq>y*sc+iu=y9FMY>6XvP7-%GkvOk>-0Sd}vW~JY zmHCINM)te5+d~=p3CrY#%CRFyg6a1TxY!A2Zo4f}ECjGmxl&95E+g=AgVQ zaj&UXD4$eLy;DhPDa%KKhYVR-WUH=c>3=7lY$Q{ivpY!j5Z0iBJ6jCHe89}}3cq^i z-@MYiS316CvzIn?D}?RJ3s5^)j~B*=wG8WI^!a=goflI+<|89}E@F}@v-z-nbTvgB zR?~m8j#xj~mU!ljOr__$!Bs0NFYoMr?i`w*mBT5+xOV$bykPmJ#8cCM=!87Iyp-5Q zJs}=>!;P$OboZj=5D$X+TRqQjoTj*rx*+U`hgE_J!y@Jjpsq85YPj!W^Sj6t9tbEM z{cKJH4O2?AbZShGQeA!%wuB7`KJSoW?sQevsbjBRt%yKAzFs~ds@`aU{2A(GK9|X& zu5QlOi;N3wQF=yhCo?dN`dJUAeY#vLgNc&?>8oUc*ma^X&p(MWe|;V!mnl}Adu|h_ z321OOAB2$$%l3UgDvi?@tt~Axq`yScaIzQ_N_J{(9D3^*{EBgRw zyyc861*HmjNKte1)~%4tGWk0#TXxLuq1Rs1TUH|qS!uE#um|M-D36a%udQ7rp15ju zpjP5e*=5(BJ+dM!7eCw}NBzsp_HM=r;hyn<4v|#HD7Y@%+X$l0+9i$sPn(&!(hoqL|osTPD*K@F7~!@Ix?J`7$`{>qP}+DuB&+CTc^!Fvb>Z zxBWLb$r(3qHUhpZy0)I_6|CUfiyf&Y#TPGc3N8n z+CrsSW-%!(G`AL(5k zC2TX#y~JJO|Lw?d#ZO9`#j1;(=6z0(7IK3qeO)ymlva_klm&V`^<}~PNPp= 
zZZ*r9(GhD6ZEu+48h67HH;0!V=$92sxG7SSDh&rU&fV9iCBdx5j-@2R`FzCc;~)+o z4lJj-5vG1E#GPFMmBy$-5g33RyXI8lTAOdtM7LqexsvR7P>_akrb7lU4xNs zQm8c+{WD+)Y`2gh9DV3*jn+CkO9OHUt6xCufUO>Wb_|<1To)RvKO3o{60p3Hwae1n zO#2HA>Pp=~$Sq7)6G1CVZ(+8Gm4?=%cC5(EyibcJ?69w;8)qkMtoykM6Aoc8?AXjC zE-c{{=>khjHQpCN+74GqPn!y zO?sxR8itc6ZOMr)l=}nl=r&I2Ryk+LA0|YkQbVd{D}3DF@~9F{aTFZ@MGo4*?dSa| zJHdDtLr3k$)HpCG10uZuQ@c;Up1XMkV9vr1dwGTE_5vv&AEgi4P^ZTUkg_WGGT0fd zlwW&sLm&w6=)#kmy?;KYQd6{ic>uxK)6@n~B4~IbfLfuwXZTf6eSc=28Tj`No-5Km z_K%%m4_hlPOw`<0xN**~XW4mU4v*FZW)BOsT2E)v>{_$-+lL^UChIS{+QXD@BdQo6 zaCpjA5p2iBwG*vg{%WQX0o4GBmzE1UQtcGRqTuqwPwN zoUMHB#d&p&M{L~maC&@A>rQ|E&wp(A5icASi>HiQyZ~;LS9$gQ#`Qer;_%B+rq$LHqtj)db0Cdc% zmhEdKF5w58>}ooDS@@4O1_p099fx+~pC8P{b#G~nD-kx}j}-WY!aCi(of8m3h<)Vw z_U+pQAW;!ML?j+P8p43LQ=ZCj_0@q38;XX<*XrYVGx^(^cap%O`mna%AcG;bPp_Sd1 z@t9{c8Pu%NXBc9TUpBsG)Q6%rh<#)!Hve)LU<3wyp#lwyJ-|(ntEspVh^-z zuJ!9Nyo8!EutKy@78c&l?z!9V-Mg3cR;I_0SNl0EA!#Z%fuU4L z)z)&X|E(nLim_BoY^>F@HfVVMm#i6&TM4zTCy zyrIi$R#;DE7hPZmKPLXrA;)?Dz%Ms4F}eBF%Pete888Yu<$*4_K+!D1^?de~NGj;o zr#yzSthd4Ls(Ae~_6$-qs7+ymbTXT#?jFdh8&@3v@xupkw%J-~K%~Q8a;U#pV=zH= z|1O*5;3;U$l~x{zS117K9I&^3DF?rW+TZ(5P-MuVRZ#Noogx0lmvgu(#L(2-0{S*G z{%!@j2JtM}{ZR&a^y{Z*KfzvMZq`YD)O`&_99Ojvxw#sGSh-K1e!uedwY9A|OLdWe ze7B$$n)z(=s*9WZ0rS=Fo7|{00m68X4xV2r)YdiB$Gv|Dn>Z^wd)Yi&{;Nb``o^_s#3Av{@O=a#3Y=73$5hDL-HI9u*KgXbVd-sSyX7#dvhF1ADN4-tA}PH`1~Ewn?fxc?&T`W z6#AYm0M6vW)r8r9FnZU{{g*d`4Teb<=&lwj_1H1NUXkx1`KomT-w)KVq>||rS z9X*MeglukDHjw1U4O6b93=Ky4@~4(Oek`uOgMv+!NR3PY5v}O;*hRB^LOk$*NEl9f z6EQ)#CAbgJtsWfIs6Dg~^dk4$9AS3{z{rOoNIhhV+S8tpl})Bldm<(YaC2~M`nrUY zQ1rH1`;H<@T5Tyj8-AP8^OYZ;%s?KY?g}kOf=c#RVu{o=8|=%hm5=qphYo0J(>DZp zx7#-}&wTt?2S~u%o4yAFki11BkZs7Jt4{kLp-Rg_(h@^l$5Z>6tiHOw-i4(}Wf@>2 zv3F2ky8Y-Fa{_)EV&GkS83~ORGUUx~jA&(Jq)Hg_QYl6IXR}!kZcp^H#dGF#WTvHB zPio!DWtb*=M%`#%RM^VbLM$4<+~a&Hn`Ke*+Q|&;=-xkk%X-prnF>*~b0mZTI~a{5 zYQ4?buQl%16L#1nlVlQs_?2|TfN1D2Lxn?2l@Ns808=k$+~q)!$_)%}b$Fz6`8B)ZPWN>qX|<*lFImr*zr~d*FC$Tl3BUP%su7`WQSJU*NF;!*a@RY7yUacA 
z*wysBU)V?fL=LxX^>wP6+de2$<#)<@zD%H;kCar9%LDF{(_vnP8D{=+=VWm7K$9F4KiHaTpL zLx-kZ`pneCuc3ZW=2(b>vJ1P4RTCp;r*evkj5LHsZ%u6O+YjiR5vVYRJMGWu>-B}! zYBL?L_6{J6gtRmrsk1z(v$E+O`^Bpd8_U;2dFWYRYs zc8YU3nTfkzT-mynmetPQ*4EmLeroyBIHk(<706HFITVMrv{GOH)9F zVjqR&dL-_N`RA|rlm}4nj;$O3l~#lyhG_&cJ(8!Q+*7_o%dq#|p<4z5f3JVRtCA@w zj!2CJQs-&H-0os!_8wiq$rj>B$s>v$o+j3Lo~B;Gp2tM7$bG?XFC_q+G#CiuL2oVu z^!&_ftu}2Us7(tWkV>)azm?S)IyW~&c>$9nA%^{;>egYVRM1wI0tzq0S$9h!I7Zqh zYJ+cDw21b#!=i8h&v^EqSflvUulRY3bP!^jHCKhVpV1j;28YwA>i_8G*h;H&5CeCV|;^1j%W#?r2ou9*q z^WAzE=591r=g#w52#g}puP>!(FtJ57;|N=RU$-oc7l7ZXkxU6ZE-p>jtT(l-v5@Vl zS}1Pg8_^>5#)%*^H6I(h6eQ;T&A|~N4dgF_FCf;Exn(d}Qosj=S#(5*E|hF$+W%r$ z$(u`cd^2hDOj*4$0c0y=KcXtg;Hj?6BE_#aGxqko#tMBgA|Udc6%>mQ5d+|>e}Qty zMw0BiR9pJETNzGGR!(d-Uf~8 zByq=pSwDZRla>0;lQv7Mq8@4n0^z%+ zt6{$|VC+~hDaETX_|KiZ=IsaS;dD1cdmkZMnCls`gaQ2F%S7S7APJXg%ozvEUG`(d ztsP)U!!&edT!1^yCf#H)Lp(gS2=p&0nb=||liO;ZgY_0qxP%p->`Mo6|6yz61^%($ z;t|2}lGlKN1KT3f@k70~^9L=Y!Z@prX59Y=N zS*Cg=94gr*f{jb@S`dN&#YQR`s&f8-q4Wl?>kF|!ED^;#)O9{BBWR}m4r=elpytuf zK+rsSu4&sjWp6m6%AL_y5}8D#(lyF!>QfB7SH>&wdxp^x*vBmrWt9)J|o-#p(iQ_4Fl+ z7tgNi+_h^UMedNXz#|=7!8O2(xG6lAbyF$2I5}0<34_vyhfSivB(+Ax#Qba)U9WUF z-L|}kTkZe(j}1c<4l__loT+U$eM+Q9F(p*C-8<%~!MwdCp%8faAa|Ev1zMEv@ee*$ zI^+&>tM>Dt1*w}0kK4_ysQ7$p>J1SnTiUR}k_2uDH%nouOkT*Qb`V03XT7Qdb%%9$ z_OVzjV#$*-(It0ks}M@+);w~Zn*ex_54I?L|^mUvvOGo<3v7VyMGx+r65J_?YD?rLF+MaF8NBf%gN5pWDovWWR2b+E z*hy>Rp){BTPZV)&(v`r7gJ81vzJmwFyZ3HC*8cf3(b@e3arf3?Hu_;ZMuRd?mg-&H ztgWpuwr;_qMTY~|*OYC~T1({DgkklRk0&;wjUM1RAUk@igr~{varZKwUEg$083t=i z?ypY|Egc$9SDiyiR^Y#qKl6FI?)_@C4^m>=9S1~(cQ!iC9ce1Sq{kbII0cC0MpkVH z@qX-_fmoo?#=n|kGXYc8udmR@*^eK;OD)NxN!PYDxc><>RQQ0hNGgkbYRWLqzSeCR zyG>LoIus&ND)RuNObfJ{3dXC6IU|HVHDg(>H@a6Dp zbquyq+P$}zR7BaS)(I_!B~?l7aAG=t+%4n}`#RW-l$e$_PRVBj?fcB2tbwCuv~Kv3 z4n!KgXODI#S)jr3G#nrOExVG*zNYM-{d$sAakvNMe1RX-r&+x^g%XA~D_XV<$=W@G z8k}B>VOrKar%93MW;T*skwu#qt09aq-uRK0k7=m6zKY&tO@d zQTRRBQ84uF+`!??Vj$MpPUC4ut+ODKOAUXC7#7fD{>v;q;~7czZ@c=Oy|I{DOLpHj 
z81D^rTyFlPMMP=4_U&7c9OgSV)@!w8Bu_Vo!#(l9p543UAC70YW8g{sqh?7*Dwf3z z6GAJ0A=Ar8;xM9`>?vL8^?FvpCu4Q?p%;8?QzvCO>uoN{Az%*s|* zmxz_U!2RmL#DW6&mEgB3rXO3Ev;ixIq0uF!XK#+jVIRaRTO+j#S5mL_?#1$KPtsPF zx!0F1eXC{3DC%?wmiI&Q?}s~`7v>#xw${iH>RPSTJxu$obm?oCPI&@H2*73=-ROqt zCp)$dTf=cgU7d`NhF3i4UPgefssc6fZ>Al^yW#We^!twMAYVyqxkUJeB-P>VIito% z-mUnwvFN#Gruqpre*Ad%!DIOOAj7r}4uNWKp^Aka&RY7Ll=dowpX)K@uIyVN3TBNwK-OXbK|QHve;FWlPW*B~=2ytSAVKrZJ2V_jw>6 zWq+@z6RP9jLs2q&S@!DF=N1p4Ko#TvCNLTTs0U^-sM9Kw&WRRDEA9alMRkco!66|{ zkZIm&-WtSNHaU;j1U>V87kH}hp&tGco&Lp( zF?o-2bJY>CqMK6M;!(EQvC%f>dcet}8(Bcq2K0<^Z;7J|!bzjyF@2XAOM#-GmftO- zmR4J%b?aeKwJgzE++qz&j786a03hqut+OXS!v&yl_nbWmRxY@d@0N(%Y08OQ@3)`? z>PJA0WMRO`$5LBjah4~{2hPpAv$_?|Olktf?qBE{y}jvr%W%OzTMvwl?C84~wOllCssfiKaSqdk%mckdAJwr3D-mc4{=xIdzng z2^W42ZZyen#Nd#BWA$Y@YXbF$0mRPZ z>+z6kMHLc;I^$t+`07s#%r>$+npcLizflxB` zV0WKm48O9$Lw*i`G8BP|*KO?}HYy4Y&0q|_Pry^3&@N%euJ#=;z@_g`h<4Jbs8@kq z2zEB$x>ymCykw2z$&;a-mAx{UKt~jplsFBM4Hy)?=1-`YFzGH@-&9QvZ(jXxP%eDM z@9@)N${PFA69ZQ13u|8V9{P81^zT!u-)W_#gJ0eyl zMohkI;FA%H$Da6DROAmT+4DF8MMrc!0>{6$B!cfdlJzZoK{rfRU1t{e5vdBZghD&& zf6%I9iaLX|l~fz9emQ;kaFDM>%tXZi66Ek?>;t9Sn6Q=)Rz$h;F%PrOopVWQWb1&=Af2&mYjgA4T;fgqdgq=JiTW<}Kj_XBkyBFV%IgBJXriX}K-p~F0go2O ze|ynXM0@Nvu6by@_(EEmVaJY5z!CPBKvD}v|Xi_`YEagMfFPUbdcumyA6<*102#og*n*3UpNko--E7TZeg zVxorVj)fWpS1l162=V6G2tEo&;ywBo+f6(@ZGbSkDI%`Lii^=hY@!Gv>+ zXp3=_~seJSp&@o@`%aO<>0CZ{q2L_$$LAx%u#l-k9zN?lP{S#4xxA z_;$(u0euGV+=E6=Y#)X(YH0+Dt1=b>3jE%l&nfh8au)6wl9*-PkYNH!9PwLoNuae6 zwNGF^7t*ZlI_??AU2h?DQOTFY`w`Ln=)YCh(Q`{LZ4m6yty9b?@D!P|yAJ2LU9eHh zf1aOjJ99N%9DmzGi`4}tAA9V71{%*+CCA{u*ax%m&$rgrH5?$J%f*Y$zAOf97uIQ4 zgmMzq5j*nCt0btBKeH4*AH3kUzM;MyJS|?7$(7kW5pdA@E%XlHLsiqLsc+=vZSA>- zQAQ(L>BCb|PT$1YQx=-(Yk#zWhUINVux-J_yYMBE+>M13 z@A#{~wH#{Gp_1@^I5Aoo>{-i%P@DOgT0(w{Wh|08+4=I@7fwQ|Gd+8>hnkzQu9@m` z!`{7h$;!Juawv!#oMmO!#inCke56AQ`}`AXB^j^k)%2@=nv!T&h6foLAHM@q2Qjj1 zFG^UgeW@U2bbhu+#z$A;WmJ~Lw~fNq&zXG~`tNPtd@N1D_hN()LloaFN&gc}E_c!B zCuW=Pys={ZNpt4hUCe#drqh7RnqbBd285TBiOw!AvYyhsvln)pE#_N2m`xhe+Om?N 
zdF1qf_gZMW$BiCtomH(-zX%k3e~!_HQc7T9P>hHqzEEuo0EgYF*>TuTpD%tJWfT>3x zJlNyQRRjv6&!6_1s7onqTh(4mO9|1_4tF8Rb8&vgW=)!}xfX@7SOCh-x9PG#fxKYS z>=a1|hPs9}b0_I?=M+AD`j_$XsVLD2^5tDc{Lhed#04lXN*r{hEa(upOFdXp3hxZ_ zGF)BvyIF%OK=80`AyZ78HP-o}WXJ%3m^(}c*RsiGTX^^lLQMG*A=tn;8PSlHsHAvx zXwuTDQ(yjH5P<&|7idt31{B(IIEqW#LIpnvd+7T(-z$;Ec0Wgvn~aEFC0$gsta9!k z8v~`l?X(Ygat1Ql$G1XLloL12q`H;xw5o!whgfZdsRPK1Sa%vSxKQBsmS!@k%LEBKb_y`A2Tkd)e+s~;Woh$=aL1XU`O#o7goj4F zgr47^H+=8rm6T}6{b%^x!B1GunIB2qj-GZ>{LS%vo6^X3`d5588r zeweX~jH>!^5Mpc<*l>@Nk1&->PS)US*sQVI>oU9pX3v|MH&jjQ-?wkZty>}YOXw2> z;J=oR%L4S>XO5%HT8?%)6Kq*6*+ zQdvoo-5_L^B80-}OjatgWoNc2k`QN^6_Srt_SVjbN>(8aBQptQ)$e|spYQK?yMEX0 zx?TUAw{xlY`}KN0kMTGjkH-P!=ErZ&1)}LZV<|^ulB2}hO+{u{>tRd!^k^=-OIA3| z{>Uh(8YNkH+%(oS(DAcLd+Aa!AbIr!5=ll$$$mR=C5r(=zmq%l@-jQ*rFGqS_<75h z!gCRHjFx!h5i9tx!gQSZnZPDABZXcZM5Jp0aK$Kiib97E zzxIM|_)x@aN)6zixKGOg$)r-Y`$X$0E(!`13=;`^Uuy4SR3rHhJblzYFd_ol4)d;T znGHQp3BQFlVyX9DJRGrk8fz$$E1BTGFF`R6Az#%+b`OHQeEvn*spMf_A|;Yd>Z{kU zPj~A+%;C?AxCwjvH&PAxFs#=$@GTY4z)lKaus${85N>huJH}612XPWUAx)>c;%}~= z2wR6_-MXd5BxS%sj%nh;70i0^#dN^RKG?^}Da@hqF)+)+f6L$&*&B1iRo4NJh7ayg ze_(dD-tJPe_>${M7kwxRy>)NGdy|>`Q<%#3Oa`mYJbBz>hzts)KI{ff2Y~hxN+9G}kh=xDj7U-$0fymEEnj28*Zih%%N?dCTZRWVY|4sp*|R0<^E zcA4dNIb6A;J9fdIp<&IOa-VL*gW~GzA0ZW?qmHp4@_W+Ke^nN|iP_D%HC&*!gVs8! 
z?c+X@3=QXjEN|hcS-aN4{-;1Lt?I0Q?ZW@&0#xM%*3iri10tz*#0H0C<2Oi2U+eN~ z21epV@7_Sg=S65&n(R67g5P&wX(7f!wv8mTkH6gE1Cza-OCN>qKTTM5ki`lBx4s1D zxzD7`MaomYhm4OY!EWzuVFQCqk3zW)D@2dTe50{f7T3L+C#0mToML&8aXQrggf@cv zag|N~B7oySH)hKh4iAOih%c33TC!qb_8;7INiZYtkKrMc-gLU=W?h~IUGZe66-$BNYl-0 zfS^DUeEp$U%H}E0QaMNCGQiElxKE;|WXt87q2Z(m0L&kT4#RIK^pJF(@m+b8nZ|tT zU0E*}_=S)uqfW&t9J*%Ec{==np7m_W3@@+7kc*!b72%1uEz)kjx=x^TZtoTzfH!KC zE(3%Lgs(?pq>xw$aYqs#6c_KFY%X3Q)Eg$qH)6x+#br2-P&8mYV@4QD1+K#8#5KYD zgmQ5E)(_!6V|sGSLmm}ad)wALVQF~suwK)}G1@x}pDm$NnrXqpB(a1ttJx8O{t0f7 z&is{d_~oKHi5?$*d7WX1pL?=XLZjgZNebibr+2V%4%g^L18KMq6IVb9tAXUwXR4L-eT#1@s-`6ZV27u z3J)U?98CEKCq*=pvYB+n%gqDt*|#KhGy^X%TSN9_05xNL4h3_ZnDh=}!;ExRtLaWv zsa%W0Zst0c?)b0UbjM{U>B2UE`7}=En6fSPt~H#HW!o?D9*IR&DX)=Xf)V=(6@hJ{QnEb4<*Nvsh;fPiZ7C_n1d{_ds}=TA5)Mj zM?Rn^r#8)`Un6d<>d9b%sabk6XLz$20E|Tjwbat+Tz5S#BJfiqTBJ#aM;~C{u!P6P z{oCfCz0Ne@{!+ZYWdbToibqi_5%S7>oX3vc2)fXSwL>N%;MMxYuGwBTHec5MHF2Ws zj?P_8dnLVl-#>)ZF=2JzKeiGxJ}#bUx5RTjA`exAz&E=2pf5ncIt>F>eNjRv zc-$PoSF})V>Jy6odUk|YG8EmwffW3rz~j$0DllI!tAdg^MML=>>mOhgdt}T$6SoHX zC|KU9hc4hlK^!wx%u=ymyuK~VQ-SBhn}re#JC;@BxeFj-M7vr)_|+Qi0fewsXKoN^ z8D_~=36Y_lZ)JJgv9Af~K^T-7t1+tpqf_&wg9sno@Oy{xS;jtKHh}P(xeP1glFmU_$ZA3dGfVY2PP)h*ua_rW2Uvd}TyyM|B>Y zfY%Q;-fkw6!I}1{9^%-D^3!TEF+ujPCoT5%4Pa7+xMfu1B`a3QjLZ&%a8wE!{h*Ij z7GI2zbSiP>2$bObu8Q&jCPEqph(X)-7*k z*7^7J`pkZky?O2TveJ!R&q-qDcgH7@cI<>MQ89>>y zOb*|9EyxmjoKP2clyF|lxaVtk5g0-Vx_K8#BkbFCD2l>_07h8fXmLrFy`Z8&Hbok= zl_u;<2?bXhR-bsc0CMd&a#2}&sO$@6z1N7&d9d9qfkLci!XOJ{VIFTS_BLFtt%on+ zm1I+)dWn?1OKIp&ChsYRBLfOyVSj!D?!bcLv8lA1n}Lq3qU@I#e{dOajD20zw*1Ew z)+_`~AYVvhuw@&Qcp0PSFv;x5Z&z3PMb`qJ>wP#I{qy7PJ9lOo*VhbA$#%;$PcgSo z{)Ln6l=}j~Q_9J}TER8>ZF)|)V_U^TVK+M8OPYOCr}i$Be?quf2lI(t{pn4wC$ip; zaiaHRS@?yF;blE_MqePgJJllElkalk6IaHV zjrj1QZy9tFbVl6VWRq-Uwk&a+iTEzapul;W{Sm-M5nJ56->LMzdgUY zN|%Yd(m85HDf0y69){{#t99#KOFz075pP~P%QcWq#mp5m-qWQDJ3tNHo`S4Z zc_J@`46*qjHxN{qEwxe-2 z>;Pq54Pl?zmjw4NhYX|=XDYHlcss~21JE&cbv83I+xydsSw80&HU0kFN~#+n@Nb{x 
znQ|*{=M<*)W=Wx;p#u7U9M-Lgc8mMqeA&7G-};6B{#*a%BmGx+qVf|Xp=P#JBuuCb zAkp&P!a=`erFE-9;>qs`!l1=8)GK7?>MjF`1U6X~7;K_sbq@yeAs98$wn z2DlOE)RLnYQre2R{sl!O6EsGGLla!*fGa*5dllU;NTqIcc2*XG@lefLZWJ)XKTn>% zfBDihCGvyMiIEH^LAY|h6qCJ*+`garF48-VMEk0aK{u&Dpbd?3v8B@M%Vw2QwPxEb z%$XkY54wAsweG70A2QBTNkv8087#}@YRsNLKia}x`+>vIC*sV+AKd>BtUeNDzU3bz zIkFMtrrzi=CW`@XGsJ-Ob!MGunI7+#ZC33qZ@)q*BL=jq*f0{zL$XVnovC~81Em12 zk^N0?FW{bbYxwo!hbW@yeZ|ASBD`*mu;>;o^67U9i?5%(W<+xjcEz-0OZ^7$s9iV2 zIf@#2L)=%YoQwnh^|!J^!8V4VkPBRZFh$6K``+_~Qy3raI-Pla{)~TwaKq{&m|c!h z7ObZMrZ_VNhQVN=3myK#_*b+*Z!#V|+V#ES(wxFQ z;aS^V|L{Nf;sLBW(MvgV;C5H)A{M6Uzz`V1ARFyZJUcC1b0p%<8yUr8uPD&Qx7GD2B5xVz4m+JsS zotJ7Ip6z1e$<>+T+PluhK$BIOOG3ehLZTj@7kg@!kaWa$QUj4Ke zkvp4ne&BzzjRqqA!%WhhASa~xw>aoZnm{0NgW zs$U(WAMm4pcCd`OapJ_s28+Wcdm^`P%|WvhQwA7gz=XxSy)>4HYvL(=G%DG=9+Z+| zIVp)c;$O4JnXoBe&Ui~8l~{7dXKeU}BZS`5JDk1k(~Fw=>cK0`STJv%I*DwB*`lJ; z%22M>s*(>_pKxSQ75Nqnp;M}ORV?XoDVD%nChY}zW%4#VPHZvoAI@BRmAL%*7yYnM zJ2{9&m)2HfHzPn=`2jQ;P{_>JQBze-1v9&KQ}xESMcCL2zVjr zPKbsI}yAIm)N#l$K64le6*ZfRfh}h|JaR4vGGhxLJje`a)YzBNklMPEJnQz)@iTq-G6XV8RCSAp zJuw5hEA}$K)fC@e)M>{vRj-})_(dpVela@r4JA8saP2x%Q&T~foHdzK%Nle23Aw+Y zg4T7_L@YM{iMVnQos*ehmmHya@q%!-b_L*|PY~=i*D*C(wT|@c^cUW~&xysY+q5w} zq=){7h$6$Z0goR&D(BKE4X1Y!N<+~_@G&v{pM;|>D7dm=Jd_=1Zq+61)>G%C{*K^& zuANRKyM5=5wt<0@u@-C{S^mJ@;BfI(qx-mZ2!9woX%PDk#wy=%IaU1!{yeR!vpfC7 z44UZI;(b{GzWrgW)?ZB;ed28`X1WeaA63w`bLTYPNYZ9;0H?8k+b(lOCv6SoO1;NF z-aYIpGr<|xxFn4iFgk~!C-dk>w{mitz-ugf(X!A+$xUo*X{k4HQ&WPzGAe{Jvu)kO z!om#51+hv{lQcoLAB@}d$)}NdC$2R-tb!_nxEDru&TgVcWi3^5!5nZqVDAi6DU(Ly zB-)pbA@j{qM0TKjJAQX&w_8$=zyALF;H68k-a(ubXtwv7`T+lmN^k$FluU=p1Q{_> z0KrL*a>6n*07Naekl8t&hx?8%XV@+*xu%)ye~3e@@XpoKGr{4@1t2QCHg*{AfOrOI z=yvTvVWBwNH0+=K0Lba^qJ=PfhQ4H^BIIU~*AID2QIExdw;`3aS`tbqZMM_c4Oxp! 
zgY;;;WBSyocNavEN-~IsAP9=En{-q$Sq4Tn8g?|KBMuGl7hA4hd$%@X8A2`Y z6^BPkCR(FO#n*x9SXgY33MOHRcEk2Gt<}D4)7#r&S|4uo2`40Y{R5J~;>5~|r{i~# zje~R$&Z(rTCj7!p$-R5^l4@ztS%JLQGGv%48zY#34ltLJoq}^z5L0wu`d<5FA0|~& zc0jU!!}`_(SL<<#4?TnZ)EENCipnv2Y(^)%UrFN|!)Axz8Qct<4sWGipt40~YUlYI z+(D_$7veZR0HfZ#|M>B4ij_%B_%^5VNnt7qGcrNQXlsZ8SW-LSU&(1!t_n=l1odQP?F}x2|`a0pT-uLu9T^_rU?8)Bjvmr zD9WV0LlqJ=rnu(C@NtGB0?}kZ7NuUR5DN6@Hp(hjoZ*$f<8ucQVi&vy=4)&J3zU@U**hI~E z&f&@rh>)^o>+{-=yd1KRx2M^o(oIU4j~?mcJzbh@(Y~kl7}={L5G?O1#sX{0mO^&B z1C|*^bEl=+9LENNd#7Wokdo~kv~Kr@>)awtVSX425sd|Vr_Z_*>wz?^+%S)6AE%X< zj_jP{eDyxI)F1EDDXbC+-w6^scr4jP7zSvg z{{248_hjj}PKW(iy^gZzP0<0?_xb8?`lcy&E^+&~s7v?24m~2SIRfB5HXeCg@!94s z8|!2lqL@-sJEYdC`u6P7lS%3hh3lRJk9yS5gYlZ|&VNUu!&VM(&BpG}^-fJq zRUngkEukCCQL_E7uARYpHeh{B*ya}biWkwB>+}C#easprkMar*R_|@;FDQp9$g4_+ z>FV0$>NJTL|c0XDpdMdGclo z@}S$$iew8+CO*c#!FRdt;7cxQbIE{IMQ6tD^6)gkK4uNxtuqNw9y;Id6d#YSdHN=s2F9>L5e8N zX>HbC_Z$`DVF~QYryV|PK24CcsjZF(H;nkZwA({GJ1*%@p)BAeiAM^W^|UU`{tOW& zthy#-GP3fdF16Lw+n&tF0lZrel@U%(Lzs*qSu>nW;iP3~IHKVk?DX*N;e$tsM+KAU zU|I{IRDJk!ZB0YFHH+FltSPzH{6&s$RQsAsTRX=(W;@UP6k}qwGr`os&`8yFcdxE@ z4;Q&~8Nbvi*riveog)p6OugEldH%@68t7kt-QTJ2``vRjzcA&`3qLihzN_F06u(bOoH@sX-yd#2=k$Gc_sq|P_PXM_I^Jig3PV;4=D$SSm09k`hJq-gW zD^GXMlIjSy=>$=q5nMYJY(e6*Yx=2Eh0yq_14}qk#h%)!(!i}IdjB*>zxl&I!(r3-#fR6n{cQK z5Jz?pB`OKVODE$=Vo>tsUc#ns|ED|jRT9jXasOe52t)DT?sS1rBa}j9KnR!}7-1fw zqX&omicpM})x%8j1(ZL(3*NUBW zlQZ1pt{;B}pIVEKm~A%4LQL&X(v~gFsLzi6qYi~08cds=NSq+gsZJ2%R06HyRN5gi zmsw+|tsRJ>{p2XlHv?|R;ixylx%zw7FcMJa%_m+L?hnz4UL@Sc7;BdYFIA^T^le8S zEDk|0oI2q5!{ONKF$JiX7<#bS+nZ8GjQl$>+AfxJdw zOSE_@2~r!%bSG1HeKi+%*hKUPK^G*`k@AwkW@ZmPM}4kx4+Y<={=a_xf=Abs6aFd| zM6?6bq^w>Kxp;CB<8HDCT)yEt0lY=d^WY~R1?6}oT}EMN%&}WR!9KDL6G3YtQc=Ta z-q^oOFI?zq|&ZoU6 zS@Cu!d0Yk}2IO|t7uyiLCtMTG@*s~-S!-eYf4Bgbw%@H~p;P{|XU@i*nmRdHIwG$m zzB%br$Qlq|lh2)dWw=^wVq3MEG`s6-U-gZXr%Z{Ujm=+(0AC5umbfkR=FHhPdtFEO zYPBm}v(Gj4`3V*58H-Ba|Aq>nl4w$@r)-h%Do>m_NX?0Bv5vDqr+@Eo%Q4dxE@Q(Z z%V>_zp?%d9gNp4lQ9BUMJ5_2ak2V=Hm4PO$@m(3r@M+~R5^Tb26V5e*5V=ST)=(jFbDWODN@^@+ 
zNc)F}N3}HX(&hf^*xkF!0XKDye&!mhjDW8r2~R6}?_Le=`1K5uqAWm@S)yM8TzW8M z4J*hV!i3fD@d~F6W^vyg^**cR?u?EO!I3AiMD8ZL$pg~4^CliL4V78ooULv{saMN) zz8N*Aw?T zx`vw@`EPE!NVqG^SGHWuPgeEm!+-=)O`zjYdxfq~{-z?J%!;y<#Wu9INuz&=JMyKa zK678d844jBXt^yw9L)HXhkm&J6^z{{*VgpeQpO;?e%|UW6rEe*(L!>-C*9hQE*jm3 zOP%6vNVGlsSnoAg>V*zf4@pDO>y=dQDJWXNTh zMo_KTu;rztgS|N?_FYo|1X(0$-Su+gFrV%8gGlqeaFVRyuRs)r&SU6;2RmVQ7`T8b_ZBq_)L31-2cTW zI*XQoZ*r-Y9;@2`y$;Q-r#Mi>TklUC|s!pM~+Ca`d-S8URJDH8)@7{+D z)zbLNwVad$pErcz76QwHqhX!z6#p4&jL* z5Gdmw<6dTruO17y=;GpG0aD+b>}s_^V4+{%Ph=h%$cg8hXjeiF(WWl-$v-{6$V%{- zc`ty@#j?Zw5|7e@xuwz@q5`k3uBS_vN0wGr+pVh@U$Bj43#SXRrc;9Zbugv*$CBR! zxm{XQ+as3kpTi+F=5OoNv17{kjg|Xip?fSX#iD>Xn4;Do(=S?F0Wprn@G3|2zA^UC zk82jMs7#~*Iv^5frjUo4L-Vl4p60fPcocE?I`n-nw9N>^p>g6wE^UTmq{s=t1l6|D zdyLGuWfKUW)?0(f)*r9gsP;z0C z*o+qF=YHDMa*!lunH@h1Bb_s5@N7A!mj7T@IvMa^-L!?Co&~RlO?qc)U%{I8WvRla zhp?(;NzaYr?4XvwI{ze74H~pbo4=4Tpuoi;^!uiO0suho)p`nbZFW|cuK{xzr8jpT zvoMyv*xhC(`5BC-_#idt{$t!5C@~Wf7QF>25(*C7gSo?Gkpl{rk{w*l8Mvxy?8Ve` zTXeL;I9a7r_w}vrU~6`eFhtBv5Yh#`fw6uybUE7n*wM128w4`Uk&&bDn&JE0&lU+Y z92pP0%K&_fzrK%+gRKMYMUf^Mq1HY4h!2nNvoa&;w4kbEvC?%xZ?D@ni={b%P-4cp1;#|vglzXYbUxsazRa89DLpUGl zj7o=oXxVv4gBkP!mCHaYQ#h8V-4nvsVC50piJA<*g{U?~@GFUk8ICPS@61Y6QBv~Y z&n&wRr9ml~Th@2o&tBxjx|)PG8|qfkL#SPr(n?I_i0&~Pv>YdwzwbOgkp?qhP9TMh z38aXlNz|!grC8qbuiNEsxN}0Z@VQJ|Bn{a%*~^6DO6U)XvCsguX_{tSq*oR0L*fMB zbt0zD=#>EOx1M0OJR~8u9_X72zr9@zU|VGNGMYEWO2%D(GWw;hG#NN>pX+Z9trV#B z&iKuUWIp*L_k>ew+l`5xTO(jv4PU(jyLxI~96xociGtC2tV;xInvNx_Fo9EtI|u(< zXEN-+W`zAT3!z?1JAIq!*MTL5mcAZvL%Maj#4uV|uKBAjmd-BU(9l5g~k@dYOll$1)W6iy#BnrSe#|P*@TEM<@WL^yBX6-u&?VN=vb3D`pHJ zCXBICLtrklv>B;Mr3VvpP40IQ?4xPVvAY^<+Mj$oH%Zry_;K% z!{&5q0GNV-E0^NJA-wge`UlrKcG|+GtM)Xsc?K+RU^=KDbq#ceC=+eUZ;GceiSFMW z`2+a4h8wOhEHDTI%n2`32l|R{qlpTd+^l+Dd9m4t&yW#)F`t@cOiJ(gX+5(IsLg7J z;Nf+4{oUz9WK0a6%{>vVr@we{pxYTNJ{ho9<^%@^3m_ll=9<*$(|P6s?BVr0#c;rw zdHZy2Rw9#ftpi$&&kt#Sec4+b=tzs9oLZ5eH658YU233)oO>z|rKuL=*N|3K`b66@j1jzxH(YG2bOVxQ9+^;H{q%R;t}Gbv71 
z9r6#Y2*M=AAW$#S%D(-LUSeb=~4n++#wXUuf=56gCd>T%ErGB_+M=P7BV zr0sKv)}KZ-!{)NE%@Yg=;#!LnV&w5iR*6Er<0l4l2c-+Ze%oafjep^ z(yzf;vBtDLDM1pkM0|XU?C;=p&UyR#EmeX3`oLmy>X^N!h^WFGdV7(=+XTA`n8BvT zxCezYwJ1#Sj5snoc>!{!{Y%8V033OSx3|O8Z+zp}kyP^)1C}-l5s4ts_T2Z3twg{< z4ZG{aFklNz!QZsfjb_&sRhcc7VNemC;s8>Sv8x_|%6^zIqrNjHelyBQu2 z#FR73i;+HCf<`8`xU&5H23Rurc95_}r&n%H@6;$;bXFXL!fH$#^r3ERm@}3X8}6VO za--ks)!S7s!kZ?+1cQkeYBNEV9m~Z}#a$bY#&PyNfugT>Xu#^&`kG27%} zbe;$Q7ZY=(yZaoG8ghM<;#tUM!D5d!Svff!prgLW z%WMnvtJPockbh9ysD<0X{Ox_5$_(Mga4}H2Zoaq0uT?bdw-sbJJWD*W0kUV`34rqXV#fP8-KKL(t-0HMANjOl#KPpg;``B$|~{hY$w<+6NTQdvrVv*5})em@s!}Ukhg}9Y&X5iEw3^GU&9W8pOJM zr$Kcr2rAqwwKb6F(ud3z^CMKMGgk__20}U*+d%PCi|#pd(8kTsz0nElCTsB5r^#!^2_E*wH4L?zy|Gg!2t>R_hd>YQB4E0yp zq0sgHTx$0&mk2|%KlDwV&`FtBMPQZL#P`RSnLSEjgkeMM#RRz8f*Rcmvf}YC0y{IR zSBvDO2v?v<$%)B*$kX8;j@P-j?gg7w$mMZEofm3pUopR^bFT=uGPu*PG7i?!}KH{?>YUW3s@|o`I^*CtHX?>$+=G0g={458G4IU8;b}NUrG>A z_J6YTn8omf9!w^Vt6|zV1YNLt6D8~RXXgXw2{)*SXEb>V#+VA#@PPp4+RvLhX;L`a zu1{Ig(ePA)T<~NM-@jE1pA!E) z5N0VAx^=^~Gy~~OT`rlcuMFhp#y8PDz6;zsvUR`o7c_o3bVg#~S~(D~fQgj14UEkV zgj<~n;-K2S`xceH)FW+-=}HzuEugG)4OldPz6RdW61!^=K~#Nt;4JPpCZH=HBhw+l|+hmzT>h`^v8>{|4Muza89% z8)pX!apx5`fI#P;?zzE6wV~_RNFdYFfdHdfoKaVf$dv)Wvf@$ts}97(W}G}PyunJD zKU$c9^AlmV57^#FhP!I-#Q^xsNi0$qo#y^$g{G2Nnq>6xWlnGDifZYK1%^dnEN{uij9mcvz+Y z{M+ws!5<*M|8yo&Yii1oJL0~{`U31<9E(n?2PPl8F*X~^f*A^zF;J4vw-VcQMUJ4U zem|~6P;jtR4Qo8Ty+dd(R5Z$eZyI6?C@e<*H!i`&kVTj0Z?JWSfH|e5_Ji;|S;IvF zUA(_?uPd8({Xw_fJQc+?RNaA}C&=|gW0GK79*shbNS{WFq z!o+WToG&ZRGRr1-9t^E`O1XOV>Q$$$8#in)0lwly^cus#lem{ZnP!`h{W1|&9~Jv_ zdzXQL%cm8HJ3fW$8JEd=H-6&)J<$-7)DOPG7o5e58{VEnsg}y3XOOp9F9olY7kn?p zzoA~V4nOgek<^n?_-;pLzaAycZa^wQpn($6A>Otv_BWkF>Nv z1)}Lp*=4-tG2D5NMT^ExWI(RUk5RHRv2Ce65n~EsPn|Ps-J%pm^0ycLsuLV^OQdOe zfm!KBBJg7?SkkD+uO71BVBDjc5BZr{*6B0+6b$h$Iqdll6^GSr&(l#C1E@Q*;HHZC zNnF?azUkGt>KXtoX)NJ%YEfZ!Du^H21UDZm4_Q@$Ui+rjChgThlX~Je25Niry!azP zzD_G4$lXkAI`j*4d6{e5ml4`52c9~9JOUJWgi}v3@j58N zcAWe7eYg%Qhst)K0`+>1z#)vNS#Vrz?jCr~| z!}f@z@}0v*urWJAM{B&YXHwfzxE!07Z_&rHHJ 
zdsw7V?@jyEGN&_lFSe#GIIpE^d=;)c4%MrH~P}0oUr|LWoiMG|nZX zQB^}g{#=XN072-zd#Zw`)9lBLmif<-H_A<#oH;y}`of`0P3p@{$>GBJ0%zP8^64MUTH6|N{s93x2ZKTegG(n&`3#-hQJ!ex?gmtnbhIEB4^ z01N_QsXBMlr&j5apCGgWitf7k1={qM?imbIrHcIYk?{z`@0-g`F(RVVo>V8!O7wyr zSTiU+7VazrtXLgE1t~IDT|nO;H>TAE2xGGD>^cPb#^4fWx~m~3-PWXaxf$nVaT~3Z zYz3Rf<4ng9n0$8D^+xbH8nG{o=$RnqOJ#s7yd@PGCEkO=FXPp!2o|KnV9T%v`l;vNXapZ<4C^5 znZl7op#Waaxh0adxe@TmV$vaK43KgtV?L5z+N|?!xc9C1?%Nkz{vIU@&cm6}DM+(; zDR(D?@HcdltD?kxiXU|{n3e+e{~aFkOn=!nAF}74WxvDH!WFY$p6;%17+{X<4*zNgK0R7esd2xP}<2JhPCo47!Yo` zo^P>P@1&Od=8UdAf-E!>3{#%$A%_YTxc7RdSZFmY?uw?2>Z3XzkRTO(6n2st#t`R) z%x2N^elL@(t{T_tazVpk8SZTPl@P(bJ2<}#TtZAUO8djD7t4bai80o#naI~_LX)6* zDQwA(lu#mP+I@gXp#cOP6K*x3UCELOVhH{AX7TFmQjkqeKRyPyvDw+sP-U+9#e1OL z01qAWH8eGaor_mk5nBO|su9WNPhNpY4DNcs?jf8peP;k2fyhysTkkoB=Z~~>W5je% zlJe92ZOE6R2}4a5d;u~>EAdB2(NP(-7hc0D@n@UYAEKf7(HtE`GSc7l_Wk>VjIk(B zxIrZ}KrfP-naR zox`sJCfX702JipHQqNF4W`h`Ey6frdTWv9H*KR8nRV^dO zDh!fFN5I#MDJj}i7;#g@gpKxR&SVmc?1c*dZc9>&9*5^}Iqxlcjr{KiH>0j7QgqE- zvUTg$;ZPc=Jk7PWg2yU_$MtnXz#CB71Lf&dg&|;1)nQO;c^j7+ z?C*8D8Ax!FJn_WgYlFT+J_-eKkJ+ve)EceLEB9G|eK3K}wj;}Q@ViHdA;KZFzO)c5 zlA2Lv-fVtUkxzoT%u&PDMo~?BFC`~vvb)}PH{hLs@DWciSBk*FI6n>kO$A8avU^}ms%-DA zeu#gmF%+LW9?-d=>f(sT81RQVuP7KStGVm9lX}t@}b= z43MOs9gJ@UYFEW-$}}$wP;|z;c<>;4%RGz-DCL_nd>VEEs90#H$mH*&?;$mg}=mcUKIedKnRL|9@5oEem0!5W~8beK$d6*g%}uTO^?5MMY*xC0(JY zu{E{8kL%JLmD{spU=?YYT&Av~>=W|LjZ;G0X{-`eHi_FT@>-k2bfpHCo=TINQsMp& z-BV}C>srOOX|uu=%IJFf;?>(b2@QhVMc?+DzmcrjfN|I40yke|oj%jpe@I?A5-@%5 zJnb^*YYoF-N2_nFAFDi?p~!=nV@XNLTmM}x%Y+rbi>*6l6L~gP4W_RLHT3u?t*+tP zYFd_-uz@_I+J`lAy-OZxDur)j%kWVj1Q4yJEw<$|4j+?>++S8&T0#<^`-c`VjDd`{ z$3j48qf_+n@Z`t~Xrv5v1{#5MUD70+F?rvkl&t;Snim=c*hWBomlQf?Oyw+08-X62 zatw&IBuZNw%g#Nh36HZ*Zy>f0qbGg=L&JXUjnU?aeK*fC@s|x#$4tO4AnHKAymXUkZU2T&nu5qC%UhN`qGT7_r2sA%R>d<^y$w)n89N> zdIm9qlr1O{WlsSZm3{c&{O1(Om<&AJV;%f0iDQ0x<8P6=vaQW*4OREV?A)o$Z~9pe zEuiqza)7&QPFb~C6D-Tb79fSt^K%1MCgLj@Sj3?*=3F#NY=Xx!*?fdXv%2TaF>ai} zE-;;SQMmaznIT1K3~sY`ubJ6=wW|rokLQ*R;Q2}2NQXJ8SNA6aIcF-5u#6L`Tve=x 
zt)t2)J#+PTwYa)rb6xXB9kXU{iB@1E;w#gNqRI5KXV^Uh`<`nkUBJkNpsX_#!s_jq zfsYCb;_np#2qH^5zGsB9a|m?@XiG-=Q4A+w!^J4c`Wpc_XPHwp0?s)vI7#!ziW$a} zqi^h?MM?ccun^w}!sidFn^VH{DW1f=5XrOQXV6a*2qL>*;esZn##MW1z&L_mYRHag z*t*h}G-7E(Mh20mXJ2PPyE1Bmpgj_rns(1v7f1^z9H!K-@D1_O_9`uR*6^Vl(a-xOkb&)fl z&Nb^7eu$E*4vFjAw4h zGHf77V_EqSS z>vQ;Az7%(#9x`XuVyjnZBZQmoYUsY~l z)pT)*!(xYp3r}XwPLrYX{w*~2&MPzhiKAZG<8zulfN7F(jF>7q1{sO3ltVc51& zwDQvLIN3-e6?YDz3+=Gh1&Gk2T;IJga*hoB$#Ins^&2+=p`t#ugEo zHAKBE8K`nGgX8@_m}rcQICl;|l&+oItNlP0C|auHPL%TuchpW|nK|j`DZtEkiyDfRG~YAZwMT5f$M3(8Eq(0ftC2D*{J$lLRv2r-Z^PZZAuvnc<(Z0 zkRiQBbB3?E<7hTT&LEp%z^eAtO3#gQ~y zGC_^A5&Heu17_Rg_);HU#Wgl=h;!bLHV$Har{Eg#TR&hSvldZxFNmYa6SMl`9|u~#x{JB6#jpJ9e>8!I<$*V25@n}Dz`fEj7P3k|C<-Q7+^$2m6=!u0l}c#b?DPiK2C==dB_D% zW<}oR!gr+98FcczmGHN{$yB{eDQ%7~DMSJY!Cad)bX&yVm&$m=Qx>L3hv?mfJ3DN} zL8fAmK#&!b%@hj-*ziSi$M>s`hJXqkc20n|o8Y3+EO&nXx(G=*M;)*O+B|5@`+S$T%3vzE%9VP|8)yj^^%FVIwE-r7vE*m^^S3PmBv) z_VMHB6@Ps_rT6fGxVh2JxA_h`dK>{rgvN4VAoj)O*vk3yf*DCDnX~r)( zKQ`5037UF|`kg2{EYX<;gyi#V)He4P*E;6y?bnzuPkCYW)Qj;5jM?!^fC@J=)d&CpNtGf;+SFh@R4*8Zr+tVT z=x;oY7a&aGFj<**#<`E3MtKFD-zLsNVCX{= z&%qq>u5vVTj~LmaD1%PZiwk8s&|!~b+Hpqx?IA~ApMecU4359c+4FP%39D&eVDPLZ z)Uhq(e_Qgt`5m3{?fi2(-N;L6*{zm`UH6S_|1X2cFqiLT9xMz(s2m1;*{VSJ3;iy9 zU$#3PzPoJcQdx&1a&`qMrm0wX23|Bs>FVw-iw+nRnCMastyqyFj|T4ou#(EhLbc%( zm&~LGfl;yjz3hc0O<9M9hlP3MG}VsasDN#Tf1d#*N0tyTovwy1andpx8*#!}&E5`(-|x zktU3BDx49mSJ2zf?M@p#?B{nL3$FJeNbUD!5{DOqkW9Nssu0gr-;vO{fVm;RS0p}1 zbV;#t)fWjTa)64C^;mz1GGgwNf&y)HkIn^j=;{o8l*aKFCue8xpWZMGKWsGb(q^NE zVfyIv=gv8Teb8DYonok)$=OaiLFd|2;K;^3cAIXlKaTN*be}g4z|5dw8x)-1rs;_( zK;@>F+eR4mZpy>vd!2Fk$>wKdN%Rbic3;haI@p!8Feeq;I|is}!AVDsGy%#8%4d-Y zjv>2`cA;DnDn*{mKnOOLhs6z1kR81II{#_dYg1E^(|m2+=fF93@m_jk%&e_DON5S_HokJ9ZIxo*uF zpy6EmXmERbR-WFY&4?HFNv$~I5$>X|(N~!;t$x#$=^j^CNr6is8f&RDqo0vIMKKoy zU^srvBv&3ej!cCEp0u~)h#wCoJa~t2chl1A{C7h))mC+7 z8BM?sqU$?BR_8q*J~UH^5SZ%)rwK+6fkk@58?G?9_~)N!gU!WDYd=dS;~?>!mxF<1 z;vc6smoHt?R;4OZM-?3ZQOn2S!(r7-|JpVLa9Q0HdV|@dNZxAqLE^#?%FeehlzEX7 
z#Zizt!lG#O9Q@akt!eA`^Ni@nhx@Z2U7bhzGnd!piJkTQt1oW&`xE1vK$!JZDCX6F z{rL2sJXRfSGzI-ivmM?21yJKC-UR7AKkVa!Ej|4sDUX6**GmV>n57Rz+y zgpl@hrf?l~LjF-5pn_88wE*cr0w z$k^6)cYStYp)M*^oq={)r7Dzk^e7;f!Mi;{SJ<-|arqHb_)<{fv)?!CUiOVY%p`B zs=J6b%1TRzoPG=SLReUqQ&=X-75)7BZmZw(;G!-=1~*KCdY=gt6p;P}L+^Q$pT{!R z)L`M^N@JQ3x|tsR+)fP3Fh4C*#(M_#oZ{v-6up?Q-x(?-b4qaiVR$DrW3t1L!bGdO zwss>>`@W!j$8hDV{tee1`UeAc3VHVyWB}=z5AGx3O3m`~3iY7al3#62*x4I&W^u(Z zA`LmL8MW_ke{PID=s?Hae!!Hm+E;)a`1YOJr&Is*Y6jG5B;hND|H#sa=0<@;PhkLQ z7)a+L-sJ~>F@ZCjsirb^^5i}9AAtRe4TG>jf8jb6L1430cHP$9-M(2HTRr3;uVjro zR9qRi-L#-SsIg~phRe%_{sx$9HZj~!&n_!x(rs9^2RO2IRg34lD9dw(Lk#Dis+rJ|~@Hgb4t1j$kIGW&vK{4**60I9qEL0UHQ(qwyv_u!B+?UjYZCj=}}`7y&C z9ShWASUyA_pjpqYJ3b8JLME+>HWJwyE=oHx+oqLbj{ppZ3AphZTU^h92rMf^;CycYq_@-_lFLG=0YTUM7MP`h*wnQ#^Xbk!n^ z1qA80n#zyLq^cYdIRG>cy4Ws{8t$OWlqt?p+O1l3wF^>Rm-OTg5u^hJfDxeE-;~zY zgpO~l*r1SNW--%@#FurIkc(#2vF?|sCw|= zJ>wo;TeDKWaQ#%sLm(oaar9i>@z2pVpc|n)*}tZ9cK6M>CCHRyBMtNH@)jc3!FD&; zC9}Ax)+Fc$G*XY6i!MhP1KH0p0_V2<{sTuffmxVg9v)+l#wp0s)3A9;*+25OWhn>% z?2~)vPOz*9y#y*PNk!)nh#PlQW3|J^CNs9y*Z*21aiP%>GRhT8$!7%JbYK7=_p$9* z_%T`#qvtIRbr$D2Y`dJ)>~wOn(CTAd3MP&^zQf#2w@O5lqR?AOPENRd`Deq4>n%UT zK+VSUAb`M>+}DS+$E4kkVP4CAUAhP+bZ931nCPFvgCR_La*S-MV zBYf(4Y`(t14gbN%ZsN`owR`tSm)#T}98_HgLcvIf^FozH;429`PkuKv!r1{ZliXf) zGgA*wx8R&G@ArK{YS2b>LP2RYiBOS4x#QOs0lg&)3yik#oXb&ndKbenF#MrjAY6B5 zs&-mL!7$jxwxKY(V6~*^OgmezV|*SY1*0{kDq3l*I}^^p9m)ZGb#rdN{|z9m&e1?n z-7|h&OiY6Q`UCG&vK{yHtsbFpF(7wP6#>r^mlmb=q;uq(-2=~5zUqA6vb<*fsR4Qq zAEkd5Eg2>0f`KHU&*bm*vpBY5d8Py=AjUm$KIA7{Y0G{k3=1elg1*+j;GKis z2ujAG3G*@t*|4aDZGZYTpiZu$stX+nJos_@4=`LcG2ZVJJ?(#U0gMAQ`;h$ru?`Pp z_`Ti3lC9C=u(CuP5JV#AjQBvP#!LDBL=Rvyz}Wxx)h2NCDpW; z;3(aet-t_AT^GK8_f9lUwu!`S+16kO9{gMI1KYnzmndnIyBF|VM8By&lVmuQO(HzD zBMS(U7aabogP?2L%V~HB%XEM~~kvBb|!FDVdJb(x%%&F)2><Y7zt)q7Q-mkt*4AtUx$8KG_4yBH3EX&{Q zsjLE0PM*{xz&-0Pj3NwQYMrzc%YSi{WEb&^6vk|HU1C4Jzy+QP$KUPu34#)Uf4=4^ z_j64}V%xcsyWXbU637R!Q<<@4^0UQZrQ{evY6kD<>mnw4jJX_X0RG1t=Ksz0!Z4;m 
zeaKOCJ!#JZ8Z{Q{OQS|iWNk+1za7`!qebK$<_LJGKj_57##8j!Cph}U`6_(`aFH^& z`6KR7$KnX8C=b!zUxEoW&fWOy5{_sfbaV9;4k>|^MY4ktrqej}nG8_uO@W;8O-Ed` zEe`oY3u={K-eBfgIPF~Q0pcZ=wrYDBJJP@gg8Yv>q{f)aA^*w$C|3SmZ!fgt&@y6V zqN~0?D(Ol+N(}KA`goFP!rbDHPixMCEdBu~Ux=KlE3@LdRa9ZBnwqm#-=(DkZ**=I zHX$f~qQ0J@XwH z;z4;VC`B;Avr$GzGv$n@ZzbLV?4#44Ls`7 zLT-=VN_hAT^$lRil-trZ>ryr?o6k6>jxo#&OMF-0XnSwog@T#(G0Z<}Lfnfjvb6)5 zP4>jz#L*6JKd>*6;73jvbkH|i@v53yn5qRzz?-|K!sSWDv~3Fq#X41LGc0%Ps;`pJ zMJft= zPAgbF?DtyQ6_M0jKLaodHB?yT;meAvF6U7{;!`>5Y%DypW+y+UY!&rR|y-<&^f zb9RB}J|2@6E8&fQ%J>mBAvx?)189OcoX_sBXJ~j!0j0tYoulIjAB;U*5(a;+0!+NN zW+RB9l~(^HFI8ok)`^p_+<=nnzA;XhL_qzeEfZ0FzV@>9bRZkh;PzfCB{Gj@)!#4> z$|@>`s@-S8oB}7`_8gC@oCQC>S99$waNfP9clr?Xi4l5dq>+5t{T#M6V00o2fP3<%HLleJpEoaZ4@BNMiuG%CFpMs}+9td?k1%|O3DNh;N zdWkh*1-tt}D%N3zl?r;3N8_< o8%?WVX$cj3x!F=TC z9X23XI*7~e@^=$ds+h^hEMtVsuXx}4CoOK7HMK?PXvHep{|+E`L+Zf(FGAP!ucu|8 zM%QJe9r*7(%l&25kT zT)*K@$(oPn9+ynkGDvXk)4h8$!gRC+Ml|E@EMyb~$qg`av&~#4ha{vyk8#`?f_6Pb z8a`PfY%{4Hyj>pFs{v1m9n7sXnCZRxbs{Ng2!24}Mi77Dx1CYzZf2$e$oKV(D6Ael zFr8txYe1NieXr}`OBw6W6-PIH&Cu@5x1C%2FJ6w z8O$x(LT*ut7ZMy)r+x9H@627A1<}b3NSM$NA+H=%rjt^&;44)gz#^Ju7FnW>tl^w)R zOo&FX5wl$2n$1&bIgu zTFPO={ssG8nANEvuR6V}d}YXnH*~mlr2k4iigBTpv}(~fJgd?A>T+eC$?!*||D6cz z>|00j&*JsnN^I(4R#L^l6e}yo?;R~HwD`B3qOdF;PH>L#JwD}@CC^ zfk3TW?v5v-Y`>&)>|JK>^?*zB+{J7f=b5O`yLovz@D-AA5NZ39h;9GB*n0E0p7Zzr zxAK;vv{?#~HX&4u7HbqmjKWM=lgN@iWKWA0S(2s5Qr1ReitM3HLMnS#fcBiD-qsI+!`o-Ba^?z?9xEnzfx2=mPP2*m( z^D(A))AgG-PsIo$QJj)YZi?B+!Gk$PEG+-SsacwfYSVuG5W?!~>!+qI=H26|5HrgM zKBVa?KLBBx9Z~M@E^a*1bl&{=8!$Da$`c8ng!8P2lmocmbk^5`2aHFB-hxF%Dg|B> zC+6^n?k94pAP2O--A&_I(j2n}03uDamKtR;0)_07?*LQ2j4r8uMOIAj4INAfcylYB_HlKgFx&~miJv~hS z<%sUWzz5VL3DXP*^`1bx1dY)l@s%x|;N>-qh9>39CeQ|c#b_%}Xecn=k7LxcploU> zvgEgNj)<)BRc+|jvu8{0fT@9Tw=m;K*0mUjKA16B?DcxGESUai!#{ZJbaeU)av$Ao z{5v3AtQ&Xih&kNN(lVEnf8A#Q`WS+K!k~9AUO4r2^Y7J4J<7YQh>s-;yS5&$m-UgD zB5MK^pBWu7lry$?2L~6|kJ9etthXnn+}RHRkVxO4d+O4)P3@ZjkM8TiFUcai-P6+I 
zoOAz$7$Q=m+);GpVieG5(r&RL>ohE}!WpN}o~;_!>UnzvEfMSl2zl}=(h>`|9FZO>Aw-ll9=qXqOTY^MraBqV>2p-I(bRI_DKX2SI>QEhj zLCr0~U?ARG^S+Tqklr2Y)ECtxKKCw*6Z)^v#c9qhnqUFpEH`%m*g7>w#x4MuJ?amPfiyJbH9^j?T+1`}cUzbi3Ye{P*EhQHoHa z4*kstXoY&W|2=jWW&$f*_hDA950Y^>r?t_j4x2&JsKE_&yH8oC+FbTVA&hx8gXu6X z^P;ZnYjYott9rKZlFd1q8NI9lO^1#k9-_Q=ZQWX{ z`U#4WGEA`={{m*C4lrST&kRGx1NZ@?^|_2UAM&!#DZ(p)d^7Dj3rNWa7_kSZyCx9wk3|Si=I_IV@dxfvr(52n)=J>un&Opl;iw81o43VDp$+S-|~HB{J)`miWBmukQYt%XhZ^0)9%c;6uh#^p9q4uF>p z{rOf2jN~nJU)zeM_@KE8EF_hRg2LProPemUlR>1b9du+43;#y>_}-<;A9|6qkWDQa z6C0bOnn`>q=b|-MFIOmWd1deJaaTn5vJGJUQmp^Z1_I#A)(6JR6&D}gzdz%xPV4pV z2Zv=nc(Ajo8y+_l6-h7U%Rc?yDWY}10~UHfv^RJG`E z&GsAE?lX-A59!vO2kFIHfu~B*dbuBmwEoNE1-c&XW!mJ!G0$S8UebrRK93llc1N#m z+i*JQ>W*M{LUpnEz=#}AhYb`(`|mU7D0}_-dfZoN5)5nhDfXm}U$2R8XAItAp`ER* zb?6fNHvtLQWn}uF{l<+OADP6;3LaslrCmC5M3r)`es<2n1;a5uGP=my%m+Z!%N{Sl#X;f>l)veysh z8j7G5ubS?#?Bf7>lTDrDIYpf}hPj&pO-1Y3ZauXN3Nr=pT0?=1 z(E*Fi1%*)4)O;DSpNA-@d9zuJ2S@L)fD)Xg(fF1z(ha$|qTF5=aMX1YG zU57$|*b{mp7Ul1Cn&*$u0JP2iXgY8@g>=AX5C~Cp8`zdH^HKFhy$_g48i#k0GKxMM zS={LhZ)HVK?DRLws2Dz|TL$K@qPMUrlVt?_g_Uahbi|;#?hEG8{s1y^1w)lDLCxnt z-&=krJsqWI%!hK$uZR;27ow0D?u$c~Hy>0I;K&Wkwo*@>s(7Y%s~zts$<+JC_O7Bm z8hMVVBvVuU-pn@eTy@`lm%#`xHD1$z0`FyQpG50EwAM?++X;Z&Z1C%%jk=PK!5V<@ zb;s4GG_Cj2vPf77yN2(9hmI!g!eF>Q9E(GZB6fh;sNVt8H2b%Y*>HozTh{CR>%{>T z5e685niH-~;%}g`i=Z_fq}ku$dVYQ;-zYvn6jqcR8I=@+Y{G88eGZ70uwlK|!pA}_ zK0Ubn`SbPETDom0(9?CM^#ajj2T|28aa=++lbIwULo?g38VR@|!>J=$OBOB4g52Eo z^vCMzEMV2T?__e3-u?I@l{?KNh0LQ_gh{*@3;p^rr4YZwld4R`3li*OuP1;EpdXJ9 zWtvdNr?k8Vq9BC1ntw&pC{8-Wy}v%PqkQZm*%c{P_29He8%|ufFu|v5B$E?>b+y7h zyLUHCcY=HO?3Es5vY*CTfx4rTS?alb`EYuEgMO4Rn*l{fxOlFWJP5E*eT}I-(>U*J zPj~nA(;FLayocaGacR=I^ZJJ!^z@qWIrD4*AYr24^#f8K!W3KGf8oN^Tn}NI)U#eu zL~?uD;A1PQyS~|)SkH4X6~C5WiTwHuyicQJ|F|Gg4&k2G{l}(R32g>jI5@PEA8Vth ztsTS|Uau{UuvmY}Oa36N90ZGQmyv9W>!)^To3?E$c)HPxK<~u9+Co!Hi#wxw^DC(| zi?&6-JtGWRA@SxGg@^RZ0(twTv9kf1V&e898WRe_==B(WNjQq!3!%mzGo@;F&^h|F 
z`7EogU|vMFzE4AyUIYX)vQIGUM#}johO`5Qw*f!~hjC8Ufu6+B=*Gdq-qVGXcw>CzcvxL_fS^Oknz2H0V?&~FDT&y9NYxD_Pi4@_uAsHL*t zKx~6;f`D6{$T(pbMCck zy~l|EDKXi1qjbA;U}9{qJ;K%15%>SUzjEtdBor~=6Hh;=4=>lZx*k+43*%bjqB%`3 zhDa?N3y!9NScrZ!^BT}88C^Dl#f=y}`bfigjjdYL$i)nea^NDUdBAK-oBy}Nz-E|1 zg@%S|_Yqv0!a?Bxfl?o)`KHNK(13uxVuKbzVej&)ig>r%~eMSod-!yf})-ETfph4(O(I_(>V4WjD;+XA_8*doncyr4yK7mmjndJ z{_FTih}c;)0fpnNng^@v{e3vQ^zhN6j8dY$EXNKAua4-=9LkY0NOKJ2RAR^8D=jPe z{CNXZlyD%>42dA=K>53On&hJ35s6(}8?hm>DV3ojT9(Md1PQqT5-lz;Z`u$H#H0_< zq%dvi;>E!v$tG(tjgV!w&3WoA8)_@G-V~ykk@Brdgo-TsvPl)j4$1J|2j*f~EHV62 zBh?7y%Vcqdkp+z65}>jlSV=DGdn_jbh88^?4sKXF?HMc~L@UbNgwAEKoDBeX)FPAj(S12l(x3C&k`Sbt#!_Tjvj$Y9 z0!&v1_0+v)F86NV&Z3w+KS|imgTcCH^-6MgFVmrD04dV8q5@9J$VjOAh@(uc^dzd* zZztFO3y>f-E!L0lN1$<$Kz_XwSRakS?IUd26)xs)ve0uAUDv5DM=o75r1FpM0eA;B z%uIPP|NZ29@CIjAz{twqQ9fn$MB`V#BjHM8=>qk!lz6bOun{YJhS%*73S>wBnKR8R zc9#SQb>htmBBEKhaRAH^U41J)&`Bd_>~VWn=aQA~uJ#b0=07=?^-Qk15K(77!On^7 z(0>oH4Hm1Zxt``C@REpm+9~TIjZv%g%Zo^X$>YKnRqE@vO+O9RX}TjhVO*$Tm4`cZ5i# zZ_1(=6~K{!TX|I?MOKVuP$=}6N_jgX?-^GOf?xXjWkp3!V&kAD)A-%(A{mmvDX!BM zJsCZQ7&?rGH}zDOf8K0MQIbOgU0C@CFV~yHuUpZRRAOe{Nv02u0xl5MUQYOn7x$vF z(kAdI;Q%g$OlbQ0pznou*Df10#iEH&Kso+z-@PkkG&^$xU7$@|JAM63h$P0R+}Jcb zQ`qiM^gn+kZCJ0aUD!7uqGg5LCUt8ot02m3LwDgh&zTcAixtr!;0!UXp+cnO9!UW~ zsK|}fbKCx(Aiuk{%^OgJ{{4m7htD+7nuQJ^K{#FeO*v3P+@4;|`Yp1g=L!6Eb!5x@ zD*oSy(y7$5fnT;T}2qc|#^8Ty*>fRZ}|dy5>+qA&7quS$VM?%9x9}P1{S; z=1>EXMJ)88AGvMkJS{|^b9oOKbUCvy|3bZoz1~lpJb7=>2-6JY9SS9qG#5w~k)$D@ z5MvRdpik-#teYiDmu4bLDk(EoiJ|p^1(apvJ#E^0`hYvbabj^vim4=63yc)I5A%?d zV}lcItgO6)SR9f?P*V302_p?fHtx7(b>K7Hfni)*(y++S;OXjpY^2Zw<}+<#aj36x zsZe%6U%U6B4|PTrG;c!CIOP->zl&uRV3lPxdk|a|WekxVfBkh!RW4#u9BzFd+4`3R z2}-lTC%vplGYmY_O#QPMOb%SLJ90Ten2ElKm!>HfYMb03a0MZenm&jSaH=t9y|(ooBwOs;+_tFS2dGZt7!Or3OaYwyT&y>8S{n^u+TPET~Y zpN(u=u{o|)F_!1S@1(6d1Nx#$I(38`akzD`rC!Pzi~TLG$O38j37^k*?bgj>ewX@? 
zk2-_4jAwVb-)dd;kceJBBZA|;c)%ydH^XWe!PStObXw~(h#V@?F$mg%z2!p1UL(_| z7+})1-7)7=$%5YB;$+?nivB1S=uTvPZ_|(}>MnqTC&efVg$Of9JU^99UJM*y8fA@M zb=&h_+J;A~nFq2TxX$0XckI^tY*R2YH4Tk~8_;Ibc{|KV3b^`HvIwpGk#{iq(vx^$m4)8{Z z%XwI!L=9@F4Jn#|u0>5tt13bRX|9}^c0BFZxl)PJ*MViaYa8O&0FE5|CrsOxmwo{( z5r_^;pUcOI+>5U6owZh$DDw4BpApVuP0gW}$LPPsLp%8oKBSz5^I|>2|12-<+O>@K$Y112JAi%Dy=wk7)_!BSUd5S-m{|JzqG&r`#|OJ=&m|Tx)95l>2HS5ZDoMVfS}ZM4U7Y| zkT?V@bmDS<0ZkC+`gOhg^w~c2IC>DAaW=A4*7~}2pFU1}R`pQPP79!N=FHA^pMk~< zX%G$;E(FXG^`~AjxJVB66Eo<{7et;;qQ8QOY3#z_Nv`6vORyO5IW5}uyw{a_adouz zV*E4|x)ryp=sS$P99$0@roVf2rOWFf%dfz;75KSyWBhevFsUj!$dwP^uy`m4VJ!RuQE1X#EvE5S%-~ugPj8t$)N= z;!QC>Rg0k}e8q(iUh|uU)4sp-%)ItY?Mi}7(AV?8M6(3|&5?fkOXwaj{X_#s1h=`ZZF#)JTU9 z0m*C{hFe;wG(@q{oD5|v-bs^iXB8E)Zd($(tg~!)ms$G4j(;fy!XXxUeq2}`k=^_| z*m(#*#(r3$|JduYqC}6Gyy)PbNSx<6RR(<(IFxy>rL}CXqjY<6aUk9lR9=a+*KQNm z0%LxeD&!jeo9N?;P7qP>CQ(bkBuHTLyx=Ph-&)Xf9Ma+$_mZ~k(oufuo@C^lVXPsN zzlc`H+*|l%vO+~?GzhK`Puj1guanG>w}*2tMhN~bMLi{30|JFAOycQVDufau@%E8w z1$vB^J3V;%bTc(T?`zw$S2bIC2O(M%;#M zSW2;gY(o3V{sF7n&}qn~s2?G({?)D;oS&jF5L0$(rz3v@HUeTzUWH?!hHOdv-jhwA zveo!tI-Y;=w}*V_6=hU=$!VYmn0dFx?5)p#j(=(<5aP_suGs(-vgI3lCln?p_dQ9is7^ zK7Jvt{tmmDps{S*+2_MqCWayJK$akTW$kLmpo=8cF_TkGi=P@Zg7!(+zOP{Jw1w@I z!$U?`hDwy_%4=~shp&6xyGwAS_X5;@(#g(Q$nz1^bF=;oy8+*-t0<0I0!FD6@ON`v zO?bd7yAjf5)$Z5R#Cd2j#W4|;O|#0x^EDsS{wW1rTa_-P3PuRFBJhlVsS1^vN)@?V zYU@_ruU)%#5W3LY2iWL{&32V=y#=x1X8CvPZfWTRQe?Qr&a!+mA|6JWp`{A|&&kW* zJ1I|6gu)qX)uxRxMD+0YTr9gq<%<0yfwp<{Zth?Civ$UOGg&dxtV1nttr|g_>Ip*b zK_cwE&QYat<*P_K{*TXW`T}im1+#$)^fA5)P>*QJU)?^)-IQ@t@%DR79U=|ALmmGh zyqc9ydEBBEb<1$t_VM%Y=%SPpi+i=0`IH;Q(hb;aMn-s24uUYJcE&kmcc3lJCCsYn z+NfTW3Qt|QuzOE>uDxdS=Gmx;)YWkL6F2EMlMo`fWA6?(#U|e8U}4eQf1u#5_#4z5 z5a-|~YCAok8r_`F01pMF;Q$@t!++6$k(p}C4i?sCEX<7hD$Yf8Mk0~Y>;PJ{o<^$4 z{l<6CxG_YT%*-0{7c-|#U%EL=4O^o=tmawcnuhxL>-FOFO*5jahCLZZycMN--v%bC zH!-rdymIoSF8FnNHQ7a+S?*c%v$ucFG1a&%6EZ+u^~Ja(lonX5J(7~*=JVw}(qb#R zvEQD`uo+7l*FH^IqkLySrtscK%Ilouk@Jvt4e>xik*q`UeSHeHP#izu&-(k!2Xqtna|cy){YMTbTi{p*uCr z1}uTnO85KC$JAp;4o8fn0CFoR71 
zPQlY!{rwL~ZuPRHp%ZIksf#N}t8Rx-x5>s6-8O(iLN$H%FOOgm2kBd^OdM=&m&}Rs z?$Pw%Iff%WlXTpc!-Wl;2kkq0P5A|(G646S^cYK#*r0aXWD$2-*O3|$erx;g>tTEd z-RWWp7BrqaTBR*R7AHH&BID?g4pR1;ZV_9oh;EnH&R3mJA15Ot!$8;;)2A>ZqyuEk z=K2gLp?Si1_AmSc%QjqhfamLa(#PMp+6$t!_TV zBmXgWk%=k&Pt8SeB&zS%cKA$L!#`-)1lNm-z$&o0^2u}97*2&IbIK`}%>t0pRqqld zM21qjjN}dDw8UT{L_D5)7NwJhn^Z|pxB*QF%6&%(iYt~ioYECm?D%Lxt+n$cmr)Hc zY)S=*97`G3p=4=+X`pu4MLz<+;});(DP%EdwR%B7k^+Yx79$-Pj@9QQtEVJQ;x`K~ z>~i-kizp+Uhcu{(RTjW`=Cq0vRlrQa-$9=27p^PMjRd_mvzt3_p0k%%ywmv#EFk=S zz1UzP5N$URp*`-hc77Xe#}E9`1P>$KWPrEloMpvSd{0DWq4k*DAnTs?g$N<2xO>By zb9v;M1AO)ErXny#z&PD&g#T9^-5n%?`QbX7MklTAwYw^`cA`=|e@1nS+z*1nPrBVM z0x;HSZt+eWGytl)cnY=`U)mcQ8j84qB%$dGsR4rGHmCEb)pbeX{%#jwt`Xyh4X!|HYEEkIKChbeZDGsFyR z?=H`@E`N03@k{-!$r8g(pe7xzV7R7W1gBa{vpCW%OZ&h$(`Du7$D@1g1ZL-rb}Zl< zDD#iMwjJ5u2(-QV@TbPdMGfBvr43`lWO&(XjPT+HtsAKPHBBXS&W$`mz?nK zE7&=H0p^Ef97~#7Go0_wPu16NL@0=}S$shroxcFJcnkzr9YQ_8&zkYEr-el(v&GV5 zeGsD|qSDsq25WNk4=OsNVMbtHa~ID3wA1vnS~M^<;ZU*$;}}C#=Z;9LOjs4K-n`L` zh%oDFY{bP7;gqQ>Bs6g$pW5@xw}>mUKLdD0B_9}RI8j|=L zUbtwo27|XMDC|?r$;m#5gZkg3+m$^@#h7(j3tYbMrtB+*&*-rUlpqOc*w=$^OKWyL zCGyBb`lzR0@p-YLoLoDM5B^b1NP^U@-?PXOK`#Tx3JPn{9EQeI_0OukNS17 zOm1l2-SnY=M8tyQYyen`9o5z&19p_a7rJK{Xb3g=`uE_bEoAOC;x|Qd7gIOf_A>!+ z>Q0axWX-$!ArLOC5)2v{63XKHN9o9Mh|Z=F36vRS;6l1NHIRuTG>M|JkuvgqJw0Nz zy}S*|blRCvQiK_eOyuq#D&Isr;_|_|o1lbyKH79cfl7ril-qDnkOt zyNE}x(ovU|n9@Aa7CE${6rmsb{2fG1$X>Kt8kW%BI#dZ!o1I=t9O1)1X}K^;EN3)T zRW+u(+|(29&LLB00vzAC}$4n{gWm3KA zi8|DwK!6(+V?>s)J{{2@9!WPckFx)fO$$2#_HS|HhK%x|f`!ujk=Wqy4mP_$k{L$` zBC^Hw7J_4f3I?4<7>ormhK?E83>T6Yj$0W_TYS0jCDz*K@2RsE^<-G$wyDo;KzDC>mIz-ZWqtQT>l+p1@R#V*;yDp9&(b9 zfw(hoRHf0FmZN6#!OQ}t zNQbTu&n}p1XJ=3`4-5&y$a2LRSCWqocb`# zqKY@$)jWXwq1hXk-w!hnDbz7LIToL=eh`NOtF#- zrji^p@h@mcoV!PF8TQJ0)HiBZKWTMdf~jsMM|s73re6pVhk5t{Qt9r_V}GG z1O-Hmp^xo1;Phj?;V?Py3l{FX$sMwGxsWcbbh?0Im!4&B0PGT5zu;38p0%~L#T|)O z?h_}D95jGtk@Fs_q*Jtx9ih6%{rMJk{#P?bFZzw~v{-UCnhbjod8CRNm*}jclYLsA zjW9&3o<&2hUddxFe5pF5YCc#3{&wG7X~uf4&^aE6N+@tD-3iMLs{F{@;U}{QOBGaT 
zLP9yclbJu?(%aY;_cd`9-aiPmtpMDKlu)zrN@GL6pBB9$*Tl5OzWwQpj1GVqM>Uax z8-p$0apXJDsM*wE9HDc~O+*f`oM}m;6FEHBKn_15ANC37D{t=B%0$zI6}hu|fXgs6 z@z_vCQw*W#Q-@2izcR-JYUCv5jeM{uQQr`5(*{I-FCkhH0C0A&sqt@h7G8KD_Q>! z>KCV1+)F8|4NIjodN_66bEW|I1|ec-!EKptE;J-MZ5tclt>*A$O>{Eiy$RMbKF&0PxH@t4eZ?g8zR8kG1-dJ%@V)?>!fe>Y$!;nY|6eidMlnXdBhikkUw^}VIg$4OPe*Acj zrO^l5d5=PYYX|P)0;f#5w7X~?wHEqVpUl6GACJ}lIA!Y8Ez}dLd0_ei2N;bf+JlQw zjvlW3KpYZfjS4#jqWN7jOQ_~`+wX3utLh{i5ic>5n^~CPYPAzbB>QzxEJSiEMQCt8 zWyRVDbNgZq3kcg^TV(4%;I5xn7v2LUqOKb9t$0t_%pqT1;16S7$em9#UA0l`t!oww*)nJpR?UYEp(u5kM z^F3UC{k6-fZheQS@8Q(tA+f8SPp#1sA2hdkn=niK*BqWad6HjjXK%kK;v%zw|F(Q& z4?8L&MvT`UodY&F$bdn!J!#FvP6%7f6&d4pw*C_*QrTXWlKYd(!|J}h6&#$8a8pkH z%iIaLpuFWv!U;1ocfXynu_4oPHY{pBj3KkyFf!|{QMM|Fx>;BZ9yMyyhi<$&m{Vf; zy!05Q`cPa={H8|@v(%ZT=-t)-Q`Ng)2Q3Kswm_+-YQK*9O|FIbe^83YEIo7TR1TGn zzBiZDvCTrNMp?~D7voYU1WUf~h(3Nl3RbK4t*qMQLwfYRdi9scRk|0d!1dCq-3w;4 zdK(qz+j;je8O`$e9=+oF%C1$Lf9NiBd;21)#%FZcl??dCg0F?* zk~TTm*?ChG`Az@}!@l6^2hu?*i>Aacz^XY5G&V9{53xq6I={2tvbRxO!UU22Ak|?KD5Lttj z*hrP2G7;55%|8!+^{FnT!b!fgmXy5CKjAxethB{%OR_dhhuuKD%0{~Kp)WWK@0Wi~ zn#v|xH9F47dB6-&umF2sj(E2v`*QA=?lD#}t2wb2SdAZ(!(tP=sv00SqR`PnHwp@J z`2;ME-nfn(S*h1wQSXUj!Z6-*WS3z|qB5rgrV>9oUC?`}st-8#H4Y92-6i{$&RB|& z8w-`YfiNOOcD8vek4+2kadscxIaszM?S)nl+*N}ZMwPN!2K+#=nw+o!<-(?~wCaIu z#lEs1^nhV%<_6htN||jH+j`dbA!5x-FD>jpu#FDb#OoHBwrKLK$ZHt4x}Q z2);|-ra-CQ1bg2Lx!rzue_F&IC5!P?>+KkJc0MNdew<}f#Wx0YA+QtTKTyL+aTxCh z^QpZ;Z1Z>mjoy12aGx*-{&hrCY5W~AWJG=a>X{1(FFQST7>x zR;|WdzoGu~uWL7WuXV1h6|W$g-Svg&o~YmtZ@PH?ye!52Ae=K9E-VpY5`ul@V8#ie zDyy7MapPvH_~<%%bRNTOGn+u3^vSu92gR^$plxC0|8M~eZ<4~i*)#qfRB0f?@_b`D z-z55I|HZM)=ui~A8i@=(8kU&=S6>If2@m-2bZ-MJwbnrLxLoa{OYqqW^}fUK(kc z=kTLoQvH+3z5W;#{PzMD2{KUcJ_uyJ=CcEk8aa`Rd8mB zq}#&UjMR1t5jp@9<8(r>(Lx7?4km0hj;K1s`Qug=uVpq60(2ESSx@sfl}Weohor5`I0Bx+V&uEOZaf`B&tush zr~R>c_lk>yAma!vA;I3k!G~Mpzd`JE2sE<>`8aG;I*mH)99~K4EpL1GNDZIm-D=Nf zwvwGV<18!|B+v%?WyHH@&&cG5)P(1keWsL!KC{C!mko?pdxY{x z;r3)gq$zQeOa@}@?&CFta1Sth?MfZ*4U3OYUIe)I=uZ=@s{i@xN)q=m*9Ejt6#uSq 
z6dDy6N&EC)gB@Zi(j(f>z{1uK3cEIIbmTi$Pd0jSs{Oe z5!;N@WRLUBXp}Xw&1>D%`W}g{tsQUT`IC2_lfhZkVTyHFOsh9zO0SLzZgpGIVK$*A zxk&ci_}%*x;Vu}G?=xkwxfK7u0liqy3<;;}2M&Jqa9!IMY!koMYSOyt?{K zjsbX#>EPEyPm8#Lq&=|C0_z!Md?8#ld}iM2ukr}#&%c-Xjg`v4m`;?&dk%w$iyOQ8 zq6?8$JgDCbpSUu&CscWTYEOShE{#7#4K+C_QXpaU z35WL8%IFQ86<)IQkG&Kf40}ek7q}m=cJ@I)kWBtoyYx~>j?|{D-nB?I^+$};p0~ET zr?>-<^Y$MnhVo`&L@?NB;uKFv%G;nDjCW?eI1mRpqD729L6{Rrqq2SN_Dv zLs;wwC>;bjd?%|z0ET^IeMQd6l_Gu5sb@Y!XP!6MY-WArCohJfJ9|zZv92>yb|5U9 zw1Es95T*9Ko$>W^T(@ZzgKT@&+n&$jk#@D$*|fm8D>8>XuDj_2{f+L+v9&|hC>zdi z-UEAgWJLUf(z*A@n?8Pl+{b6AMr}_g80X`i=u&$OEraFM%L(i5V!%oU)FI%H+YF!~Mw7BjjHIbwFGH9S(>4318n z@9%%8e-)QjDE(3`YBR-|h|yR?kjM6OSM9$Wct_eFh&3POld*W|QfK%O;tl3ID#3nH zPnt$q{58jogC&a+=Q26!EA|t1mcBa1Yq7F0`sJ4$i<4m@x}^BDIo|W=s6E5%xA2WA zF1^Y%Tecj=-#2fZONgSg46P2auy7oKJ3z+MUt$s0BVvn9GX*#n1n$Uh30*tihchFL zKA8nH8Xqzn(%qnKTRDE0yEE_b^zvHt{)}fr=R=d3N#&7nW>4h~K}S6_fO+BK2k$sy zi$F!_8kD%grq&?eqM{4^$bUEo34F~2CdD!jo7Hb!!)lb0yZEoqK_KBim#H14gc5Tc-EI7L>F~F&qYGEamb#ZSlGLYUJmyYG zZCNFvb*`+b>hiZG>uHF$?cu2YJpcUrIyNBhEZr6tW}f*wXA$Pfub09>`dF`pnca{@ zi#er4*0|Y%Pf|tQTS`^^bCq;;Cw}^46_5g1Efwkb)o1v{h24lt2@ATZ&f-5R_-DmQ z>KKCfac@z+2?bO*|R-PTRAbR$i> z3>aWYzXT`aH0}tdUe&9zs%j`3FLoG!t-(9eezH+>tg-b}_T(Tam*0PJ zG`3OH>T7m#UUFx~!#kD`Uv&a>j*g^8lWQgw=W(Q?6MibDZZXO@aNAsUt>0%!^_*buRRb(cnY1r3$Mb2;hK9Mz`Hq>xmwz=ODwGTm1O?CQNN1 z@8d-si-4{Py`dF*QAP8FR*w8j4Ybz;u4_HY%4J8q3K8(sG=2a6JqszkhjiP$3PnxrB&XgxYc(8?MBLaH_qOv&d(t7F zaCj$MTxqeX{G_31)@(ZUtlLVb{b3&$5%P3*>=rXUDSH^!c+G%*#|N^mgwnTgQ3zab z0WLWYDius`7<9Wg{@`3u#HIPj_CpZ`R$X9f!)PgLyx6s|maFO0r%&CU<~lV~ewF*c zOxd^Tl5UroUdrft^iyHn!zFsPAM(@BsJ{CpPqvJgzq*iVCLs9?738szw04Qi{nmOF z+-Z|rKz+u5&@lyv^GL4Q*1ua%7+XDwijg7P5c3-5%_X$&(_MjW(VnWOG=ZG^8-yeMF~jqcZ{Pb<9n`n+dS4#7zGP}Jr=G55tr6}EsPnUS|Qf?omSpdgWv7-==>^VsEjk9N07gLmokA&|K zrom=6N~lKT?3bADeFoQ18=ThiHiZlN+P8gHF&X7+Tj8)db2CsbxLN0gzd_cafSIAm zbr9K?o&!uk;k+8gCUyT@HGUfXmET7|=022Vui}_GbDAPxs-JWdMgxt`^;$y+uVGqA409+ZH%M4&J*Ey1R^%iMl%{K7RBlaLkFRR|+>zcy21yIgF0mv){0t 
z4E*-;MN~xeJ;k1Yb+YVbW@OTnu9wA{HuxW&B=Sp-k{-eT@Q`XX7(u4ey@(zQ>Cw&vOcJsOd#d%e(!5mwv?)f& zBeAhzP^5BPTks^Q_O@POFB^o%#M&o>e9#wP2M`c}ydLz!MS)+a*U-^$Hs8&OPZmuI zqep9Mh@A;Mhb?e3(3@Kiri<*zsOhcgrs(MHp3~c-AOH;57fafeGwk?+xv>agdUrPe zO|2>!2SGl#N&?N+G}}t$6G-G;UVzW7?W|Bl1-hdixkabG`cds#SIOTu%kA=l1Zy z10UX{?%REIFJ#wQ3pi5#3c(wB9^&K-YSQxbZTwk)>FL1wmS@G~1T;KjCecIC zp~#q7C$%%*5}A=Y@AR%D@fh@PT&~wdfV=N^Zr_$2n?K$&u3H3b8Z251p+T1x(#wNW z*R&ONB@@T>XTj7_^hr8~!0~J>fY#KKQK`%+6$$aWosA+Lq_puJocK(yC z`X3kIE40DOZMgQQ7|u^Q@VJ|&Bw0>YaIf3H^y>B-aLWcbR>*wwUy)T{wL~5+;$gd{ z-XZYsv`~Hq7DioL`3q^T*9Gw??1TNr0U=cOyoued`7Fa>p)qRUwlGBf%)&& zPlt~ie0Ak$m)XDH5MzrE8s!}IME}AP`HX_~i{d~StBP{X~ zoSmG;>>fK|LO12SQ2#M~6nIg1KT4k+okoQtxzlwP8|Kwu8AX34Oo=gtfj`A1qcm8W zI~p=!o)O9YCw)dhjB;m|ACV-sB%#ah-@@)eW{~Yk{$pAjzRlhe9v1c-b=dYfLJNeo zBHd3-C2v^#m|ZSmvV}HG-9t2SbbBY&UsD9}V=C5c(zhPsbMP<}r{gQ&zsR+w2=crw z;huBjDyA&isHzxO`H#3rngf^7|IGV?DXDQolZmM?2XyW6D)Ih&R{y$%ud)#CfGru# z=I(DhclcHNHF&|uq>~B7|7}3<|8D&on+193SaF#^&AMCXi4PRGb1?Xu<3XfyGmG|( z$~=mbhJmkG{Ap>0GkqOy31nu7D6I0^Er=d21@RDH)TJai=}R>WSYqG5AGLnt#!J1{ z{IjC*azn?8MG=1$`?`#?h_XoZ?bgTD!(hZw$F5BtbmO;# zI9)HW-rhCG>ubpOm4Du>`ue@e`|rvRmv#*JKuX%0%dzN_XmJA$e;vxF8f$gx?%KRS;rEMvI&NCNB9T`t?DPv@pTD(nDsQx==nF;2gx`4(Z4mZLOTWTaJtO^0NFUc3}z0v$m zwksvA_%8RaAkbvdy3Z;O2?;SUN2Xbu= zMZe2l8M0bqy0R||J&Nr7nw`fhN=v*9cayi0v*?1HX>jq%!o4?7l>?5_nuzJQqJm~x z7JQzd!Z&~-w%wGzM1wB7&nuiY;#iF`R)FwfKm_5sY$?$tjMw*n-a(ezzYzy3uL@ffn`9DDAGzk2wu?2TdouozkC)h7nu!m%XD_+*2v|6UQ?uto~CQ z$5!tsSz_)eclYXbr?tU`WS>ftViQCe7$S-K3%y8bmq08YQDa;s zd)l2C4;u|eUgy5psmSsKczG_lD^{(N)n;FFDV3yJ%$h|RbpR)-0UvNC5;};Fx}%2L z#LfYw#%1}pZkc(bj}=l!^5Gvv3>Jn>=8yj(sFr3v)vtIVsn&AVA%=$&el*{bwb;8i zwe<2Q{oL^^Z}$3}@5T#P|uK2Sn_WRs*2 z$5v+WU6NR0kzK}*(=c?{?%Xjc&{e-T?Vzm9a?6ul(knjss_da-lC|1zt*j`eh4V7% zHSX~;|NHB&qDz2fodY+rl^Kx*!BKzO0OW`q*CTz;AmoDV{iFpwpTMVW3ozAUc3|mm z4{puxJ+`VpAe-#0R9(yi&6lWe~9vx83U&Y1BtFhZ2DHK9&(E#${Ve&^fAm&t*Rw z*TZJ7R5L>HHkgW7y*wRUkpwux?Ps}wM&cj@on#b*-EW`$Lhu!$;Hg2B)_Kf+W}m|3 zJc5K}WwhfzR=8shH|7B7ll!efsB~1+xJj~1*n1rXhiC2d}cKIUS 
zvn4D9%kuh9n_jn%_qm?gx7Q8iS7?5$D$JTMce1kJOu&QjA#_c;2*FY~udAldo!f$F zI@3r7mN;Fny@c}~bLZcERzADZ*l=_3J0gJyuy!uv1L*3d0LaSzww9OJ*)f>!M)s+F z%2U#9AR*ufp}zVrx*^g2^+rY~z9e&ovAr#X7^qiYRaGU@gL^XvvWNod)bqXPSAFe? zMcpQTRqq_?0GSl)4xNKZO@UH6)(F7y&mW4{r|8C_!*Fofc!T$?Nn6kU9BXY9bVH&1 zHtFGhz~b%Y;{3ln(}GVM5xP@eLwCYns149;$*5y;Mz8~THxsOspmDAzeec`tI8I4W zC%|uWFT8#|3$QX?{Q@RzbeM~f*wznEcdL7M_L;cCV7|K~hg&Hw$88o+MKVF6P--%^ zXK>fn?w84rT*z0G8evbXkGLYbxG`glT5z&ETaU0rnSupQh6eH!F-gIke^v@yhZxo> zxrkk`?x^zuJra5yK2q{HPE=}*s6+O~{R8cJJQ!FgJ zDgE3k1T{7+P5$;`1a?T8YeIvlZBw1QewZIICJI^r<^C}&e{1t z&4^d>;Z{kE!~Qn@#y%)4Ld`icz|vAf{39(|&l{QRw=5u_4BLT>RHnAVd@^gRI->q7 z!YwfkPI^y!C`J>+^nh^_dOv$s_&;2L2@^2LIs9ZghIMKh8i&?1AaR0}<7KS8IZRJy zd`Nk{V8k?ACU%06LKu#p2B}esI+JfWL5fy_Bz<^EtFe7^&4AlYd5VD@#UF6)+>us9 z`<@7TPFWphTN5^uNxf+cuM!IK9eS|ZYhDP$)->F4@MQmF$OKCWqQKoYW$7e5ZyN!T zxjFm(f(;`U3wdDa8lt3#B2u@e+Y8ty))UZ5vU~=l9nC(6C!h{geBH5$z<8azbQwO( z9&#(ho%CMtF?OnraSrNUSPMfljmGf(4;**JANNsrP}`taQzn%UUs@=%S(QgLnEx>bANT`)p{(J!u~_L+2f zb-Z%8r=Gj756o2{5#gW~LU3wFVeR|q>UJCDJg2PVp1Z()WQ5az@9-F(KkmtHtrI6k zhYy5p%$W`Q;+k?Xbt))SK&nchTIB@akhL>i*VWf%rEK)ROy@2}1m8SGhkxV7o$#oZ zx`WQP$AQSQ=p?63EOd_CLr;+UgH5Z5JyI zgU!H*x*87RzN50uhh>K*neaa%uENxe1ZT)%^YRxNw-O1aR|XPsSruv?CmZ0=XdWb! z*oDqwkj%7F)?=QU1&l|c@1{Nq({HZgwl7_O`K2|5^h|SexcKnm4!(6vZ`bP_Zsu9#r6i3!~dUuZ0xvwG#kJNCv$udC>}h5f?KR-a&ap~q z2mDq)<~-;UiAV&hndV4uNCNbN2~C}Q@#8SWKVpbw&1MQ*k5e2~!IjvW=vouVRMqU9 zlr5rwJ~#+^tkYqb! 
zO_6Ms=#I(Ri-z5CaiZ=S?2965;i5&)FEE!dL9{vhAp8v1R_1_o;soxtS5=`fLK>?& zkn2oL@gHD3v**Ss%ae*8ld(%DR1y-RfZs(F}Dw*)n-OCN%$@&i^u$2q6zl9L%+H`w0oslP*AAkMjmPnT&A!1 z6KfFmcb6F$!V6(JwX4w-JdLKs!6^}2ci8sp@3f;sM3JlKgpN@Y{(Jf$pDm$Hrlxu; zqf|544*a1^m8kek#RBo`&;Xse-D(bq^)I7kH6JP#5lA6Es9rD{6oq=N$}bdm?fiBS z4&-}jckWZ3T?*Y=mFt~h%TL3ldfTf#a6|_<)B*_u_mneI3+T^Vl419#?^whX?i(=D z&q6gmLP<6i2V%BvIGL)A{pUvRnV(@rw;6zqkT=rfbX%W8N4FN&r*v&ySWWdyMfKdE zO_=WxV_m4$D3y*F$O4d#9Y^g8qcNzXNm>^qJXDi ze+2I?EFAqbxzWDwZ!YIAudWLwyN-Mn5;jW8H|~TcSk?o*h+t10F`H=574w_65q=Tg zv87S({9GgT2OFtos;v;c|3Wx2@7DpN@kC3%_QhD7D^Xqt1{h-$n>%ZYPG)oUSfwf; zba_rKUW;EL{tuauhAw%~61Kz>?#&fv_pKj$kp-nLzm$6NWH3PNx<0abme2#+{ zh3#L!<%IfEu|jM!fE0w>KS5t_Y_q(K)v^W$aay(Nzy>s^th#UQO3D`AN$@%G+3 z;}$07pcA%H?J!dR*gs0iS>w-hARJP%0JyXB583vb{{&gy^Z=L2iQa7XwYEJbgwhYY);Mp5m(=wvT4EG~DySFAmR!rt^(J;~ zpCfuHh%iQ5+x%JYr2GdAlz-3hsqEI)+sSb{umXTXxb|N}<{&$(UOLgIhzl!Oqu~^D zpmucAL+qHPU8oRdjJezHu zp@5r#9eApvD7aaLL7O;!u<+jXqcl4tVD(YYcqyn{ibi&ia#3tN2Z>24EG1x=W+Gvp z^G%#es5i_`IRKlZPuZxG0J2S~mwf*nJs&m{O^NQqx^R2}6-<;`t@N6<6%LBMtC^9J zl60IdE=I@fKnON>8Z95U0qsn{XIEFxC~PEPMe%E0&O4HvWwdGelR9>A3u?DQs-L z;TUvQIS_T?Q3ZUfe<>>?KOKV^I*5VQj(htvEz|07$Z+v^k9mSY$6{5SNfgv?Aghocfrv^K~R^bG20RT z!p3BvEg6&q|0a|=%%6rb-i<_)zP0Np5c@^Z#b%9U8QrVeaJ#Bb?T(MfQbHjGOr{r$ zMPUG)SVQwIb}o_KsnzTNjpDy@&Lb^lRt-tS(tV7@$}dqW`ydu(J$ts_?HqXwm)|0T z5A^h0T;Ie)aetVnpQ2Ki5&gP#bQ}^?28a~|_d@MGXCfR%Ddm29-=6K;wS#iw)a-BA z`Wzo??}DPDaQarSBw2n_(`xcbUv+Hg>(+GY)ahqXVdTS;S$&}Qk1y}`C~nrV!aiWx zvT};{jNvK=8d7rb|2gzSd#=UPks1TF{ok?P*vQ0W=&Czc)~%DR2ZLR3H5(h38<0uw zF!o{(o4$PRN#7|qPrjg%lwU19`w)H|qbR%sr%#(f5Nd2|!6i((1qbJZQ&|IRpeZJ- zmaw8)R6(@wy)y|eHr##F`ZK6sn%v>>BxYnJ>>i7-Nd!a7*U*<0co1KPq8=qLd-q)g zTkPZ4uU!`Y!HPYi-ubftSi*w@Fm_qY0vCPc1d1xScW*b`7SI{lHafiRaSnaqM{FAo zzT}Cr{oP_p6UFo=%&d8T-)ytAD_^|8o>G%r7YMl)2*ND=A)jZ)t)aN=&|s`s;PmUf zMXI1Go2d@dCR{;($qCX}+`oT+P_4mVo}tGPlTB?g2gD3OzHEhY&;^U!)()%Ie-FPnXC?=T1CY(6U`Ud1}+q~A`Q~T~3P{>9Gs2IA8NSeOw)rL=;S`65WcEy`psJFMIl*2)S=s?h%jQy!N+XO5W(s0)2O3B^Upt^ 
zyqA^Hv&N2_G^s$z2lkjGAk8l#VuU3hsjS3K@)ri$G2f`{ylNlZyBA7=6Wi-R#-LBw##z8=bE)}reC%k1OLugBH zOUISNbbNNIq%NMylD#MxtZ9{>jt9S8))sJB}07vhjC+gh#NfsRxn!hgu!*#Scx~Y(GU(k zXr`{-LB;xmD0dwVvOCd~D$B=XeCm}AO_6zp)fgm0S2=3>fq#0YsmSJ^u|1<=o4Vh{ zV~La&vTZ#8sQQBp42@}h%xYy96Z8zeu7Nhg$D~V__AbM8>y-G^t2 zQk-adVrivoe?{t|^#?VcSZ=1Xe#zu1mW|Bnt8<*z4WIUco}j95x-OC?*ZMgqzK zJjpZ%7NbU(lu1Tm_xGk0nKg@&c332jt)h==Ovl!ZzuK9TM8qw=s{x=FCI7BwbK%~J zPrbu<{U+6J#Op9vwpu&kX3@(I?;^etv^O>j7)MZIhEO-%?Vr?q@*#(|yd>s0DhKf; zY^ZF=zVPEbTSn+D)x6C)OcVN29cz4BYg1v(kYjjsUBY@QT7fQ@OiRX1AEf}-8L@%awb1o`Gj1y=+oote{dGh48B_+;E@&7H9 zT)fU%YwYtN`J%i|hu?W;AjC1r+JBx2-CR$kpdUZH^Wo#iqCF}&NzquwU`?aVUz5%U z6XieuLvxNyK#eSCI#(1|V0qR-%-N*|o4r4D`}3;a9a*~`weEK0V5=K^hj2>$4=;s) zPq!ghY~}+-;7FH!U;Qt-&OEN?y#4n@S_r97_EHI zYqF1qHchs)B3p$adu6FClWp2a=XuS2?#JVt$M2kf?#JB5)c5=Oyx-UIx?Zo>1;3z_ z2}hGTH-~vc@<0(+uvs;Jb2zZy=FL}Z7W|+Zay&7JRti3FtWIhJ2vp!cg0pa`bu>om z5%$K9`pTGn6MaMW;V4rz_stlsfpqTu>Bxi^y^7|&v&ajgVs-Hi#FIo;~HweT53{mpzN}GPxUUgc6502P5#brVsC8X?*Tn+F~ zl&xQbQMDvxO2OjmB7L9`dAyIDh${WJ+f}6eXqJ|2ls!H?@4x@S9qkBK_zO3K-c#83 zbn^tQWm0x;oy-@YH5z0vkSU3nsWg+JMxN}IxuSV+Kv2*zh%~}Jzh5ugaFAmj5djM2 z+^5@A{#}Jqkm-8tc?(^*CE=6^5Ho%L5VVS(Y~aW(SQ1iiej3Rjs2yg?)!>Q>G*0>( zv3*kG@7_NvN@!m7ZQnP9N^~S6<^~pNHvNNRBeDL&4GlEQT-YOgQ3D-jUbucbCeq<} zN*z)AD`zcjyEu$O>B`B?jSKGAr%w(?x3>!}9$g^vxNX!wv;ffQf55T-2T0qbXw3fQ z6Unf#GSpOO2;7+2?14czughFec<<>WsI*_PHZ~$%T4+x zIuMbWn5Kj0OVGY(XV!C8;$$)xK0lA+eJPMY$^!TxYr0Ty9T?VsgD);epK^q&!?jcD zP51U70$yPjB`O%_f)?IDWCjN6*8NK>EA7gBIpti|Dp!VhNkj$<#p3Iebe=KzknIlf ztD!D3ech3#-G{Uheh~Q#^u;00DY9aXdaZcaqX!Q}Du?R1Vk0F=?&HUCgPMp!fI|Ju z+}_e6n)t!SwIUAIGMH$^V^nHSOR;cEHldCO)g3L!pikl_DugLo-b2NVUgxts)oBgz z)oAum^}xug1GEjLOpq^uZOyTfE$8?;jSsc%`}r46XC$Tqb69@1`P~7|xe7G`#MD=y z=tpYCSK8ZNOruSpREkeeVWhxtRqxYmP#W>Uyb5(w9s7`Xf%v@cgpJ$GEBeXPkDJwyb6nBDCr;_%(A)Eq%j(&9<4gy4!zn{Z?!coGh_-B;f7=w^W zOfq`V+Z2kMEmfuY!QJFgdiM?v)H1J_!F5f$+pQH9rmx$QTgCMTh14 z6NgVcG#@w+R(pPFKg46ixYZVfvn=RDi@pj`#C=SOY4PdTUhBs(&0ceskR}t_$$KD! 
zVjwxJ@D|&xBz!CPVDK-5==&x5do|802sb2ozDlb>h{d;35Q(#<$DB2Q3SC(_Z$WBV zyp5J4nZxAJliB%Jyb_E59y@NFtSfq-NSGD~a8WAx=wG4-GJ-7Pb=+<)eK0%TuO6c4 z3H>W*Ed@d}O1(Mki$4+;+*750=< zER80Axx2e(^>{dl9U=DVn7S*7$CFP# zPgACVyL6mmpjZ<>7}9LZ+yW*BkI5?udEsENjxSCP_4U~-YF{TLJ~#wFIz!+Su%&T4 zzekX}@2lZ>;@G6wV+R^ADFgbsU0mH;h!9vKbe@6|mAQGt9_)BznP}!*asYVDtCyFV z=96h`E{%{)GzqCZMw!-bK#g?R@rFB$4ddT5H%bF9&Y8WN==nr#@ZfkzP>>iRK8R{L zEL8QFVBz?LY>cC}1;eUQx`O;zX5A@bDkrWQ5rqtUwuzR5KZu?8uCg+ovgPtPx_GF- z9?2O|XVjjZiSjDdrO3}Nra{#G+U zb&x`RS_08tBy>y*lssjMO9%|Thr8ic(xur3wrYn!u?$G2b%y>_aE)#+X~O{O9kH&3 z!n5d8{vwwn5ziP1f+}QWQE!UiD=(U-j*7f3?uVYOQVk=s(VAjhpv8aV^r%t=Zcii_E z5&!c#47idue_uPNfoCDp@Ij>nWI>HHNJd?`#$z;u^Z`z_cWS%@VgOYku#7Dw7D#iH zXUQ3oH5B>gLYqgjw|z8&P=H;vM2f)(j^*V0_rV*)GjkPrM3MOHL_hhTK0AM0Kmen<5KXI^>EC2vC^EV{?6E!qd7;-fA5o;*GCUjhn zD>8+GIL)=G1S8JIE`^%qPAzvc5KW`G8T;Po*^jSX1qxEaBD)A(3Wb8d(e~y(eAs~+ zdr-Q#0Igi9_x%eL4)G!NoRHrrD`}eFmR6dDAhn|zbyCMYcp2t$v;2j4C!x4j-)HOH z>%be{TM|OL^o^S1j|KNkO-adtw4gI<|K7dsaDRj{YZXK|!Zg9RP@MZw)ns1h$baIu zJ?Y4ilNDdJl{{-XDY{-HQt9X3!`8SHeTHC@1EL}Y7k8>nyRh(Z+1;UfAovdrcYES- zV3pX2LTf$MaZcKzc6_<|Nb*6cXZ*a}5G#sPK{7jxC(#1hL*_quVnt=qkspUZMX6JT zqpUoZ`$LK9p)A`285us9Kj!4TwRs3W){Q*Ht*$qrC+m_NRjxS z<|GPpE51#`4NAdR^!5flVzL|ntMm*C%Aqyp)O;0qCp!f+fQ|$=f4}kfIJGzhGs_rbHya3bj6}Y zvN`jV{bn>TMDs3v}|;DFU;Y=%TnfboB>pe-y>3_vYZJC=naWLSeV205}&b%&Ao_VD223fBK;pAr^A;Y~9u4T9bP zjLO=vcK=YRIJ>#^HdFaQI|a#T!Nvo;4*|)PypN9E?y>U>L*#iJXwrJTdpgb=c<$Vt zfLt#durI{%8!I(0Qi6eni^tmBQzT8NS542xIr~?kQV<%m%1cPPwA9BH*REP+&6Ddn z@N=BU7{;b%=%E8I@YTZB_HE`4k1lS|7J46W@q&SESZdS4G-0S+FNw`-1y}thQik2D|IUh`)zUUH&L>HN}mEsnyRK!MU#ko&f zJwUF?uQoLfXq(`^*=F!bsxXEay6xMrZ&d8B>IT5DCTNQ{RNx;ZS&=4aPPPEPH8u;~ zvBPNiUJ3xpX}c-gf5C=}etEWQbtpS0;cU7&4; zW6K@&u^#t&*t#KbXQZ!j%H_7m3c;ANVYE}=E7vYjFs?@xI7Pn&t+Rde{tEkiS5S)+ z$&2G(GK-@zKAwDsbB8#D0@2OR6ld3OAD`}VA5NBK+0mv#M&07r7(1BDpu)t)lX9Ue zO2^yf!|@6J@Z!q;(Zdm{GH$Av!!1)M>L{4?viB1)qTNX;u^@%IdX!IKZh(7{@kbP@IUE$% z$f-D5*Pc&Hdp*132RAypdG79`7he-n@uW%C>8wEa|07BEwp0IQ7Z*pTzX8Cm{O}=< 
zzhPi>5l%tzXIXT{qgQ$_bYseISDpW@iXCX4N_n8>mWOqjK}&6jkRlqsG?G>Vr>oHK zJr)+ymwdfIa4mcD#Sx{O<^&^;Gy90X3~wAF#}FHi zG`92k7<_xY_9u#f?T@c=$|S@8RpYT?yYUOE&ui@7a9txb0D;_o+8vu^8`Za8zt;3v z%FBTSwqCk4+^PyZfHvfo$%(yYTQEpa))fmu!b3nbMiFn^$UK-G-1JoH|)FVYT%t~{>G1txYJeU`U zmDyPQdnUgD@DQNRts3wdh3vgi)D332eFhF7ixb}#v@M~Ffa1I_`!Q7_jFI`uwVtmp>rvkFm3vD%z+#{+HCgo z%MAK(JTWm+DX2%i-=U!;_=J{wb09^5RNFw$CB~$OyVJoFE9`!<2UIp0LIy7NLqc@~ zKifP@VwsrQss_Pymo*X{ro!0}Da(>`ko^nXg`W&PgY#m0Vi5S~$gC{^_vXfb>uB8A z!r_1XBfD_!KrvEex@RelTJuypmZ7w*dg(+ELi^lUvJ*x#rCE+HHVY^%?=vv`_d}+b!n-;=>By< z93sJ%SlotVO&ic^7(<>n#TSl$cB)lw(PHKD6j0L&p9r;y2|Yj-e$(`)cE%$#C1U~e zHD116rS?a!AVnzWTbVsQ^%uKpKG1{Xy^W3Sv~N*9i;a*;Gby!jGrZO6R}}KL5gmlB z6@m;-l?1W$uYvSdRODuNe!vwvV7Ympg@^^h>eDG!vX~@RjeuWPu=+8r>ZxU#RYMrf zi}4LX^r&6~4S`soI`p(@>lVE`4X(_#ZQFwUzE(W6=U2Z;{04hp2F|c{oYW#>&zO4B zlB%|B8AubFy_WtXb)({HyYBy>k__&2$ z@9v-68FcLd!){nWt9K`;rP;lVm^PG*T)rNnmWa9wZ_zW)TfTfkFHH5qffamaQv`}v z^cC6i0i$F6RhxBf zN7o1nV6)9lFPnaLBzpBwZC`3vxsaQ%L&R*Sjxoj9ix&RleVUJd*UsYYGAWF95=%gH zQ=}R}gqk&<;Es#6@=He)1PMb=Fz80hkg)bU;~McrIhZ4B)frF?dJX$78NU9Lj{uP_ zU-Sa^V&5gYn;HgM6B0r_giQZW3#yk-pFYJ;8RvAiU62i-9XYek4d{+N@q`I*!{7!! zx_GYny8{_=zYRYz54c9etBdovdqLq9m)h3Lt5@xJsdf7IKQW3LEr&K$Z7ERDPL9uu zR4^>*KLP|T-hZ)GXM=|ho8B_KbA}30z@CR{>0OoYF_f1FXWtS2zPJ=s)zhz}X4oqg zNlU-Nj5SUWxteQ`yaY#NGL!ZOzzZ?%0`CDY=4#(keH^IJU;55-=X#lfNQ)7RRq);! zMrjvNN-!ifaMfzwp9p;FeEXPldAj0T2)=3`-fzxj9h&`EGhcl8C=O%a$=%~c#&)5c z70R#s9~@LX0iM057M({?! 
z&nQyey|X;oU8XYs07{irG7g)?bP2ceng8Y%Q1O>&=-FDR_VF= z*5kX995|hW*IZdWe!lk9yVjleU{k@@716k7Q1fQZyx-o|P3g>$9mjaFZ;W*0^RMr4 z*{{FBEZngxd|LD{W7nh93?P2*AK1#dk88irO~G4*S%^uJ$ng9J{C>uEz%Uq>LUk@a zPYVTB>{~#x+YXMqYpQ(E&eaqlTU);hPVJCCFt9*lUvD!FtEAC@N^f;9F*IZDdV1u* z!GrSwcyvCt-2Qvf`8k)?wib@oN+(KqE~2U`K0vHin{X0svMj(=)}U+?t9FLMqsM3w zw9ufNS8{x%?8#`uRgc=WtK17xPA81HdT1@gi=wE=`B&BHf6NGmS#06YK)Pnl2&z;d z_?|A#=$p(pp+=aSxk)cKCOSF*BZ*GeDUZY?$L5x?iZ0I-SfXJdTc)m% zT+BhQ*Vn=)Sjm*)g)S%$h{Zg9uzDRMTSZWhl*K@JlF-&H-R!o~{gVJ_~)Fg;`)MH3|zgzrU}6%u+8 zaV9}e?N@wqif7NDkzBYO=m4tvY!8eeCn#w7l`z`Es6wXArB!BtHZ`qz{RQ&qjTJd8SAB&bml~ zt;7bePc&s2SbEkn|7gYBOuYsQ^$xJLXtnnF7xgT*A=pCvDv3rny>Cz?f8lu+moO6}^0za<< zYaM%A^8EQh#iQ<5^W$p1e4&(3`M4eJ6SU5P#=|5xo~({>n7&c8?J!8kKw}psg;K^N${?L(KhQn-d(cg$!`P`aqiz+zg}zzqiSjqgqxVye#g~wsTq*0 zT7gLKx_NISZQL`XRk38+Mz*5;TCbdM=QZy>g;g=#(OUInY~N{iR3owYdHnv_{re)E zLHws^UO*WxBDkG@(GJr_FZ=hrk)EN=mW`sEEmNVU%EfY|T@xonc>T&13cfD3pL2mD z1ynq-gjiL^_8yIa3|7^3Q7!de5k#9=T0zCX4e>nV>!{{y@M0C{?mu>^8oHX z4Fp7LPnrzo4o) zjwBw87&q@*j2@O~zObcq@C(Q#sNBeWT2dm`+5v?WG=^^%OrJV+9x+kf0fBQ%$f+$U zbpMIQ@u0FqywhJJ0+-+p&idF7vDg@ogpgCJ?PFF6?~@Sh0q^Qr6Zxc!wC_0)=PMZ} zgp~`X)6SVUsUgLn_nbeMA5gd+>oi2@WC*YUY&pT+UWQQ|5KT7eh6sbS*`&WPe!;??bz>4=J52Roz zr@Wj@pt-u zZ#ruQ>xBT3 zdu`5S$b7$N&%KU{kxHgN*O%nL?n%O<>&a!_n+IMr1}e-?G<0i|4A4qUu+X+9;G(jOpq07VIRAc*D@Vs!@nnVzl>S7N9;V;5PGZhuKcQz&77 z{3HmXv{7HNqWb>}2>=u{DZ~N+Ptrgr6vBwW!cs=vW5?nG{o6G@{6tyac9pdl<7nu* zt|hg>IXwNj-JOcyyLLm*93Ho}+feV(3H9E+nZDg={<*WgPj-E?7)#% zaHUC{eW?T1@((Nk?&qU_Qz8rR5H0q3XGXBJeZv=Z;Rv+lMtAdrD#o0koiPp@H^!E2m6dUEMh9McU$+yceMhALqCbsZ>Rta9=21RC=c? 
z&=nd+^v58DIaC;W5e?@|wg69RH7}2S$Fe0!xvQv}Ar;9sNX0otnjj2 z|3!(~NePZ(Z(*Mn))9XZuogZD5eRziM~BT6fVomz@hMqtp%nZacnlMawsxoVX4KdgYnG*& zbDf?2xnmZ>-9tH^k`NZ4o;sBbREd)wQ;^;rsr3}r)dl0Cuu(l zw@}m1>+>BGI|98Ap90vz_P@5gtBt%bc_&7S!;KF~%Y7cK!DI?|lvpqp(NDD7&XCCPuny zD>xr})gnu~Q>pUW(dLk_n+Dk*1 z(C;ih`En47xu|BI*fF~nh?E`0mUke>vqM#BoPJ3M|0aeFux`9Q-zFVnMB@gb=HdyIUcGqf(nw)Ar^20c?j|+&tgC)qL1r)a z0?HLiR}cI~tZ|pi{(%LeI|*as!z&xtahFp19i{G-4hHNi?5YbuM>BnN?{EfQSpaW2 zfA4b)Ch3s}CLIwK@Lmk|a~R#hQCtp}Q~3tkVgV!uNR+F2TF{%bDE-|k>-cG)ag%%w z;Jc1H<)Lg6Aia7(^+D!#a;}RO)B0*oVk4Ca6S>bXxQ;Ekc(F5t24n zXNQd;+qS*pf*9o6449n!kSHW;;tDIt?^Wx_*xRd`wp|(3cx!$23zgDa>31gQJRG~= ziS>;UOYL{r^o|a97&zsIbGpOLqWJaN_l^x(6u;=fh(5hN9fFL66mdhU-Wm>19qU8A=Jz6ECuB?>>Iq&hyjIfZ?ajt?Oyb zuMR$VuxoG-`;rZYSc_p3ZRX~)n;X7a(NV46PJ8(YnTS6l__1r(4xxk3>i7}wK%7!L z%>_{jkBHD6WV1`}?=g%13bVWht3Z&b!U7n#%`Qjy70mr*HeUIAsyDEgkfu0?+*pu4 z0H0y1O~Xp(e*sJU)k72_a@;c4e+}7e$=4U@3|qL> zg%Z3J#zq_VTY9Y6kAe%nsO`3A&|9)EQrv({*_D)nT6#k>f3hDnp~{^9I2QCWJJq8` zuhniJ_6s)c%=!lkoOG274^cLy^-eY)!I#f%BQ%SY6n$eMmNEW4-MN?2-h6|1e77hJ zJ2x^D$l;_H7+;~fkoCzBan|n+_R+p8lr*~1O;?$zYeu?Jl7L*Qdcb@s!Z#<-3>Q=c zXi4YpFm1Y}&SX)VN~n%Xbl~c{J#Ou_^ME8HP)~SV#K*Dvw+0v+`_rI)-zaoF76>ML zec%BCZR?lR*4A!Ajrg&Opo^m4Xb}WeGd{U&AG4^Zr=jy`H$`p~SNKO~Do+fkzdk2@ zjz*}eCN8p)vIR z>3$tZ{d($0jML~di${^^WVhVy?{xY{P-phpuVbuf$?k4lo~6=h3hEClkjlz65ShvB zGJakHBTZ|qI$js2`RzI=TY9|jOm`MY?j5qM({I0kvx;;i^C>t%DRs8ebSZ3-A~-=1 zw3$}rn=@`vqe2qvhOFrnyVA|3;t%qANZ%v5@WXEv!iU<*KysrGVS6QuZUg@W!Qy2k zH>6RD$*~??g;qd(6l8~8$lsug@w)YK#O2a>tOX2n4efbw!@^Q#@b=gm2*3mvantq z3X;+jAg!TLJYvw88KaYnnI)BA2pB(~N^4NK&GcCnl%IwhcCC1p5x_^714&tpp0MCH z5J68vL)*Xy#J;23Axk+#sR48YTEBMNNw-cC7oMGu`|{;Gc*&F9WQHW>DzAz`v)D;? 
z_r*+fFX~MA)q=9r%M({1s{;2mXbVe--E7#ci}i09P(;>hjO1p1rQ(0?&be2 zvH$Q+$T&Y6H{M*uB&riopL!>cnc>Desi#_t4VsS;g*{Wrb{aSyTO(5YGJ;TgonTRt zb_QmT)p&~HY<47(W@EQoK;prN#I9liH9z3$z>_atyZ~z{Q`F8LKeUrxN4?0|iMzkA ziOvS68un2K+avo^a}C}6x}Hc*PY+=eXV*la39y1i&pc*KQt&52yTknRya{id?{2kw zBSsc-KyU3MkPChE>WVx226uU;5wM8AC+z?1jm(dDW_L=32qEG|!xxk+Diri4=OC5} z%{+1^7zzFTxQfSM|8M|?d}4y)qhmd2*Kl@}5TBL$Fzm|VdZ>L0-mYeU3R(6mZo7J> zxU2Du+g=g*GX@fCCz@t9nLjtnnJ_S62(p!t*;Zk> ziIzR;BFGCJOO*O}3OMA$x+QFYw|;vAFbB$^7{7eXXAN>$S((#NbW`5ALZ&!Tcjuu8 zbbUvH9wR?#&J~9ap8)}cEL}*-mDzIKJpetJ!*e}e-GzzayB11^PSU5-xOGa40R3kRe5%cD8vVO1Geq;SZZx9s2R3UW+&bq41U4i}Ypt^So-_ z{pIK}yBx-wtRdZH%=wdjW>v%G(U7K$Oz4d$G8?wcIRCS;c5a&>=^Dv?vzlK7Fc-`_ zEW>V&P3PUr1=Cl&ynCdCR@LoAZS~5UrNbnQa-7lhe<~S*;_Q!!eUk3*rd-S zWmkX&t#x`8y)^%rd%PNyL$;!^f#yp$LXfC=N*FYV!1K#2s?ahjP0i9-H}0shgl3T{ zq3O_3_?Ae7cl;$S5f!x7ZScDUCX$<`dko*}{s+Ds-a;(&rI+ONnAWxAppXA7ToYiBFNB@EM0X>EJL`bkDU?$vJ) zOlq`iXH|b(;ugi;2q*kW$cs6gW!HWm_I=Oizds$ zbO=XPN&-WoWevQCZtTrSVlRtuA3_!OtUh)1LCJT zIJ6`mQ>wpuiD$Q6!gWVTfrN-n50^Yl%m&gg(L_h?571Oo$Beu=-G6jt^(N52Z5^nd z^XG|;9J@2hP%J4$Q72^hRJ39AaEAD#>8e2CS@;wfjxftG zzUf5@8^d!@eZc~0frO2Wsb(q~9`Jbt6G!uUGF8^oI@TtvNX9x9)Xrp#SdoFqY}ySe zh(74mI*9n~X&*N2(I3|Cy(YtkP?XatS4#jqNB&0JD(h;*-T^kW@@pDq#=lx7cOY~T z$<)_U@0>jQftI4Eq$HGw{pmT)b!Tq(!t%}BLke8YNEjU8lie?mb2;0z=9EZJh0E~r zB=dV=Zulgxcxfq{H$NhV?If$`0+QI>MT5Kk4!w@7=!-c-Q9%yaGqQjyckRfSZ6)ij z`hND1T`x4aBlcfyQ>}JkKOn^<$44k^MDM;x!y?5xd<9w*42oi0(^8O!Hw==qdFIU5 z*+8g{d4YoRQU0^m`)`?#i{<2``83jEuA()GlPcRHOHIto+HpVB>k{V9 z%G)idCH%z8Z~2hfxw%`LpA=hN5u(!=HSEOlVICxg57md*X~lHmX6i*;=jcnncH*cO z8tOymVhRvbGIEjqCU0AzU_Z~Isa817r$#lQ%pcEL`-srBj z%M)g$cxaA#^`L3Rn%{e@%?s2~{PjOWs)Fo0En0POFHW!>u};1Y&|DGX=H1_HyJ-FK zi`Yc-CJi;=e0D#P%j~H#jQH>q7xN#-o{;bF6pP6IqllP}M;~$$|+| zsq!CY@2xMF7Y)~_HDiZ7xYRaF_Sz^Ajbh()fodUU{sHG`B)pSqDEwqkNH_LyZ&^cs zMgojaNNCp7*?eK7lFqGyg0dae6yrAv3glToTS&sT>6M6u4P$M{Tb~tIJipi;CV~16 zX*U_zsOsc*TT<+`@Yi3%fj})wVa>c#ppcfe-_M@*b@ivzwR3U?)0pe=-39=e%;`Iq z>CqS}Jk_Nj56YlXDeGXS;J0Jo>x0x%9Ii(B@aV%v{sRQm^NjwXzsPCsSb3RkXCiOs 
z-BnAgXYs8xPGHcsZTIWL3g&0e3f+$>Hm#AZ|vPgR9(UxdrH#~fmgx8 zTcGDj=g%7&WAB7z@Rbr#wAK_^|OU14)(w-G99xQynvkA$ zUVHj=e(UOC}d}7PRARWmNV^)+yUU?+2E zeKY{&_o^T*?Fv|jeoAW@gB|aG`ctojxzjJtEIVqWY0wadMfT%V!;sD`?3VFhP!Cwy z0n!q)eP03w1#rv;|0F7e(8x`WnFzG8<;Q0gt18&aqE6oEGHu#54j;vR&M|R3hV_>? zDKusqKj8=CZtNPUBMf1nLE@#gACmxA^5lz6n(2PvX7a z9%NGKc*40^CQ6Udnc(K2I*@-CiMTK&0ZLvdMgHW7N01&6MW|O2{7cxYuF?|eTuaDV z;ZfJ~^N*ybi#?m>m1K`w6Zp)`?3v8mHn_mE>yk2qf&?=9Fk6Dgoqn4GpNmnT@Bn=1 zECnbl>IN)Effq{ZPdfRH7%Rg8#j7+@A~^h=U-(`?@lgk%hJ&8?odm1_5nDf=5=~gI zYU;@y=hU@E($f9EnG#~b_G8*ZKu$h5)%cts@V zbu}YnuY+vFwdR1({yzQ%Mwp%0oiegE=rISX3uJ)68^G82$tD+`XRy`OSlfLWTU#RP z@aj#@BNNKNJTDRUw%F5n+4FNw>wRR6)!3ia(Zrz( zy}ZOSH_<6n_t4yJoj8EwiylKu5i^TJ#3d=9J(7@>v9UFAgQ}jNRTLu;J$RL3h%{^i zuoj=G?iZAG0#sXQR;E9wL>OI-Rj23-hQc{ev7WEX=)iE{QrJAX;SH4P!tvp` zlx-1Wkpe~S5H?W_n*ysZi1hw1eeUXom9IjAOf$d{#n#Aw+0v!*1Qy=+SaZbJ-NQU` z^fMt*)2JWhHyQFPuk|Qq#=^&wg1}^U#p~C0L%StMTPIBb{lk#L&{$G0C3|nD0S*)N z-6MMR^53PP6?pLTw{Lmi*7gfTG|W0LTkxt|fxMP{oJMqA$r;<=>nf{()`?a4)6~2n z-CTZZgtsW4O~`#kZPq3IG?W02e*Lce83W8d_v-XJV~^hF&QsW+E=-B-F-6g4YzNl2 z(^2kjBHV|=|4F|5wz4u5%+~J^FX&bfBf>zGBO;7iEi^@Fmh2`Ub@&ba7uJ0JlXt*; zh+SZBX4U({5*q8GM~^Bz-N6`8nAFX>w7Mz~4f#Q*ZTt-2w#p2axbn43-b>#Bv3-Oo zXIY%Q9NW_z#Ex7q`*&#%)gJE=;>xjOpE6}n9j_;KgbYc<%E#n9_qB{KW^$GB_0cU& zvP<@L(gW6i6=5xQbC+7cYL`O@k<5`mR1F3C+RC&ur3AS_9+lLhS0d$)vP3PGLy>$s zEehP58rmY5>WDqN+9(m66sVVAMCE<0{_p|X@#qN&;ivtgM`QOrshx1usAZ&Qh#ASV zbJ{;-yV_FrG{H4>jbm@}n5LhNX6${j_>7|hteh|AR}sPk1e>e=xY>!kF`r778L|G> zXOtRF`v!&C-xc{Z!d-Js4GnQdXjTO}EyX12)*5x*4d_&}tyPrFZWp&e#iv;b;{6Y7 zowGcsFf}Quw9jjhdgsknNxd27m=q(w8%<=E)lyO3JPf9&^3!B^|D^|_JXBel@lz7FbudXKu8E-d z^6JLjCbZO6)NcC+_^CJ(yYmgyfmoO0D`Ul+uf!(6O5~X-aZXqnK||*Z$!yE^3Pw zoeIjE0GmO#iE7YnLQBE!DH=QLnkHo&;rCUqtCcj z+I#W|p3ZKk@8n<*+&bIoSn+ex&zYtK=>LGw%W)x}Uw z%FO1#FH>Ha4IG#Q_9N@-GyAIiyWu#Cn%Z%k5V&-ztE$wvtm17jQ-k6@f>icJ)+8Ew z>R>Ykm-L$N$@qAGSS}i_PmzP)y5$&H^n{iVe_MNnCV(%ol8y}EEYo7@pE2&^*6djL z;9`Btu$%sN#{2i}!;mC5V>XBNZM{HigbD(q>ul}62Iy%SE34+0hvHMh=H8CniUq5B 
z_3YUks#rsksEpw8*dvI%C9VDkJ>>}7S!nbUzRjb3lXD4$31QIx14j5Sw$Ri(eh}=n zF@}Pc@ym6C^4{I%og`g{C()X$>~UHj$6DIyO$zjvG(GNam&mhsDqddQa-Fg|ikp%} z5vB}o6F39t5{td$)%_b@`ThJ+P$*V`G**kA!2v@nUgkXor6ALp$2J>j$Hn>ciZT2w z>=IQSH_&b1Iv6io9P-8OB6}-ye|K1@Cgy<8>T)1CLynoG9GN^-eT3E`o5p+GV?kL$ z#X|D3X5X$YDz#Fk11Aq4Zan@0|1O7C=NMBKLdlw&2hYHbx#x_8Y7x0(wx+}!LFGiPD!Di)@o2D_ov z4i=qpO+AW5Z3GeXCRVbyLG@8G`9H?zzOj_G=1k-F>Z`uU5TQej^=G{jr&VS5 zD_p45`5C)q9o9BD&@~6;_7RL}vX9=crT;-dqc)IO>vTb%ISAc}9(O1H7-CRhzu5Ed zFo2@UipzuKsY`?_ztn%gfa}t-I6%gLfnneK82E-ja)yai9nwi+zc5lTzOug;+*trV~u^;Nzp&F%B&UCG~@YbgkL8x_`U4~BNPB)|UuW=Q#Nw@UBu}fh;Tfvw0wlMO2Hdm%xl&fNsf2#S(dAS;d{NPwm;;#UZ4 zvL0z(fvAcbBY;mBDP&mJQdDTybv!PHE@eFMrS^!3^MM=cmvYbKvhRn|-;!r(`w~X+ zRv^995B-$#Y+FSLE0$PTBBr7M+ZZXebGpojS&4UAv&V*xyR%@@j?Cw>L6z~ktj#?kV*Gjby7sE^RFlRgCZ2&h7y=7HV2j@RUga@Z zOR>VCuD|UU(XggA)$VH&eFO0XxuL=({_FP`}zLd*|6Yzg&PksC9N{ zIokHI-}4XTpGy~{82^|t)-$K4)rB@;IGJ4A1mj8k0!n`6sBDGDp&2{juw_>=SK3Y* zY}(yV&qM-8kjTw%wozF{<<*viVy^x7XB8${GoWyD*1~1qZQLDFI;Hf1*L_S(@&G$; zoW-BJl%uUQoerSP$T-^)pOk4884QdW~zseRv%u&68NypeTy@2T(biw5)naz#51)1FI7Dj4zqUI(qDCtCVm- zQ>1dJIrW0YY}~-xrQ9wTw(z_^mzZb+rQY1PcnbwX)8|?IHkYadu>C6`3EMt_eMo@V zco;ANYo9v?>Dc@ULCSYHe~e<#BzeGkLf(j{JmAU~SBBH$Ui&GE2id%4X>4w;#m(?M zm>+*c)rmYVtm#ocxFd)cTKXGEsfHnXd}Th1!*7wOu-)kb@w2U|5z9we$6mUhZW%-B zfkCJOWHIz5?dGltyI#G29|3FRP-g<+kDuSGzqQsM)}in?{7mr#l*$`jyas4yF6BNB zCCwY|aPI`syq(8PO7=?`wioj|28CO_px;nB=l!}1sbz?|ZqBjwv}3#y!_JfE%-I1~ ztKB!60iXCOCNr6L3H@yEP_@l-+WGS`WYylaX35j%n7UyW$at&V?JT8G=HDQ*Y1Ou97n41B?IU^$jVr$rzo+aI*H9{3ftb9ptF2uAmp47ED6c?(SMzMgAgsRyl z(4~E{waM=q3Im4?(*-9;^p@`>ClPUoPA)h`DN>}oO^6mtF8K6v8w(J|dgjlG%uMVm zFSPt)hMDOQj8|EBf3SM2y0O|8fk`2Ye_jfY64`+WWz(`zg4n57h6WFAHb@4wyt8l7 zVBN5pjBn?}lKc8hs){grG#_J`#)<>7&Q&3d z6MVH!g?^ln@N1uv;nAna?QOW?KPGSh)$$cMs^V`^FpuZ`S$aEOzmqk&sVy0@p*a-) z|12!$*h)ST^J&=^XOwN-Z;bI+Dks@HXRsD$njppsrg~E+P0Eor3g%5@?Ed|&pq|%g zw>mNR$0eBcR7LhmLqJnu@cN;4*CffkF%=D1MqU{oonAbjDRMy?DOihtXQ-tqdN+gX&aG6S#N(PdaOe!TKqfUZ*7=iB-seEwW+%f$4nGmku`SW|1+&!0b6 
z+n}bQ$t_omgOC8Dbxa88#b#P{Pxr62wR;qf)9?%P_FxtQF;kpdo7t@#&7G?MN&1jc zX0y43EhkoG#7|ZK7CKhe@<3mWvE4@&fqN<&GPrM(GcL7EkMczI}@V z)@OXJ^^GRN149&73hw|%QV(Znn73ZvzOBf?MwS%6s;yfe*Ao5tM_S(O3_Op6v?=MyUUt*Ko`Mqh7>l?vmn{nHl1-NvtRwlY|V~_^xhLe%46^3&L9-V0m0DUDFu$s@4o7f6K?*x(djOcCkktyt7@WE(V8$=u@`v zV88X^Q3V5XJ)Hx~(>0D=$Kw0IC%%!&NOUFabmti#9}Uq0rw|$+uu9}QW{G=Tk1l1| z3wQ3r%d3aCCd-H#o8L2sSm(wl)Od`M>!@PQ=`Jn-1PRThVfkH~-3UGZT7SKCgM{9y z`GDrKl%SgKd@ET)`v00a?5d*zPFtL<-sRL z8xJ%z^WX?6NeNB|G?YVL;1Dry`O5C_Ygswrb$a;V!R?`%oVfEGlzD3*&y0Nl9>Z`H z=t_4d6@=BPq4RS(hW?)F9xryxT>K`LSh%_l)-AvBCA&=8zGKHVN|d2J#4`_#!S2^m zqh^H6f}_CVhb#ig;w?lYeqXP4+GtcEDS)OW{7)*v|A8T-5z-pR(PNijF}wH`F_qgA zm9rx>G>h6oW^FeoW5X;a&8tnI*ldN9)Hc$1d-!NW2kV6CRorn|aCG=6b6Jt;?D`~o zsF<>bW$vVm1z)(gSA0jA%{V-UiLZoU*YF>cwRPcpma*4AUdzs;* z7!!3AD_WWAOvo=P(jaFOcRf1?Mh(#1tPdCmaAEif3o~mBFK&IMs9`X?FUayeKkM=9 z?pXi9#;myz5*#dShy}M8>JiWFzo{{>(;qx|swRb1=xg{dv;XB3+^aMNVX8TJaH{hC>O&Z_*u31mG_+=QzDJIun|hdF_M zF?Q3;dh@7Ejq1&J{d)V*BC*=Ji}7i|q+SV$u2e$cF88`oDFp!z2Yv+3eg%idX%)On zDRXxFQjQqxON)qzD0}@nulX;Fih6`i4I;)1PO5bQaLEHi3MBh@P3dvRJ zR9qAvIkS7axceK7V4s(8iA7xqUN_AgFv}<8P-k)e4 zRt~y)i1ll?Zz_9YV`Y@M^gMen@Wks7lTC;}yKEZ7S=3zYoiIKqqftPs zl}TU7`_HQSHQZPZg_4*&P!H>eYrNB{xi;l}Ka5eBq}T^#1yigW=^Y8?=zdn6vQf_u z^NeSP%uAa0GINomT-=x|urubdy*F(qQ#)D8G~yE90m`Lk(hs4K-n*CMN_)u6n4ip4D-kuRB{iNE=WKMo9B(2^Otf#-30A&HY^zhefN&3pIU zRb^VO{OE1m6Ei@TIW{En1hU3cp=JEE1kVF zT*76o;MR&Kxk_^Itizu^Lh!5uCsVsi5jh|^&x4Rk0kzPhy^c;Oul3$9;5^DC3W|$) z2iACd`;#lSyg)eqv%aEx`mbR+CT9y^6AFjr*=q*hD;vHoZfKyOdzQI*e{w0q15tS? 
zRtVz}Vp{`0^N`E_DRmLls_lyW=)<@4wVZt0Rn^*&6USYQd?l-%a6>Iv-<~xVu*tnE zZ{x@ZVd}@`WY1Ds?7El4oq~a(dP>plDHu91VTO%5FG@oz-um#byhbr&a9M}EO12T0 zKvDj9z+Ee&*2HjMcrxa61P^(=r5ZOgWWc|e|va*Gavp|0Pko`vDDxQcnU+O(Vb`7OwJmsRuz4M;i3Rdn>s1iN6WnHtpLIDH;V z>r^%KR$m?J%umZFyH^D1(P_) zwT3!ZIr-*3kP5GX=#7~+1Gck}g3@^~xqKeA)%0-T;CMx!wXtMcBr6o!GKhZdY{fGq zOsUYIG0oYtN7gl_UBE`V#^@p)AyY~Hd~2<<7ccIca21g!z=uKZS71fqbtxiJl2xHf zQ9J-u=-;h=M{UoWV?eOAMsMwcR^I0U5Lr!@v{QrxfR&z4Is81k0Ps^jEjo+Vl=rLs zsHZh|jQi2czlP(?U^2_xr?Fv8v*|160!Ip@*mW`-4!*vQfh@2g&|6k8&S#)#p4IBg zoX-cvZXC!%)f%g1HU1lmZ_GI20ev;~UjWxj->&6PuMO^cZ(NvUn z0m8I2xWc&vlEff}sIo}G$CUvq31OAa5?Xb+bYe%Nm?QlKnCXWr%V6{a5)I3q$xdHN z!kSHCnRYH+u}U823f-{SL1kzkeIH&14Gs}W> zlAktH!^qZf=XdF>`uLxHSk+dxw!vWqEKLmI-p)s&GNnoUPXKn}k|H||ZV5>;ckY(P z{L^1}0P_W5_aO$3W8jwm53&g{sPq#E3?fI}Go?!=cl|K27DriwH-LHm1gH7eX<}Rl zi>ifIkgechFa5r6H`r#kJKsyHNo2G7@1btZVpNi(g8k)Z9m`Eat5cbvi-q4BJ7PI` z?e+^NvkZe>0U!wFJm_!tA(m1GH^*|sHtBEl5@2ILHxYPb?%Ihi49qvb?p1YlTfpx_ zo$1=88@m!iadr(Vu3NPHaM@jz`hq%}mDt*F?>(>9aMJ%ZL0t(b#?1Ij7Yqnw-B5d2 z*Q$ul4wjUXb;P&edjQ>IDT9z^OK2bDw69R1@i=tee8wL!S+>z*E8#I_Ct5XSl|do1D2 zxZ+OM*O0|D*}TxOBVyFZT8=sQ$<}3*>htaZHo+LL^Dc-h!YbY z{##0?xpNBxy1FQ9hkWI`vHmgY53YCMf1D}oPs}43iUuIH^XF>)$=Z)o-1WdQg8AVV za-n=$E|+y(6a$n?1%HF|s`CX+&VlV_J$<_Wi?ZC~mE}rk1jb)G4nK#I$6z$dz#Qi5 zs#9a-6lvQ8F(*m zQvzTW0a&v@hA;DAGj@3hc$^ECnT$~A<`Pl3(V41e@KMF;Y5caJAaCD~ly5>UVu@i+ zSt1PzUHY!6V$URdWjhzLGa6_oCV)P`zHT?9zAS*##cC z758a?zKaFNdg-!d1+n7f$zAju%Utp^GQyM#G+1>kzgE>~`uTmuZDfcA|MEUP9Aym* zR5Hq=T`lCWOOv;_9&E(DDBkK@N4O6-sm)KIa15rQNWGa(5i3i~Emy+vU;r@h%>qPY zq6d#z=;GJjzls(TDP{U zs~YvQt~n_@uLVscJ3+etxscrpohUvMgTNM7W;n+FK$I<5_mf!|<-)`7JysRVCK)d~u$;k-Tv{sbOR|-pk76h$y<VM9C=>x?lf=eUj147wPK=(915u;YhyQmU$cHJq_xd^Oa3+*{VRxkP4&x( z*W3&m0{6D-Tacf0m=05DvN|)U#>mI1<-;IO@SZw?H2xP7_`f6z|Ni06?h420^7oYd zd6wf}?2X&Yud>;@aUI>DP@N6i!eZGf)2^~Z+)?hEm$MKfc-=auf{cC}>YP)OcL(6G zI}L}DhNzq9+~Mu1S%4hx!1p$Jg%ea74%6y)oMB`X=3SQ}fI4^)k(pQqazDcdZuIqy zr?v}N0k3xNFdrkwYx{>R%fb1pa;Y$+_%Y}ncaE_If{_igxt7=oA+)c%XEc7F63tkf 
z7HS`z<|Wc&lrS@{T1FYkgy73ptV~0JJ$BbYau+5txxm!*=ZqTiY!(B2p@ptnSNCZi zLiNdo{fVK#0-8&n?C3V~=F3`!PCI8Yg*{(q^Y~t2;m+a9D7X9r@}NT^BmqFwM;NLi**)T^QYJHs;ZVhw#O39T(s1 zHEWKK-1PH@E~y%?{LGm%E9<{3cyXULM#wCFaXfJmmYF=-iGL@L zX}%Wq!7K5g#@Z@nu=V9uiWbL%aYyvHYF1plB=@ii9A@_-oNDooS=TCNLu1r*`nG&(hY+-iuR?Usg~-2b2qA|4MvQB3-JoF@Ljno#27%@@4O3twyA50FHX{k+ zlXki#?!SVlBH%WaR#a?ko`GGRtzx9j_v?h1`poCgpGQ+244g-70&J|)^-!=`G=Ywp z2oPu`DVuzo=E4_Xe`LEvmfV%&oB6@4@n_?38FUA|q2Q3k)&z>Dn&Q5M*uc#GpUfIbcUsKp|o zE)LpHE|$;!_&8Ms$Z#xVRH5Vvd@X7=>nE@|>3S@uGZ+W&nQSnN3`5Jld&vC$3 zX6?%x%WL7>qnFKFH(=K;7}7=)_*}AjAo3Dp4sME7k10T+jE@6Dc;w)&bCFF@q?=r+ z=3~=fMt)_OCqK7-T3UrzXZTw4VaQ}}t3@fut3ReJ>T#iJEoq>P6?#V9T10Xn@I^l< zr)0B7{aHcKnR)ye0k|(c)I58Z6ywlt%GA(E!$W8Kw+oN~d$Mb=UmqA2@ZJ}E!G5L% zmDQ7%yoh90Vr$Oe1IcF2m*3K#SVjJq3qTfG39OtxU=HC}?M#+Akd=gv@j})i)xI6P zc+nzn_u&i&ydHSj6KLOvR%`{uxKU7WcjM2@;oV)C}&z-gx zr2`e+5mtO8{5i!g{ldkIIWX_3ah*0`I28bPzRK_eBR+SGi+^Zwfd~dMtDgd!pzbuC zmlM=fJ+@czZMHBqV`5XYNHu#7p+h+V*@DQbN_9JT7R&2F2cT8TiYS#uVxR3c7Y7q+ z&O0A2f+NWQ&YJ3LZ8! z7r{_Qy@#xGx3{;V8J=6tL{KKeDL%5;0f4t;+d#GkaWkTaIoa8bhAR)nxnKrUyBXHG zE{jftl~QsogbJV$v~TZTyFtO#PtLc3gZ{?n4ojGb^(V)Qq7=Ydt7=T4yBx@Dw3_s)fn)1p{K4af^;DrRdZ?A1q5&N98-OIw{jnt64xMM3!tG? 
zMsJSu=)#xIaPb+v$MwUQLF|~?Jk82d;8v^erc;y#FTjapH28sFviw4xeo+XD2X{)5 zs3;A;3NYhWxhm=cf5S&RP5!AvdFW)k#&ERr;?;+_3Z^=&z z;r%A&Smp=)!5qTsG*OkhL)C(BVqmimUl&6&^;ArrD87>{WG2gFk1Bi5o(qvTdmC-2 z57KHkdaA(|k1;n;P#ZM7&+9QAH7wdZ+>39n>)ofhes0^AEz47hwC$;4M9p<*GSwJO zWUSR<(Si?N(xLf5XFqY8cC!a+P-}gS*^d}29zYervkkbqV%f6oKrBZALir2Q$#xsz zoArJ&v6Lp@<#u>BG#YMUf_S5B`Q;IXSzE%=HSXYzrM=<*GPP^m2h~SbCmy_5IbG{z z99AS`Qsp|~BY;aeg$KPosZh;7#q5nOym^!5+gEJje@BT7Eo<`w8Y;uU5S`^(S+zXw zj7{|^d6zE0{D0j3$o{^XM4>5C)K|PiFRJ-;HP|(}WpPtI)Lg77HWi|;lW7y@vho+G zWMavnOpd8=n6d-otyTbcZR${THK$7ImGK_N+~agYU;YJz={XPPvO3=l4??8=Vdh>7tL&%iiB2JgJ@Kh7P=Qzz;DbedgMe6zU0)|PwcWnZB zA0h3BzG69oom}O~UzV0A09DL6O4Eh{@^ljwv<Xf zh_qp3v|ucuQVChYP}VkNkFqbNk`_w{+1l*85@XjcM1>)Hi=_q?8ikhM{hasr`_J{e zuFoIu&)jCH*X#LwJkI4fkK;I@bg5>8RC}`YE{Izlg|^7*-=_L+?cmgffxbtK_F?o^ ze~{l+a4B@1ffyRdYB&0;*?F>A1>JJp1w94Lou5HQ87h_b-Abb6S>1-<;MSL|y+VyQ zO}obN-lG6fT9p#0Mm!gM)bJIOsLYYtd;aRRYXar3KI`S?65WjWEb7aO2?m&xpF7tM zmdN^J%n}IBx$4~8PO_ikHS!l>gS(YnWxXo&-J&(ukmVt@*m3v*VtabL_j5orsh|fumF4soc!G2N48n@t5!_V-41Rt%d~&JU=1v1NX1&|SN9;`ei3C!Kb)8!o{U&z`nNp)3s%Rds=740wbgnag?O2~ zosTCH)*ve6<<8?lV}++X%v*3oQs|wD%ohY$F|VMu(BSXnDME?umTXzZr4^9t#Q+EY zrgOoc=`Om}X?{de@X0p>o4Rz4S6wB{IWp^qw!H@SrU(*Ssk&=S+hLf6GLH#PeS<`UH-jN&aAd`FRlkA+4)3y97%K;9CLyesu9ML^!^e=t z8AAdMT-4_u%zl3e&excf+RrcA!v`$BMs9V2mGZ)i#~4O8_Vv|Y|63tAakq>dAu?>> z#Em*XMih5DU(>oWOD&u!TYSWyuQzG7UHUyC3Bc)-(V&h>^4T9x?@tys8%wV8pEH0! 
z`;cqm$n_*JUJnB{77O0^0r;BACeVl)N!ps4_PatTG{O9>G_*$S^Eg3CZ~DZD>cnr^ zX}#+4G&nMc@w^YBA#$`4clg~s{ANy_I?AHg;hjpwx)DF&&WR0l#R4!bLH*myY>^SJ zNqh8?(n9l&bGo>ut1Dcjo8c%QDE?kkLsi+b=Xg9ac6-#}bJ&h9by6SPN0b2tQeI=Z z0cp-BDxF$V_8{|MQ?iB%Jq|{Qsy%tHkQtp-3FmYZkLU9pM%HvGH4(@g1dE-zo3qk} z10S)wS=ao&SZwi_6q(ax38DuElLha##}PAyTm7$jS?^x?2lH+bgFkCTScyWs@D0E# z8%B4ppPbvFmBstY%C>O66rCaL6S$NK!=4ZSDK1uEAo=q$$HIBy6%h3Qj2RPU>_XFD zRw>lX=Yv24n@d@oiMRq0qn$#Jw~1MS8D?3LpJ&cGwEo|QTWuGwZ)!9kWE{SaUmE&> za8*YSkAzUGXq<8Yw97YTD_rXTf>su+Hk}Xtf6Wd4|KWp%p$a3}9-+6Hd@0)e@%fav zO-<)(<6QroF}1et##23do(^k1C53kgEz(I3YvHP{6K&Sr@XwP0+J{#}Z*^{YA~M*} zC_E^}!@<=qIi$s!syp@T>b4Eu^x;NTZPPEi+x@-YD)s-cD(*(@6#x5upH30Wb=m!L zFpPqW1hdK)1o3yvvd6d{h!XVDJO+xQVNQR_Y}4dtuNhaCwmJ$s{by5_38I}^*iYo06u9Ck2%&!?Efhq?eYNPrKYYXv4)X& zwcZXvZ$#Uc0Up>xX()&p-gSG89{jKNjhaQi720yE4e(@(jJ%;$fR}_Yqi9jjLy~2y zZCn9SadqB1kYZ9(POrLpTASXj!a_UyQ$_*D9nEaN@TS90WA1m0(O^?7qum5fJ0XL} zBFxA5C_XxzER`uORCJ#20Z^vemuXkR#KQ&~56hOyUYCg5V!0yQ#nsPongCl}CP+Oc z?Px*_;&zhT(eO2^SNmbjakrDOFF=REH&POu`h0312C%L|+351Lb+uWec>l@SDFhWw zXKGMBQGPyuailV3!|@2MLzCC7U90kdLg74R0u16pw-dOMW2_d|!^lVxnA@c=yw5H_ zm!zsQ3W;7%ZYVsE5YN7q?l+SE`s+iUcVT@UbY1h)|F9~CR3Pq#iW6P08;?&!%%Pd{ zaq9GkfA!w}Rp+Zl@~td)?cSC~u1Y&mC4r;7D~ z+EiZnnJ0%Q!(0F$pK}#zzbcb|weL`L+@~F5Kg;)NihfriM(5-xb09u6LIp}^L|@eC z;CtWU?qAogb(G8Kx*hC>j^o6+fdG*>*r>igM{MP}j9c-Yyp7%0#s}h|&9)l{8u#X3 zXvq^0=GdJBMx;Ai)y95b*8@3}*nSZI8`l#zMQnS>Ey&u{LEFHfO<-VP_WQEynhptxQ zfJ6r#+&~ua2FzCqLK*p~xee=Fj(*$dGKVd-VrFxD5pAUxN~D;60zYweGXT6VH+m(6 zR^Njp9}1GxycPYR2nh54tXZm4f6$KM3_R0Gmp&ug84Oe+Ln%7Jpb`RD2a2LaT#hzSpp?pZl`;dU@+XYr z4PmHq=G3W79)aE!7-Z?p-8?=Wi(j8XxbQ8S#3}<>;uwdGuPi$pSVjZ{rs@J5W)mZ_ zZ#o72&O{Bd(`aMKk{Z_6v>OK$2W8MCD22DYA)52Yddln%#qZze%vEDv@2PXTdk&u? 
zgQ_U;(4kCcD;zwT0B^Qo2rUEFA&2mlEe|mKc{=i!_JxoZk8T10c4AfMlq-Qa?ZEXr zJVA588wg9!95disP-OR^PdW>f*zM6OlxjS;!e&u77X}X=gv%AFe7M=YtSluVB%D!t z(ezn6iEq4PorX&`puq7h6J1^HfJSPk!Db%H+FunOe;UQNiv2N{y+(z#PjIEDYAs8Y zwwqvHTSL_#A`j&i_U%mqMkbxI2Sat2{{;SE5!8Qe-Fb(yw@p8)goF-J#?gP-apkO>hkm)2M0JS8fmbLJ2LcLT!9*Xn2Kr((0f5$kbC{+wdEQ zVVKDjzGo&rI(P(Dk#-&)hWUBaAF@;FXRE?D zlhoF&WKpR=sjLgX(3z%{_vGK}0GkCcBPbBRpwn*2!FG^K)`|J+koJjD?c2NtX=Ghn z+K0X{3k9S;pPals)`*9mJs2~WPO4$+?3Oe{7 z(;O3zDMU-lfEs#~RabUtacILRPxM(EG@lZBis^h!DB%;o@$l zrd=sp&1*sHEj=@of>#tej+pj{Lm-SVRZS;#>o>YX8LMf05I<4o$a4~aIB*YN>}6Hh z#@Xln&0g1$eu%j)RJGR1r%A+N6!zBD%%?2+^vM#AN+1y^DsLY{jI3t5bH5TKBmbsB z!_LV0=qY9gpe57Z!l?=k4K4Ujf`s-$Nl7pjh(FFRRadb^*OLvO{Qp2R(Vke^l^crD4UaGd_6pZID%FOlYAX4<{-)1$h&HmsIWGp0NWSY z39h673s_28oN=dW07Yd|mwAi=HsDhF#85zQ5jXF0@_ttgf>Lin=pxEyD+s(mb3ALq z9vUem1a&`z_648e(G2{!VNIv3L#+)6;K0QG;B>~@3|v$Wko^7wa`!SPr+KxSOsb06 zfzfma@}#ZkC5nh}ve9$gbL<}|xrPyFgHH%cq=0xWsPm&AoXVyaSD_i1TWXwy({n-v zOi-wlA8?gyLmRL8*&hHyHAVnl!uj$pr0oslLJ1^)kVwS?u}WYJ;kIRIZOyM_@r;J3Cq?I_F=yBJEczsqE zmNE!={P=Of5A1lAF?)`VO#uR= z5w>^tx{#b~2NlpghYt!UX!YtU^YOcLK|w8HHjAgvi8^j=62CSkQ*dSS&733O#48ul zlHhXg5kweoW||Am>0q*foawm@o$l-%jD?wSDHBbU2O#x@plf0~je?enrXrrLe+@H@ zJ;{I>m0hWlZ)1}wbd8`(xWQ~@N0)~z&lQsDtznz*=%nA|CXG4JKiz%+^?=Z`(pK{X zMP+mJ0ftDuI+5HdRE)dY!MZ3*wX+L`^ptr1eDsC@`adjeMuZ_c{aXso4G`+hd2!A= zzz0+S1?G%nr=%*S>Rmk?i$V$6u8R&+yiy;}9wvNRTnc~#AXtZ(KZ$8G0}2?f%|IO)|)p7I?95-jR!zrifVE?^tW z*;nF!g9hlQO`*N(Cr=7{JR&Ys58`4_PpO$nrAD>=vnO*iL~D^{cPRx?er?7(<0m{) z3vTvy{aZxm3)6vV9XZdK9vPO5q797g^^j7S?>b8}Nqw5SAY++8D6uChDl2mcRl<#$ z^Y4#ubFQ!bv18>S1n+>xCSzJdUV`1)T*EtXWNmtLLOEn)C}t0@c;Zr+z$e%`uj{|sI-xOx$hCcNM4u&C}YJOjtMx@*JjjR z)9ia|(K=hM$WVa5wQFDsJON?uhb>@6DqORTZz+=L8`EzxV|f)1T8I5N+p$Bd**^7x(%YW$*sx$BrINf-}n*R8ocY$&5YW>;j?P zfneD!VCn=PN!80SKc8=}m@2D^`KX#7*U-An*cscr>CE~{9+cWrhBMD%+)~dmD(PCi zH8PS8rgh(t_PL5bg;0^1=A!~~eJS%4mm`eSh5;le z;I6JO6Yr6$BmktFtv*H@cwnEM=jDN4tQc!?$I8_hE;$P;Y^zf&+%EQlQNHvrzt zOuBOjq#yk6vaN`I$}XFEiU5(FgP6h!n-Fe@+vT^Yq4?XHxk7fQbeiJ_TTo2!=DCP! 
z1so;f;u@?ls+jTUz!u-7{*ps+uvrDnZ;`;x*ZusK3z_F}ikM0Z)8{KKW-w#z{Pz$W zqyW+N5&tn_#dkBN`&4h58=mpiPvmP>EcYy?)GDd0+zKQ2pNaIhK*`yIpS^n3Fw-sr z$bIycFuhIY59UFU2FWpb{*vvf)i)IS+aH<+-bCWbKjs|0 zg#Sk*uUWAIUjo?)RdN-^zkK7jCs8JghSBW+3P4y{E?Y_fF(g@RsQ`%m!sy!@J@T|$ z^q*enjAObF2D9F{nP6dyNjY^`&X66VYzzoEd`l*eorkQ=ZFv0hrMczn0T9dqan_E3 zH?zBYs^+@(jIMqd@LXhZ-?OP0fN@Ui7O;iQueNgfj%L!G(&b$I2_D+1_f>4>IM0k0 zqHS;Ei38o`$fWUV3)D~ipmd=fU~KuKs*K0&Kt?d zo6fXm4vhlKOAYs${H%zq%7Z~cp_9Ly6OHkBgqFh~>m_0Iif-+`o*4>3Y*rf=l)QK< z=+)HI1nF6LD5#1-h!ha1ePFQ6De#^@zcr^_*S4jyvgY{)$aPq&Y@jSg&SLU?6ouD| z9?xF9@CH=Q>dn9vzvDdxf1g96aoC-}#)8K3sltHgXK2pIc+*;ci{8%Pr!Ykzqo*Tb0<`aF5(np!&ia`*) zL?DW)rN$APelr5n7$kn2+$b~e3+!$L=iRjL7V;aok0Z-FLe}hlQW_)Lk{+IA-IB?*;FvtE{UuK{NlVL{{*pID~Q`k?BbhoJHyW``xNexDss?*46({vK{ z6X&Ds!a`qPTlAsr!V63zny*6sMp6FtCi0-;=i0Px9Re?t21!K$zlBlLACP>$upPBu zD_z5H6adgLJZ&+!3`W0sUXlGLT3j5{Xw}xOGhqsz(5H_Y!qmpy$7|7|wiF`&tefTI zbAI8Ae!+thA8DH(Dc{}!TaxLe{HQP3AEx}Ejc|fkZ>>bi_E}tl{$aLG=oe(~!#Uv1 zJb5|dz?zWD7BcE3?N2feCjE+6Nd7#Oz;Mld-%=3BcH86xd*9;hDx&HIjms+Lpf4ty z%V1>r7C5$SwLLXxJLF7|Mu%mw9Clik~P7=uzw%^DqpKp=BcVZHVt0Y~E}jv|Q@obImmVt71E$yDjQ$K@uJJKIDuSF*mVZWEkhAEe^x%p<7 zmz2zk+h9o7tU4q-vA=Q6>vG{TGj{5pE z)+$5{v&+=@)&!IL@9JB$IDt}OZAnuN`6Fnr(AeZ}rp>sgHUIIGICZ+hc zRw(9Fn#2>#BnR-u3XbwUFd&R;=KDV;-<_N6+ENhUMF4Aq3=BdW^T5hrv(*eYVpZ@! zL&KoKlQ5xy#gB-%6`~c(XsJh8w`;waXOQ=s__Py7AFpkxq-5Bqk2X`&nnZ#mtj^-~ z8pAZR;^?Y(}d8uc7;? 
zYino4Oec-Vh-!(~={*)(fd|ku%o$e6ixQ)WG-p^DxF!{*W3sT=6ZNgBfoN{Wlq zB8F#_fB5hr6Cp`Xet!PgB=@V~x*MA>2JoN}%TeGrNFd9k^_T$A#!FYE8|Ssob8kaA6w|mDeHczFg|> zuWFCG0!P@fn%}*NbA>E8<)k$RTD%s`EeTZMKlUO8R8dp&Ir$B|8}i-JVS<32`mpg4 zo+61~z#x0CUAxwIqEd6yqs1hm3<_Sb@&xlEG{0iVRWhD3RaT-me4*_?ffEtd!_>3| zKQ^Y5N{I31>GVi&HDW72S5%~vqTGM{ILfD)-!t3tl+&m_IB5m<0Wz}yBK8)3LmXX>4|6OU~+VK6IenA*gWDsCw3>O!p zsXe*QXl$`6-9KMAVMv2CCUzhe00HSMS~gC$@Wci#onFXviYMk}50igj?7a_O@*d{?Zj2_gH~*H`HS8JX5`#%VwH(d=y@aQg>HJg#@XY?#gQj!E|dyOJXdw| zWse?{Bej!aU)*(A*tnm4|2KD6q0H;L|0oJR>b~vM|Ig;qD?_Otw(^y~erla$8*~af z$*9dfxT85PTh@0(ES+KraC%C7&)HE1pDv|UAqU;}x@KCSw{I6!-Y;pn9|-a9Z_Np2H?qVcaJnU z>jH3ED|W;SL@NLTJ(2vnkAsR10=>P6lrOR6^9TwTE|$rsRMb-;H^9t_ii#|v)&N$r z??%g+%rPRs1geSE7MARPiqcP3)%`ZGUQabhHAa zE5%)KqM0i_s&jO5G9pa#30rkpznG9|fJ$)vk9X~r#l9wu7Dw+yr0Zh)ju)4dImvdUS{WfD zW0>~_s)6b3Jo34yB%v@QG>hW^k!LTvQ%@FGR*nhj`P^P*^jN})boBwW%gOwdV9UgV zIu(P^fR?ibUhfj4s_0q7?QFV5@{^sn*%W`(e*LVhG#OVZsq$j}^ZGe30~1HQ4&iDuGWn)JV$z4{HJqB@)G;cUI|Y%uEoj4)&UhU@*1tVvo@}wO z1_1caM6BpC*z~*{rK0v{Mk!7G{kOaA0nVTed7;aHoZs;EUT-0Bw5g;n8t@e^=D$Kz zCza=h$z+7V{>zzMZF_i42k&BCfLh#c^ir6n4CAld-qvY=_=8I*P#Va+135gbyOe?8 z>FcuF4JyYb+{p{~;vXT4N_XXgA+M|{*Vyuo>1N^U*JH*>k*vHyvA&mkN~rk<@gIfU zXFer0JbfC)uQ*j39%Al^1=s0L25h(ywUfUIMG^YdUB!b@kWeao=u3E#4WKcB;cImJ z^<_jt`GHr$F>9^Q2@XM|?v}h5MH?QD0HuFs$yNh^lL?)?AXG(`FC9;}QhE-6V*yM;i8azUvpvpVcucKGh;!-u6= z^?yUrZUksxHxjZpw8pvH?1v3Q@_2tfwMPhG%!IV0C-w?+xf8%VVIKlSyUEH7#86jW zlS(#FN?9v{h~MU-mk(o)xl|BV@c?01c3yOt90M->?E__k6D3B79Ze5bIA1%l(M(o} z`g~;ZNB<$UQ!ib_QBO%o9ds~PCW$@;C1&W%ZQHgTt0H*Wq0Sm{6#^dBM8W%r)YQv4 zAX9L>(xFhV8F#+WDCi|!F2mIxe{di>-Wp_M(}}Q^Hyd{3`D2&ePGO{GH+}lS_0M1s zl0jyTyFfdES*1n|b0S?aQ-E_4vPL7iRl-yj;jGUq{hM@g5tfcEBhY6*DHaaFoy*Gq z>rsNzj5YX?2Cm1GY{;Sica-&i|F)@1%=4n6are$+w~-e&cz;C<hv^OtGsY4+C`cSE3XA|Lq#eDoDq&W-dBxkE+$(E;Q`bDO&VYnbnm(a)Qmy#rv&( zL1a1Av+Wu6#cWr~#7M;O9Zc~4*pSE`fLCAFv8IZUCu=R{g5)J2&WUqHqL3Et>d5LSjs>JaUR5H zWG~7Slfrui;4n1enm0C2pEga;HhiwR(Q&p3XEKVPyI|ubH(yIG3zh#y9@)sl5yI(B 
z%-#*F_~^$sO^tsjx9yZ`R?UE#3tj5I7$CzkQ{?8byNw48+P_s+e4v6;OO??Ht@qMf z|5m-}LczJ+S(W0q3mA{4{|EFMD)xBE3%GOeALdvlH3#4rd>Z^)>Rocx+Dgbej2{nw zgOBe(7XVPTcOSYHu@7k;x_DC%m$)V1?`{6SyE+<~b?0EaffY4Ub$HLcD>O@wC&~q&fY(5AWW|>aDl7 zhz$o#5FZpS6pVnkag;N%OytT?8bn!VyT+U53xENY(!<^R8J`xJyAJRADHS9bc58l+ zozmBgJtPBTjD&4{Wo(6hzKRW^df4(Mj(&w!glXs9H~d@-6(SvUt`I>9Kr6p9w|n+x z5bK^K(6G{Q!@an{Ys6eU{x6o-h=cp{SMlqAs&ddwqB**m#CQFd;?zXn$d5Yxw=gtO z3aR>lenDJv+`IXBf$sVSo;yp9jwr=4RA^l*!a|lj4^!+2bDB=^Yo1J~0as2>5C`R` zzK(AS$yw1s&){D(kP5>4*>9|f9yr)0`m**3;3VdGVB*Kd<(lIGZ5#HYy+93{f5Vd% zF7Rqaf!&V&!2`6W=34M(jXA4IGByJCdMMyL7sdSRt|QJS&`(CKndR#n&MN1lzgP;Y z3GkhRWD%iw!5 z=AY^ndz*r@?f^yjHu7-vr_*Q8<^T#{pcEOn?nUJt++`pdDD`E^pDEmFef>MS=Fr*g zNj>@oh2tjqUWOSB-PF|bp(U|oS1!ewQ2GAVhG_+gaIE|f1`T|I1?RpY%YJ!%l@=wv zj~h8r{A7omm#e6RETQ zJwPns^+m_UjrmXqCqgKBo9da;vbSSa^52wuBxt)fv6$mKR`H9)orD6w{h7I$h3cya zjt|u*o_r57LMVgnNnjd}z+x${a)woX{Med!Z&gNP$@b%}`i`D1j!N-+_H;{J#;mjr zy<)`z6w;9zv#FGKSk zuoa>MG<&n2vf4@puoigwVKUxfT1U&iPD+lHRkBKIoIi<}vSPei?^csRoK@G#gpPEl zdo;YFn_>}6nvP(q#<7d%&(DMjKcadePjq=d+JOY8aSFkl*{v<$?~3vRT1E>Yi8ml7 z5w5(>U(8qBc$x>N^N}stEjVWWqrd+a8;Pf9+3^KJxUZf)enS|xJ&1Tmkph!rXLCzXIE=V?YW87Z)(P5a!i;I)eSq2BUa(R&3QwpnolMQl1bTA1#qLEOk(x|a zqmTvslZj+u6Q$GZoxFee&{vv5AN&M;Gf~HkU|BEM8yk z``_{Apaovr6yH=Y>es`gC!(;pcSL5G5PXzQ2A_dm(O(uZd?$mC;7q^s7h?z^e7kJm z5w$fKn3Xr$%gYG7PXFbf_}xNF0AN(fnHsA#~pT}qCfs#9?jBreE@iR%cqLY<<~ z=d4?@B9_k4zAm@WqXetDt~6V)i=H1p4b!70oVagS*dBqdGrZYd(VU?rI2(n8g*@w) z^@oHq#Im%^v+i|+G4LNXWIVZ?yx`i@5xO}%jOqc6khEl%=2Y*74?~;8T2NLI}v+s!*U9@8Tb%N)ZKq+ZU2 zmwF9-udZ+8?&XRxkyQ+a(?nM0@)UwWgsul=p6HYHw416k+tuX1M7Al(q;7=mcw;ohi zQ!&A2ZC!K+Z_CRiQ_nXpT=Gc9&EU=R#>kpo#Dl`QYWjJp!4a@jvip0H3No$}hvUnRA62AGcbvg~WdQR_?_v}rHPz0Hgd0*f}_uN~+u!IiXS|vF(RnV6SADE{} z^=q)R+`7AM6_G9()?H^^FdsTC>sA3vmu3KFiqB8;ZqK8`18({nbbUBPO4YAljS^l! 
z7ohaJ-OrM$O|rw!zO=iLe_aa2_l4lxXDiTK|CQB-fVq0_pzmZtu*vC(%}kFk0)Cy3 zdu67Ue>ReZ%&fwKD>g@tH-Fqdfk3iXlf>AI+6&^v32REQwdT|d?>RDMV3PABHk#M=GX#OUh9AxyPA z03FLgAJ&YIyko@|c;8ppSfWE%HlL=E_@#F`V*C1}nWaDnU$~IU1HjA!gaWbP+t*&h zCQhB&8qHHm^sUC9J=w-^?_VI70iwxPz=p<8I#BL62wxQX*`k%lM+AjB=fhCE?+7T;w@~z z_3}TXEa$>4c0oHNtW!Fvy=Tw4;LDnJ!Y!swjomIPXe^KO(thvJBVuW@s|CbBU0t_tpGYR$a)2H98sRM;O_Tu-TZP6p@>&mu?VOQ;b02DNcxt-x$ zdk_oiDvrg+?wv}Gg`?u~hQU|_IE`i+X!xVQWI#%>Cn)lrZk{bS9^4ug1vH>`=MV^) z#Hl>7_|Y4uItah`sfa9g6z_xh1&!^#LI8qO`HkmRF*49&k@4O~fla^Olp|xd``B)1 zeGoOq1T$tkL9kGtPp72?Hs77U?(_4DvOkg1GS+V2(7++g>vH{#@m&T`P^_ny^5QY2 zwt!kDmI=C9)c-&-UTI>Y0ugKW0375ZzzeJul1X>v*DYl$1AQDWar6X*m^}IEig7;;V%E1j^2V;> z|M`z%HAA9sDUs6;oTRxW1}+sj6KVWh7sSZRs~77>*~4lo2! z+~rrey=K==Pk}&yB4gt~PB#L2IYfmm?s$n5*?|f1oJXDnQqcL9K%D{AuwNjfdQRzX zWMrRo?l43wh86A}w973(vFGi9V!#rx(Z_bd8=z!Ryi2rZF-Uw%H@>3;YVh{jao{N= z)!4R!fPMs0&%A(_0O@_nlk>3(CJ)D;pxVHSyJj=VW9X8ubdh7vQ?b72>Eq*&VAN+I z!wGWp;b9M;=q^}rI?Lf0_-o}khMc7w0L^bvbdm1mmvD4&b91v;hABlboz{7S3GjjN z#j-D}dv~vWz={>d&>ueVHJTHKeOCWViGTduxo)v&!&4lzsyPb&XKQ)0)n(g;W?-fJ z;Uv5Bi0F^KhOVwrAt1Wk!fzuD6T$pDaC~@xmzfge?~C1MRykG4&Hlj`Ubli_cZAA5 zQ5UI+O6Um%zTLVS%A{~V@cX0oQ6^(gG3up{q;yWt)l)@_2j5#sU)Ed@aPxO@l%{Tl ztH;n<5%5B7HA5zWt_+>@T)tFMC;(%6c?dy(nSYAjZNN4PX$Pic`g)=w;!k{VXDy%# zY=&5;1&bHY>5@&3W#lNJ#IZa(-x8&A?Kh?~2!pb;He?25h6i)o|C$A=C!E6#3$U^j zO~f^EvtZxQt5Rw|b#{tfaYpVX5kx&1e+yExRw%vGxNdPfb_6s0@q!AUx}ESyfulv$ zw|L-5a3MjpK8hZI%zUA{B~IQ{vLuT_?ZDW%G@M1slYR`R=84R zHA_?s|9rrYJd$Is84Ex((s^I#X}eq*B|$4;i96FwTJZ6&#Soc7Xr!3r3aL&p9MsvC zg<`g=S;__fD2vE<5&z4AAYBl5fen#cm%z`_6-ji#DtT+_g2&sd&Fo-ZL^#> z*dG2(Te=MG%$_UHFMiOxonsj2c=93Yetd}6kESb8;=$wy+rUU>8L+WNr4IS^eGvGU zfJEcIN`7TQ>z5nlPkVl^lLixMLjqKLeL_n;VRp1~NBV>!5+a)HW^~Mzmi$Zd;P}R6 zm{X~`KPImizI>Tib2H5+V^gGZ{hRMQnl25ViHmt-{kO5YOR)e3`&9vttJ?$r{aGH1 z>*+G0M+~AGLv5HsJG*H$ zmTVT*)`yl{N3TGFHFJGudh*N0<}D2i*Nod7rS)my|8@e6=`{jojSy43AQtrR%4AI-B?~J`{a^N6+EWel?NzGAHNSlqiZZnF&v!Tal zLNZKxdQfj~x08<@xAjTS;XHWpaYW>Q@B`Fcd^%mKmr*G6TR(R6=#4~HdfLkaU|TY_ 
za2U?lRw8SxKSte15-)1kiz@QxA-V!uMj>$(=QI2SVBh62GkZ;9$=BN2+a%JJ@lY*L zQyhz4n;YF#)1`v=)3sZ-;TG?({RF=#RM@m>Qzj-en}g7PTw3r;wG3-b zwMwQ847wZ)yqNbAdCC{?CS*;{!bh~|f>{h+kMjxl_bVfx!N+Xr#_dR7Jb9%j^#6qT zEUsRWhOPEg)o$YGiHu=)8W`PevPELotKz|P>eSqDDF7k94tXl4oa*E8fHry-3bMr3 zQrx#0k2oCq$uL<-S^4V$XhoUqB5&r&JH~z6PGKddQS9N&gPE#uw^e_W<0m0%YXx5e zYlKl&p9h z5<1;VP1S~!WFDh^BY5LNaqLlS-dy!J@EcikoZ?TVvcUQGq8(^Bib;QJ3iQmfbX9pX zu&4QNIRM1o$*vFZLc1u)c>t?pi~C@f{~vkh1AAC~tE@az^bbLKz)5?dM` zbfFm~p)oVzr*0T70{!mj(eQ51%-9o}1`aT3j-Oe}m*lzKnew?Pxj9xEWoruk#ROTc zlC-8|oivPyP8k|_MzAvIt6_#-%mC9R04d7F2@UO=|?>bB*` z4ugfMzmaLM7mu{2ELcZpuMf4dlv9n!)g#)}&`{CVs?bLBmMlXS(pQs&&d#y54a$1# zqCv@_esQRgtteyIrJftVf{@7^;MpQqi|%e;=@!V=;>lGv8BNHek)+@>r0V*Ke1hb@ zp>A1HawAzddd;b6&lYB;8+-9*87;w#;c4YQr%c`VSui(N@SgP%L?YQTsj z6~{gcbDPxNu%m20*;E&^$G8vbk| z+xVPk`x2bKwiN#h*h$c_ClDm0+CE{l#90JP)mBQe?TZHi9AgUQ(*`UgiVzUoD&1N2 z$;btLw!h!HWy`FS!&8S>Fv1Xzg-qk|0Y*Ak(bG(tF8*u$JnUyZbbJ}X0?d?m!bk5s zh)sujJmUKF>$mg*%S^;@YJQKS5Amh2{L?V*itmt5aMFGhpms=SH=68f9{3bW&xnZ; zi;B)Sqxv%II0zaIWoPxb&!0ZYIIdrWuA@3#i_a&3!pprWpu|dnp6TllP$PAyQw6J7 zO-Ebrw_ty)WAC&Ih;RbrS4(tU+wgOjQ5C`Kfg9DXOu>O#T>)SQ|3I4$Wc#fHCN<&M z89G(N_LVMTr@HIq|`|-tfAbAHtT+B^XBRw5c$fezXNvjwKwo3LKOlbgDUlcUm(^G9lv$K zBAZhKF74$Zi%rP(ciXZvD45SZ@TBmVFpf6=seWihi%jfdLP$1>VdNiNLMD0T$D^B` ze+^K){GWfmUZASk1dc9?WYrGfzfP%T8#Hs=I9XZm*GoWEn!O==gh|FlR%`6d?opw* zz799vTGU&-BmhPD{43jv9~C({CE9+H;rpz>)=t@{Kn{FXgaikxYiOiLETvMR9`L@{ zfXYLbzx$h-y^0w50j^vzn2_eK0<>#6BttzzfieF+b?)3Y;O*;JZ+N8^6UZ~~et;Rk zBVV#IiogaoVH&mpMn|C&+AIy>sgVboJA-p@;nn*v8OnJ5I&RJl2+v}#Jin%~VU1>M znYa+kWr^bRc!J!pnKR?(ewNyqj@bN(5RM^3>-$vUs|*)du5)@mFlP0?A1B|uh1muQ zh0F2aH_41SQ+CMFWDHaH`puh}Nh>b^iZb9(%c5BX|4N^Jwd> zFceNNgS#R__2mqHA|W4KD0e>4UL#!Wc(E~jJ4F^&@t?dj0x0je+E<46#>Eo=z-tRN zMH1c#K7WEAG8o3lE_?95neB#ppyg0HB2y)37N)UNP6&TtHJ@r57QwM zfxKuQY)f_HyCR3f<3~JXxCb3Yr_^J?}C% zP+j@emX_Q1yF$1|PhHkbz;F@ng&g?blCz_=9hpLtUH|zEDN1}*k4^wm6kqv8kzyRo zD?IEEN6{ACL#nTunb%iJGjtU8$VtNZ{XQG0U8yD;N4+r1%1gQ*G>KTw^ 
zj?K%LUY}x}Hx3v$P{0$)`Nx-@6%|SU^dGa9e#~?AdL}1sH&*v;w~!VQIKcFM@QQ$d z>Y#O}Hmu>O{%VW}PKM?M!|r^<6)wK;m8hAb#BmG#%CM+~M>{Le6nr`1-m& z>`%3k(tR-!I=YgLK*g+SX)}(2h7f9P;MV~7hsmWcZ`q=S9ZHNII%Jbi&o4%uNtJbo zbgvP?ofv8be<1)+8)_a<9lXbR4@pRzMi*$9Iw8lutEqnM@L{;tF+n%+LLs8eDK0B1 zX$F*`xC>qwSITl3F*O3!*^CQJsvH)-qK1nYa)hWTN|)|S?=hq|$zUU>jQS!YOXD;b z9tj<1!#+}KU!|@LeY5EA>rU_Aw@=vS9rh7Mhl#uOXHwW>wW(95PMGm!;V4V~ffrt& zPqfz&U3LH3+S(8Q;!plzSJ^CEXXqeZ$Hn8~!?zo`0-R;; zUyzRmw5pzJaEf52+@^!WhzOKE*ir2L{Thj3$lAKJUuQv>AjP$*A@)laGR(o717P*& z1Rlt6aO=-cv7J&K6FO|D|9drfG!-U|4OM}%uDz+JUc;LiGzTA|#esEe8{OJ*l+}18 z_~-w@ias>^3aMz5L3`{-L3Y_(_;GN8QkOE}lUlKJ2DrSKhLnp*z>vU3^R7F8W99gu zv-G(701e&>OL51e%3pfM;nB*%(sN7-DM9u6EzoP71}vL^7UT10SW!i{fATqy+1SG+ zQ4M3hY=mE3X{Rmtq4lZ|I|T8@-8Be@0U~$wG^7CH28J!^*ezeEY*m9nt0Taig~Ziw zRM59T<|CYNIdrxC_X)cW}tT0|zK;%BbWtFLt%JH)*vU+(2~-73F_0{9v+M7Wc3Fc2;TJ z28e((l5qjD`eKn8r{9cD5C7GP@gkk9$^%>ogJ|+KUgq!X+nQoUX>*e82<=|n2|Knc z9lk4I%(8zb9<7B~bw6oj*D5|*Fq7_NpYN8UN@t<){YK;&hJFm)@2YxO6lpfQhJoe<=(;4jk0pM*||~4NSM<_-iz6 zyYWZ6+pDWyhgH40x&K(NsYx;RVdEa!_Hi9?c!$}95Vb^AN7Js_zm1RSI?H#5`}Syu zg|9|yw$UEF@9>C2Vc#0c1`qjAay2l2P<^dUVA1W8{+Ilwzxil=dfC~F5B$%7Bz`N# z%+3q0=YepTeIgZhS_YanjFO;>8B}ZrqVKv^JiE}XrluzM9Q|F_C6mMFh}SMP#Z6f> zz|FfexC3i%n3?8;SR*jU9&*QnFSmZJf!-!-8y?6WDK6fzTmX)V_CBeo)#1LU12R2W z%z#*yav!H4k$p9J>Y&5klP!|1}{M(z^G?Yf01N0=u=TW|8g4YlD({U|FqR=C%Z8IaS-55+_ zRr1v#%f$T4HwGH!Zs&vMz?usHdUT!JEjx}R-&WTWo z4#2NB(3weLPjUYdn&Z=1@SA}Xb`|aCGz|PmY7)a09&A9IkV|2C{CMeA7m^#QI=p-6 zI8utpE2dveNePD5?vS>M^S^YGD0%2J#;11#;3ul6ewhI#!Q^b!AJigUfmbX;m{?}1 zNAPMvcq5AIIV6{Hr@hb$#|JTu87rJ4DwF&8?A*Ui zz4aF6&)RRZDkV$%#PqrW6z!`R%kT`@6aV;@BA2>O<8g-x{YoW z5rRkiE&ug>A-u1!51+|;bu2p7(q@VPXDN*sD1r%2h5|#f`ZX}nX(*Z#HS@-QWIm4E zy)DO3AL7yb=Pa!q8IUZkiD+f+&H$v_^jr9eK2qPL39A@N<=UZNs_YfG4gN&3cCd=0 zV{K}F)h>QsDM8xA#3AE7P56G>)no}V1rORs0RgS*WR>H)u|Wf<7l4JVyKCR-sZdJW zK-$>~_Of%7={hKNw*j|KHFjw!qla+sDIP8+K38zqE~M?*bx{OSgsL2 zR^^RiO$F90&UFknBv0|BDPm10U*S)QP34;NJdBQDOnQTsn_t=ef1sUHJCja6nql!| zeXG|fqZ83bf30{egkIcJJPiFcSqNV{wb|ckbWMX67a9 
z;#6*c*HDtbeOFUm9fZWpwUHwu_EniEc_R4Z)d{_(q^by`c=rgO=de_T`^`q9M4HWs zNm$G6J7m$KztE~++x!qmGWXe9+oQ%Ef=sc!&Dj>+#gv`?$IT2=6DMp@;#W~+YyP&3 zqLC)swJwFLZ9eU?I^Q?X24*7Lw6^FErYDFPVM%Y6!{@13nH4*@*+_YJRExj$r@ro* z*n!OV2MfsEr#yukOfxIwrQK%{c8HeWm$|zVM`PMAP-;;@6t72+$#BmRWJpL$V?L>y zrbE6I$A@!O5HV@hEu(t<9_5$AWpDx=g=tG~b@1S`=@`$2wQ^XJh>57vF>w1pdm1|~ z-?U#uT?A)e{cq>wNQJWJHnkx}`R2F7PuHQ+zu(#iRw1y(mlX`+{Ae9!PlYoi`#L)O zfN?}gJh3!5;)$ym55T$*)0`Wt*(t-2A+`t6?Kkd7&Ya&lFdr0_%*gUn+U`2W#U+%X zRaWjyxEvj>fk%kk=UEY=NMl$B@o1Lfq)yejLbt{6e7A+w=VJ%CAVyHeIVdL1VyO;c z=|6{3X{TF#2)#R@|80Q@=mY8S!z1; zBNYeyunjbTlnd6sx~4cK<(;M|1)x^kMe{GhulJaU37EUP3?C+GO(KI7e2gY+-@V(+ zbFn-}hBrw_y@JLbPjNh7_a;F$a#0z1sT&`id9mpquJ46dj_yXt9GiBBj|{&2k+>-C zcKK^D9J(3UG$#2UN;=fqQ4Gr_ou^eP+U`wGw!qDa0=_PeLRedOHioe|7rVUapuZx` zapM2w0_cx8Sp+m?!Lc*b0tk>aR`lY9b#V;Yo(YD*N?1Mw7pG!miqZvWSRmx{Vpgcu zmPpnApYN28VB%d-lsQJ;r8^h%@X9lON%rTvcL;8(|1lwBQtOLfAp3D{O_MJPevy8k zHV49FTlO$N7pzGn8Ais<)E%O)F`%I&f7p@?qD5D^Cvo6YXN>3aBQyz}kERD;Jvj2@k%y@FKK2Cr$YWlRI11(2Kt3 zv@Ydd2~n@68wA53OyFyIwI`r|6~CG4?p^|h;3D%uzkM^v>ES1rkG1~qgO;n*TP^?O zraX_4g|wu!OBFEnWEk3c;@9wpi%3U;PFs~cY8HTGTYEa;w8ZxcA-eI22X5NIw(Ri ziTFN%$v`Fr*<26yy@ilS6?J$@f_z?w2J{?U5h|Q*0bX#5no-i(mVH0|t z)|7`fGwGUc!+F+EU=*3+V18VaU!BR-Ulj+ruxROgnC((@!>k`^*nqmoCb029@r?fM zy54*aVN512Da$})_`mXrY(k0TJ;_O2u&nZ>K8A2c&Ij2>C+wvLgQ4rMJ4EA}vyDIV z{cavB903ZR7O(}5;BFe2kK-di`*mZlS4t`xXT%B9v12WYicul?3+b0nDu-FEi|0&omx$Af+jdQDKlyb zSir$W&hWIUZm_4g`bNjckKMMiVcoTKCQ(Ihr#&UrXee<6`+uOvh3OaM3Ra5_sI%ln%TeT zrcMJa%(q723GqB#JV!FKDZH^}MWx{O=uAusQ^qKaNbe!bgRZ@l_A_&7vMs;`SN;i}Jbn*Hx^{`3%Vp6UkTe7eS$hHzuM%WRCIeZy5 zj~w}Z*k(h{q3NqF&MS3@ti_v&<7WpyuebLXMzHbtZk zD-~=q-hNob1t67*&*7j?tSN-E?V8K_p$Lv5WI3%qVjs?ZI~=33=uF*9vJ4UxlU`=1 z8v(`N{?q#Axq{E(Z*O9+>MfWBbR#Aac?YCoX4EQe-@d3?wd6PO_sPicz0VJs5%U!b0e`}XKPu_ zEj**!w}&gbp!WLWPmP|$eRDs|TG-PwZcMRPg}0OQGzO1AMkb+QMKt!+x1Z!BjPZEiwNZf0=0Y^%t;&F?zFRA?4?~ z)s5pI2%2&TF3G#cJ4i&ObW75C8viZMg6Nv^8qp@V1@4-qJIv+b+&9allHtC2_^rT} z9HVEA>qy3AvAPmDvae>Rb3hy3D6vnchOa+fdL0g)NO<}zFf#sqwVpCcN?aktMKN|0 
z_upf7FDMeS2V&_1_yLfvf3JK6plzb;-GAUfFQ99aPh%M^F*IEp1}UAU^_$J^UAwwM zyP@$pqA(nop4fkEr@DKml4^z?B%e-Yr55 za?6&F-Y=ke244?Y&yDExwP1+o|qFpkitk z#8{ziJB$&T465#Vj_5ZxBf|IV&80SdTu%uh0wm~Cd(S^)_cK)RIvEehrltH%z-`Ly zI=kH83}_1tzhW_(GcoBq8%3z4$*gEC%Zu3As&D~MYu?11;$=;Z>tOg5Q^VLi<+Y^K zA%r^$zYbVq(g25~@j)y2^y0wKy>;T=qN#<~CVq0nBVko0@ruooIv^KOG|ct$IgShB z#g2+=ZgI*2L`aNi=wRy);$W2a{P{jZFWO^a;Fyi#1_P#gJM@0|CgVhZ%mPrdx=aVw zHiimiIGLgzAB*~R8UsYuw>?zvj#);10p6Fi+qs-u`->67y=a6 zd3K_jcFtE~!AkbB9{kLI>%LplH4q#cSDn3mm#$bboacLDHWQ|v{GCi^`#vMvC)7X? z-F!K);pawzvLO`h2Tzy;!MU-ikJ9ax!H;ttH_p}%z40RMMNAcDJHb4{3vaR1H8U|Y zvr3tUCZwOvYd<~H!aL(- z&yqAHJIdtFBGlRKOT(-;biGY}g5Im&!b=skTOAUEf+(Pj71RQfqqd*4cmRh%Ge(hW zHLe1lO;^6~-UoPR?D)FTnBLU*P$QS1j3!&tJqYNr@-t&#*>C$=b;(SJ{TdKCVAyA% zgVx_et{K(GMYhpUl&LIe;iff|y;BI}<43H}T7ab!mrjM_*!T zPJSvSjo8Y7VdsTW{bu0R=k@6g9f13yzmk(Ta9ylVk(FXWkKPDqU4%?|!)WA;!1}kZ z8R+PU74h!qZ09UWIW33!q0I`vCrrRz%VP~aqO96A8ClC6OT7otyc_4PB9>`IGN|y` zU%)SIn04-U65awVfb%=km~(X>sQsrUHnobKiCD7(T3kFuI0w0F*~5iTnTe4vQswQZ zjf%9q+h*VR$x26Af=Llvokv~l$>G%g*y_l-HHEH>skF3lvEf-8Ei0uo5&t*$FK7-9 z6#a|Hh`CO&M7x(%u0l_K{uiMgLS!qM&R^<>M;9&K^~vPYx7Fdmao&U(!EF}8QMoao zyRAwm*lVgjybMdYEy3$7)5n-Ba@gTqS1j3q^ADJymi5~Mo3-v=lTA=x7&5Qe%JNKbvwWFb<*eae!ZWs z=ku|i>fadeB?_(b869~HySq0|qNAmN=|!t}^M)BOdBg{j^@>F=_=f|O--GW6&sCJ& zA>X&q7rBE%A8hvy%z@{4u+5UfGeg}cEJPqgN9NiC5LZIfB>+A_bW^f#s5+N_BMjfY z0pN$hka=fM@cRsRa%wjC2|`8Q#vYcT-LQraV&6h5bmj*Ki{A+wLhwf>RY-CSMCkt2P(39KdAG;cG}dBoP*r z=KmS;|F6lvp=rRl{Hm2yTyYYBG8D$5Gm+f_2tPb8D>C(dyE-*>u^tk0SlL^8xd3-N zE8d&nAEk$kIbz!OXS2<5y51(uFJO3Zc$xOic#*fOXKtK9{%6@B1ia|745NYwN?x7G zg)fvIs3)g)2s74Q$Dm32Q%!le#*00l(;USoGYUU=|`Pqt)TgLo<$3kEk4t2#I@Lu z@#oorko(mC03{*1{?3i-r9p%xH3`ZxUUyY`DLMM1-4 zG|Ptd*FYVW-8uLRo?rLhe}nAVy(AoXMtCD#7hgKS!h$bMRh1#3Cf+<-pIDfX7wPEZVP=;Jn7G><& zv&VJmEQBsMmCWIWXxv0W_I=#UnfvT^v;G2ki7eau{0HaV8_ir%7Fs=DU*9UfW#3-^A$5r6k61zF$1Va=7OKt@^U-`m?)b8T%JwAF>Th5 z@C>e%Fe*baXPn{5o*8@pF$6e+{p*d6t|Q3@bkU2SS)|4Q#ME-8?kP;c7x=8e$6)`m ziSwrm6IaBP?>^??q(WfK0^^jqzO`E>q4t 
z)yWn5%8lv!8Uwr<*1p54#qT{|Z206bX9tMT_Cr02bkQ@C*xmAqTC@)n-M5q_BqgqM zR@asMy9#oEJJ2K?oJjYkP6eM^C6hqqi_1G#p9cab?fTVBY@^Xi54b-~Y#iLaB4Gr_ zRA(SeoT*RQ2d%4I(=+?xhhJwdwAq83v;$4umCFTc~omx z5_-+d%)%F;(9~?*df3Kkbehmvag|dD<7*TOh@Hr^0m<39$0-O(nUYS33K}Y*V8|Ch zu=Y8Us)sV61hKcv{BY$$iQ~QehmDOcEz9(0KY#uyTJT1;#9nSDYRmgNcFquHc&q3( zx4UWT(26bBzsaD)8rTquw8>n$O**&6^1&J&pex%lCV$BBfp6ZkpJW50>2^2!uBKMI ziQ2n`rMhGdq_EI!?%!E0S2L?Y)G~Z`6)22by)-PoB3@~Y`2Uz;T5y#Kdwxk4 zGvBf%#-RtCY@CLs#)TYf0a~dsp8wdHz8zZblDngI%x!wX6odiultB^i>|06y|D_Ti4=QpBtsuuH6vCA%(5;FHYVQ25z3fcb=<%>EcBm@SgW( zwd)KM5QJbnXiAuKu5)+gVM%FM$8>IXpA^9`=7M>tAQvpNbi6MvNvJogt*=pVK*^jO z4kY$-gBLAWKtOs6N@>H8?0?QLnmn(D&FRe`zWR@b0GW<5x=y5~l7DigbN*^quM@$+ z!6__-FVcqU7M)v5a@OSLQ9OniAF*ju5X&a5_@;7bSEuAivMT@esNt`ej(BUESpbTL z0`@mi2%6I)>J2m%o6nu^rHCrc?GW5&;*hWheb^N?M`F(PIznSY6m76erkw^d^Zg!r zdX(~Q#@M;V1K5B>-Af|j`@L!Z<|^&_Vgr!GE>BmNvxOIyb^1Pf&&_Y~`GUAR)C0eX zX2awzo-%fi#KZ_&H=T(1i}X=0^fRyV+Gs4C_&NywEg+KNSm zq%QM9SRVb&kiq+$llxain{G<-1$!@#KRV$<(be5k{^q-pG|$_?5OYLi?$moNAOjtk zPE+VqS)OUj1bOy|%-nFd@r7*%`CW84vV)&of?G>xR;xB`L@lQ$go8*LY(&xkC-ag7 z(D5OdtraP;f(@*m2mB!~`YLQB*Di`oqFAsEq|`8#KCV-Oy;hIO%Sm9_7+>{<4tg z{O@ww6n$G*9nN*BXAl zHYq zYa^`uoAuCxaNKK_F}%`5U~KPjnL!NT|@$Q?SH1k9dC#O=##Ly6P@ zFEY&c{6d4A+^3!+ei8NZ?GTtXjD;hqt5QQ^crD zl?ZcWxc#@&{M#X+Y<82rj@dTqBEU59WqL6`e*Sc)=ZR8+TT-Qe3z`}esMTa`dVr$P zFq@X}j^5{GjaY0;A(Wn7J&LEoaZOu*4hEFrwGRl9At^(8L+lnWJ7q*~HFphZCmm@w z>fk5)F|E2zl882PU;a?WI@5t^Up!nLE-~@)>sPpn_l6I1ayPqDa(pW`%gWDX~EK{`+}N+X$smcOX4Eo*^J)%usouD>Ezs;a0+c4C3a zD~2`;E32f0IjmEVFbQ)&$bt9K?ajN&XEomS37%K}H=V_IuxZFNW}GsmOr0bL@P zcFyiGIMXO;n}pB925TRb!igA0Igvrv73%wdqrqa$cv&e(Fr)@_oW~f^&51}Lmo+g7 z5XHF_vLOV7e#Jg8nmDVu&GsIrHbIl9L}j;B&3`bLsR?n)?hvvXM{kL5j!03qH_H!8 z5Rb3qU2Yg+s^9qa!PBRu-`24vlUE7zv+{WN{@ZGQfz%5PI%U+@jye3z>MZw+0XBKua=ve+&BIFZER%;P3R4Omo%I7nI=EK zeYhKoNwX0NOwI7v6up4AbSk)oGITbS%!&yg0{5X(xa)6d8c#1J7TAqU*^^~GWs@+m zsxl6^j>PUOLH=;uT@_dYpev8JzXgX0!=+}eEO+E{eRF(ntvL0MmG6`p|DQIawvwD? 
zIRw$xddU$F+{w+4t8;|ce%>al{k83dYyt53ki_>d{#YOMlcC(TUe|BlEPDR)zgmFJ z9n|<)8wMTnR1T8BDO4m|4#b17Qqsi0Uh^SKTFR%K#HT%2`^jqn_rX&b} zG2;eV8>(H#wqnVASV+PO9*0(|QPS!vhoA-AVLoeAG}F2Ybs?w+%_`bQZw+`k+2u0H zonLF`{N2V1T@_AdF;&Z|iab3;a}9NG2A>-YP9Z(klF0xPG9CQf4=jl1EcTjxuT5=S zjYtjSyCa3o$n-b9WZwEUhsB8R>gxE{CVG$7X0GXZg{Ni>mQwnCDOKi63-mF6l07zXxlMQeL+U=Q*z3m0r%nwA zu>P@{S{ur~PlsiX`P#Lw;P#&b^lzp}$LmHnplpu zRH*!Z7=&KU?BZEKwGcq9Co6%j!70mMYyP$ky`KmM!q_~F-qv|JqEA=M6yJFtRY_q= zct~vuV3d9$!S(!0t#c}NAKSPc7??L7QXvjn@kt4jytua)%uvzHzVe*|(i!Xke5&&i zb0`JDr5XEX5Nr|$?v^Lo+Qn_kveaR6so#oK#~a;FUD;c9^N>#h8D0N zA`T6J%^ib?Z-e6;sxP=-%Aw*tEMZKUI2QdqgoTU2bBevlN=xoAD@!;=6L(&KOymuY zT?LxHwBl`MO`2DU{parv<}NANz$SdHEZnO9h?IRUZ?jL3m9tSV4p=7W8%(+LEl7xq zY=XL5Okt4y)KA<1P}nnj1E8pDoa91XZ}`0|XamY&S&fiqba8j-8~2kx{M8O95BCD5 z`d70{aHksYr@^u%JYTseKap|Xo~e|sIHTi!K+!A=0aMWNGL|DrnGk%wV=$(_D_b}WC{jrPKBi~rii)K(LZ_ghznuIiL4LN!g z<1IDX=!7S;1n%)SMd8W;Rqx^TGtdF}PR36lPw3|XUQphf;M4<@AZdt$-PoEz*H^|% z@};ko8cgsPs;Tm|-#)Y({2i!4(8nEP>0C1THuT(p;WKXD3{#Cl4hgdHU<-S#l^~T2 zY_#*brLX0R6Kn*Vdy}s66p@myIl^}!0ZE^Rf&rrM?wYJ~^-*J6P=>yMsUt?bqo^~| zRlH{P)DcRi8qssFh8Im>VNTHhdd_V(@hzEE9TM|Z3L z8-*$YTcgn)By9=@x!)3#EuzS}AH>R9fD(LmAdcYQeQlCBTMWaTNJFk6x)3WI%!0Yt zq3#&Fhc1egj&6$kcra=Ipm-cg51sLiV^GE16C0lswKV1gPAF^e*n3fVe8|G%BRyJz zIxM&)P#@A1mpwKy?Z;Uz8R1a@<$%TeM&+HPp^>uQ*3pOeeX;xVv!0`+{Lvd+N#FvO z<&nLn6HdvUweLFp5yM;=0{`4=lJgG{^r;^qb+o@$Ll$H8BaY7E(q8Cd0HNA6$q+O) z{yNz4ZBVDJC^9m}U-2qYB4prLThWIK_ZMG=R*4P8xIv*caGe13O8dh*Rk?=q6M~99BQYltAYYLXk5lz?L4HB!$CQ#e~al5WxL?v zQ|yTrlzvEIJOvE-Q`tbxz0ICRwr-d_e!ueQK0SJ9&?jCQ4&jpYkRT&9zV8$u!X`5@ z^En`SR{wDyg;#UX~`n+NDKvJw2TR z{z0H$Mt7_Jrx4M!yft5yZM0hDGfE+bZR7Wn)};iNLY6kIeNI*sKd@xc`d{BPXy4~4 zpvPY%x}4GMKv(0=+gj}2fV)BYJXYs{0`;mM6^wmO?5Ys z61cv7&+EHEUC;hk=eqE!R!k6*bieK|N1Kh^md=}ZSo!8}Y7hH#>`)yK4MV;tD^rHj zzKu)@s}e;vD14I!kXB!~ohRpsOlLNFm5gj+|5OdyiqEM?s6RaGOHM>{3PbHqX0A3_ zSy_!fJgyb1Geeea2pI0e&!Nm%j`ORlwM2reqLLFgsKgIgJqlSn>JWHyZT_!p+E>TT z0O?2me2+g^?oL^x=_vreM0X;QoJoAp$r5ll<;8|s53U{afz10iE`-yU?f8{GTF6NA 
zjz*1BE`XZ`$+9KC?Ee}m&KnWU0@{ex2c4z&{nW70DvKX=zy*eX?RgDPD zVRWsUy(10|cv5%9G*?$QXsljsd006dvMHmcfNS$V$-?GgbI}Lc^Tbt(!EB|jsK_cM zuB-tn+~$BhZoT#rM29OuD@wKT35#BIYyxpB{=3nS$oXy>@Uem>QS~0g5EhX^nAO4? zv<_M18|b;ypNm{Wr3DQO4dNjmokczF7$Y{J$0-SIt$qCI78iWu5+&Z@ooL!ni3O1X zaU=^hOklLvsvswQ+Y7bRi4(G8no{|=b?a6Xk=bgrK|>^qCv9W%riMT7-Mu@6widu; z%}hvff$P0il09lREj3E>Jc(Zvy0{Uo#$LWS9Be??RckiMEzRLetGF*RBXr76+r~qC zsp@iLS>pM|_~B?8d1}Vx8@X$c&i@-4>yv)E^UfIjDF+W86oSR? zHv}PJ(%N6{p*dvyn(ze8V_QTKl)c&WSly%y9$_|j@4&T_GLMBXKCPh1sc|6vdhlx6 z%1I}u-}dZbF<-A9kpH9ZgQ;d=T|juthw8PlRcro3~jU{c6|$KJ+{N* zhPLjx3;&aCRYhRipocIUKh~D6e%7drfvJ?5ZuVqjz_c;bQ8yV^*ttL50r%CecG$>w%Eu*v#XbpOdbh=;S(N+ zGHgp6!KV!VSUwz|EObt9?O%F)U;!azXB{RU00iNU?R;J~OaYo5$`eH-Xi-;UJ|D=Q zh}rJ$nPy*Qk1wHd@D;M(i|<*Q%ipzV{1HLit@$dh9Vh{J^;HWOb_*`0qt1fBeSAm| za*Bc3p;H7&ayXUuP_AzP${2M+%$~;maNxR@{?&Evqm%u8e5^58Ntr9XDCsMV6Weye zo8l-x{j--Wz5=zL;VaDM>{kDs?fpZiTe zGn{2e&6#$}j3s6rAp7oD_vASJ?)1y;;IfskNn6ehe%CQ+V1(B{;MF2E4&F4uQMizsq&k&712B*dW!;>7 zgBlt7ra*8e25u&Hi~D|jIT8c0FWwEtal(E2uw{q-D;$$n#$qTz$8c+BJbPp$l3-Xb zHl=JC{N$g1<{dxrtt^Lj$uxod*G%5x1-ATAp_^iEf!B~=nAHm8<+;LGJhsKg3eKL|=QZ#2jq7aV>M7-adj8zHQ7_$rE z42|@iGwww9k_&maMt1ID4eGz08`(f&@w>txoVe=H^*M|#3E{6Q8#ig<4)3cx2LqdQ z6AM<*dx{-;4CPJo+voRk7<}!U&Q8oCRW5n))=jznJ8$`iCKYEWW6eRESGP$j})D;L#z_^vAwP*gD$JbszWCW}p{Be^?IR+SMsDm~#7Pui<8qk90Sb3!D zhV`N}k70?iE&_0N{WjmKnAC8(_zU&IfF}P<7szY)67^k;Uk`uIsyt`Z9}jz)vt#FZ zxc~xKfF%m=`)vMmjXiYDKb`|7r=yjwZ`!i8!Y5iiziV_xCwC^aR2g-&_78PK=_ru`+IB~z>9_N~L2! 
z94;CLhzhf5ZcfNYkknK`TSY*5#ye+hCP6=!3QQqSW{559h}E1 zAKAxK+1cz%*V2K!V=`=xo5WQAZ`$})2_c@UIsiXtBS??sXNUe8l$fPQ3fGUH?nX;E zgc+;BOF2+x1}mOnR;k`XJ~>7Shv@#;m~dv_-@zunD0?sOncyZuq;6io6(`+8RXtS+ z&Cc+k-Q1^l6s*xi(2yRyxUQ~N6F%sm%P2-O8ZyHkhq(e|Pc-`SdgH?badS3iS`)D9 zY;OkWSG_vSenrP}Fa9C2M9H58D{tY#pB^w^faA|L2@`BmnR<%hO>+-6R#ukDU16-p zA45Y#%g)D_aMGrS(1CD``i;(SaJh1m+h{Bs_HrlkQ)q~6m)^zcV6>|wvR{q+)`lN% zI-%Xy3PGTOgL;a6+EYej*ltZ1bViaypyTzHfOGJcd!M<7CH={~f}4s$olrIv)y?)& zLa+tZWucv3;NQrfn5T0~BPx!7q~>;8$ZR4K>sm>2rC6nHg?o`x-T5F0&GH>UDu~Gl z?Nt6Z8gFAO!YoE*b+5JJ*S?8NPB)+JXYL$nGzD);a3qBtI%I6!4yI;6Up7O5hz^4( z*@TU>H>(evGV743S+1}1?em%@P8Pdt+Rt=NKY8o=SdvWmiE@V~=Sj>D&vjk}4a~oW zgJDBEIVrY9(qV~hd*cZij_FvH4p1ybq?7Jw(`2>_z8LAM0*q0(ZQYu!_x0Pi4ScAx z8-oUay(wz47J{4lTnzYy`nXL|QKx|&&?1aa zLz?6VDmFG|(XwS*P@h?}8$b}CIO(XXhlgmWs%Bu2T(I?j;)Uk@#V&vpu-I!TC`?3M z&?5Aqj$If)2z@G6jdW;%%Y*@_rb&Np1}QJmam$~jK`GV9zBh<0O8>( zCi>Zf!n?^%zI|)}n`VLEl414XHE2I673!w&s^C2v=x?507a;FA{ek3%~o_!mKJ(m309FsC;AGxq1pH1T{~G4?50h z6&trgjKDt+KzRWmcS?2)cX{I077+*VzHDF$LL>;HnD2vy@F2O;1=UI&6_Vt;?t%4 zC`Fco6}|ig1~y~j2yfXD$dN@|R9SIv|6t5!3!99vpWXA@Pf$W~>JNQrIm6k(d}{uj zDe4YlsK+BLRC`Tllg+9DZ3sN(+*QZpZt7wA1retjx4!z7Cxm7(oK8|Yf9@P>HdH_z zCbi(1ML~i(RsDVzd7yRA>86&t2qC5`bq3zV4;A?42m(`Bzd%%2mQFtT7X zY|2=j9yV{})&26t$ctlQ$K`akfHxe;s$Sk?T6aE)?sKe1Lijz86!;n!TtqeobxuTV z*oRlILioPnBwIAViN)&PTtMi5i)Egk!y9#L{(#!3iJ-T_2x)_C({iHx)Ceqp)`y~~ zqM>nU>qX?S5`)kKtbmiXN3#%+1cvO^U7work&=Z3pk`)$0c90;MPuHxFr$`X# zrouv*Bl!2Zi&f62hF?TrybW|5u22_1HqPNH$_iQzpAbumY;{o@8sd4$ z5*mr0T##0~;lqYmjWi$k^~1ypM!yQcXzNoZCML3;-2zFV8VWWx8|r(_V6Mm5lsA7B zFSq37otw)$hyS6f*tkoV17!BCO(PgbJPcFD+BI!y>&ps3SEOtsp4r=(?zzRQ9`CL7 z$^6c7_vN>M<}}Iiih}sku^%$z?R~!AP2!~plGVa^JBB(+;6~rRUEZD zFj39JO{Ml>(Mc9#UWF_DuY|E~)+8-?kB!l;zN7iuPaT651}0H@!_X{Ok#Qy!SatgX z<`i1=+Vxm)3ov9OZMfsM1Tstzhpq;w1ElJdJ1GwevVF;J5ZSDGzduWg1tn{Am7V3f z7zTf}0|Q|Wvp(<&0V&K>v-QNYjG-XlYEs%u4JxKM=2q3S9Yic6d2sYCQGN~a{Q0uS zOwugF@u$rDPDYR_B7<%{sV7cOIYEupWT~Wtp8{vm>kQMir2xP&a|zoXDKT 
z2;60NDUaoci$n&Yj@8Qm&2vMwqwYxKs30Qzo)k7&<2Ub}d;2+5TgB)6cnPOV2PjaL z(^xpw#-TcjcWCjjp4(FT_3Jkviw2;g=;|M5qq*|J5Psy{5C=Ipc6%GIfP2;a9;}`@ zJV#R_@7b1AohiS-3+QU}A@3EXu@mR({lEAM@|Ca9E~&3-N?J490Y*e=umdI-9+2x| zK^tgjqrW_q0#w6#?AhVElcW++CBNT6TnN~@l^0KTP8W4Jj}5)%eEP2z0Hir9KR@Q% zJmhk&c)e4Xy}XzF1?f{Y?^J_g!P7?8`Bq1+UE3FUuu8*yneu&>4kYUSb?8uIK*izM z3O)t2EZms9jZcN*<32b-lO7xx-@4kAkmA1}r6!()!!HHxYv_FW+4>F*f7BqRb_%O! za<8LU8$XsYk3YBn2Sk*_flJ%pmbc|-`SsPGI_C5+oC6lHh~KK}}_9dG8tfwtIEU4;@x}5hTRz@m~{@^~2w^p8FP+ z1AX}TC;Lf&wulKh^7{c2nC3LbELi?D`mN0RgCYwoaeXdT-iLokOmIhb^( zTU{Y^Bx|+rT-7=ell>iGksX$T4`(V24F(ON`tv1j2Ks-nQ_V8LbV;N~_-V;wlWh7H zFSy(pV~nzWnRyQO#Tq1R!1s@oJ64Ao@0Uq{Kj;x;-|N+~ms-i(Cm0M4%%6&18R>xE zJX%|%WP48`@7^L#k9a1(?qdh~eXArWUMsF8ry!F~wXl@r(b+fwvS1zh|1YEee`)>S zgB7+dCFS_oTVzub`FsyBur1 z-zdhWJ$lS-*Ot#yiRLb0l9#uGo>Wyy?qJL=oc57dTC42M`ue&Nn{V=zMGwMUUgL%E zT8EdaZq4l31^`~e6>ngnb2U`s%AlS|q;AkoG~rAziUK{PkY0>$dZXVR)C=Ctf6@(W z>gt9g6Zm{c2tice{bLB%lWBX{EE*r3K=N&DhYt@6vfyULX7E6T6ql4_{o2d8oMe$y zqafMe!{?s9UC(+RnTWezKc-ZsBQRO}zP!8%Z_7751hpolLgtMNL*GtL@labXoB0x8 zWjmj=ZrSfO&NZMGXJl#QnS;go=bKsqJDaV9ERvM1D`vFAVzOJH&5bjh zMH+yFaAi5Et{;XkW?quhFn6DjdjMZ%r`XVAK-6SHI%_S!Z?!hvVO?I@W_RV}gwy!M zODP>@+gb5yAgdR2&JgukP*69k^6HOid5fM6Tz~n()xz?W=qdAeTOGWWY&Cqs2B!zM zhwR(VA9FC;$!BAEulWxS#x`l|5gwapwbRmSUeVf9pIW>un;P~6mOkK3(>(9WGT%#M zKg~S#zJK1zlRkUjKl1#8P>Fgm3ry=%`bK3_F@1JE6}|NU_$LP-4u1iJryV9EiPINM zT4-J}^-ONJJPWkvyS_#4eaU=`0Vx2T9a9hc1{XjNYbUef#cSXRS>~0vG4;GkxuL$0 z8XzwoS_Nzy5MA&aOsV~NkXBta5S@YR*m7~LF)1r6o4t6ki|qw8c7SviDQP4n^Du{= zz&y630zTNA)G3qa+*Lr7nK?P3%L6z)bHF3Ftql*FVwB3S$nIo)K$A}kfjq=|au{&C zIhE6-<-I$1f>3Dm4x&+}sqxSx4>Axat@Ss-)RAxP&m!|nHp7+L7Nl~t$KTtwZ(mPK z^I0hJfd6eOQpvZ&8gQnHBi~E>3~(O{B+X~UFfe4Ka0fLWb96MdwTEZ`pjdJCk9tpn zG3G)8@Hagn3S4ecl`G28T8-_fF$~k$r43wC?ah`fTXqxZ#fy(Ps0)@I6b3F<(J8Aw zOl=-QFk;rNTPpN{etkiJm{E?bdzshYrMRI}Eb;ZbJWMM%Lq;RAf48ypf!Se^N6h@Q zFjRN0exuqtfJq0YfuBO=1QB9$IzcCUlYT$39nhK1dPx+`Adw`?3_dXJiW8kO=5S#o^OS%fjX z?w}yAerSbkKRvlEd0-BA&Fg%IYIeRCt6xu*>tb+{pa|LSNDHaGLbiipyx22aZeONY 
z$0T(nO1|ey61=XxlDW|H*EH3j;Tb32=HHb6XZT!t~f#w zJkAfGJc+G{TI=bh2D9d_rlMlYck;a-@?{CVk@EgbQm;V;EL2o#+%t+rw<=T9w_*rF zh4kQWSVIq>`2hSi%wl_maeVYzNHkf^vg9FilnmR>(*UOuCX5>wgm3%6)vJ4NejbMMV~Pl9!oT zH2$=N1^}Zs542&$1^`FKBT|9jjq9!)HrkWBCf`qd>wH2*Htr^3N7f9Z=ZG`8>100;2Dn=zj5xe2yr77ef)dEc>7&h z|9e-%jVDhgCq02%weI@>S57i_#4g zW$goQWL4UtxnuSNdSaLAx5BhndeEAaL?ZI%YUTW3E6KL*i0Kh)gi<>FjXwIVK#@2x zR_p9SadUvv&Hpn~L7`SsUOr4?62czZ>08|(+egHGWr#1udonM8JXHE&?WPu@5|E4x z3(_4jxdF{;@e#@3EfV@DG1rCpWh)<}@HNeWaU$6-TlYMo0hr=5&Yy7wlQ^?=D7)lB zX-V^f3bf6&0-yvKBx=96Yu7GC^bCaY`oE#Kk|du`9gB9D> zce$vg@2GxK0$3Q*4-N7`JbwTF{bJ7|mJd+Ku*W zmvc-z>X$e1he{owPnT}ecGiVHb{?051s2nw$5Ap0EP$Va)+QSNmhE8)Inf(0J|nq7 z64lfKc$$!xr)43UWR|{d_!CJL71l}N+x3R}yB|yV2(k?(6*g;$T*5VM$?YLf08B#p z3jzO|s{CFJo)q~M7eIX-&=?XYpQUy#OB9X|vhJcV?L zGe^*&W(a@(NKB5z377n8<&vx$wF~f%FZftLSr$YmGr}aI&fK*MpK>vWIm$D$^Nc-t za-PKItx*BUCB$6BzVB~__Lr0@ILyLlICaFZ81)M_i+e5$Q^TRq-8bon$P|@?Y{vl` zSw=wt%PyU%uBvL~#v_-Jc|^?q3kj;fPvQ=1dWhhmX2=49ej&IPBhB}BR4WPoE&I%? zQB|6w*1D8=H;4B)WpSZZ`3$6JT6EIh#gWGo;Q=|3V4GM|{L~ z=-lpF)8NUVu8hXtAkoFGaRWAN;Kk5}L57AB5~W_trNFmi^y~s|NwJggIgJY-j%B}Y zZF>&h*r`*UPLs2y-n3~`t3f=58{F+b2ISn|?ARZuA$;@Z@*Pd^013<;v6Nq@eMikt zd1xg;oISdKId#206BCT;N!fHFa-UoBivm+8E3po(3c1C&kIeVaHy9n>2AXr%tvdjX z=ts{(4ijUn0)&`+3q^Shi#MI8vj^$#S$uOBnHdNzGagn36_R`lF>Da=g)yO8|JbGW6u; zM)~)2U7$JP#+FXzF;m<3TkhaK#OLh;V?s)sAST`xhsb1elQQDY3>`Ix=riM|(=q6J z4Lv^oc^0z?dE94P{bZ&oC@idwg4;-E@LXL?|IT-qK|!re69R9~-`x5>fIL6X2l?)7L_wi9_#WK2a62)AhzggHBrMroDRUQ1VTC_8i6s=MafC zlmYl`xS*h*<6Rd?HJ{M^zOYesNK9q_ z$&js-kT;X|aw{j0R3Udh(!*OG^h<6es3_0jS{A+{?;K15P{vI|BP3>sN0evL0Q%C- zHMHIKbN?(gFR~It5x829LchjRDM$AiIt-N9cyG5(VIr@wUh?wV4ErX8xv3%6c{NZ( zVie1?)Jcn%X}x;LOp21h=^HzP=gL3W-UWcOySayMO1rC+IMqkPSn`|iR0A2 z!hpt(-LuZWWJ^bC>H{s1lu%EWkxa&)5&*?fR>uge(mY2i5EVaPuf-cQ8sXn$X`5(8 z_-0gBoIP^}>}vcfVY6j+3?hV9g=(sNHA$hR(e8@64PjwttxMm$3FAIEQv-e_4Oqq+ zx_aNkNHKFf8_uRlbZY!5SqjDxzVVf=sDvY^e2C(zwW^jcg(#m`$21s81@iXy<@2Mk zq@_5*vfBbIewqlD%V^`f0kSI`&+_M)5hWut&P5sw%~;6WYR4}t1dl)GORs==f$+m& 
zCJys6MY8?RW2DRZTt9;nU{t-P<#TVy>SS3V*2V)1mi}3ssUm{qcdx7B8D2=)oBC2ixaFmkZ2Ww7PLl!Q!cx8Qt}Kr-UD># zoTZ)k!;@?YaUR<4Cy$QdbPFZS#Ep6aGFfG<@?I+@oVAsk;FqpW7Hy_4l z|9Y4ZFa&F__T#LVz?0@Yo*x)6V)M_KDrW?o)-WEwi$(fV3uE!-1D_>tpseWZt8PSji;vH=rNGclOPjHU0Ju_MdrX z^?J%rML|mEuj9m&#PiT|1OArYWMMP*#3ryX@5X$}s#vL~ZKR2pkPF||P~oSUWmy_? zx~eNbohMi5T?J`J+4{7-pnH#xn_Npwg0ktrQW!7RU*|KVGjB&gacI&3B^R^Z5(cR0 zwP&0=(V}ecL~;t9dMU@H;qIMbIzFPcV4hX6XO%@25~2~uuOuf&HFiKI%x$N7FGo2w zZX6!K&go(&t+-_FEbi$*%8Ikr7ha47&V86Pc~fiB>aOdi^KxkB60QWA9^801 z<2ms;dWeXF5aHVI_{?`8@@=BBbtAbbB@m?a8jHWdYUf_TTCjnR`LZF0R*XMtw>XjG z7nwabKsV^H0X0G7zw##X&WF>=4Ia;Xg(d;|BtyAJdbHyPrgpLKLv?&It#+T)gy?m2Wg=^J^YSUI^nQvVy`iF3ijqFZQ~`7dROiji=2WLWe|Nne@dh?BpM6lOMwTBOD!YmZAQV6HKnCdjV@^ zcb^gPh%5NU`jj)11Kx8v%;BAQ^t4%3*YFH^!C~>DhO-Sm9VDpsd_i1S6}7}3t-S}C znW@t;=(iIP8)bSUsY0DZy27K-8%`UgM$sw%06jjP)!JrXDE}n<-W}x)=}qj`==Oa< z3J8&}9)&7O7ei5<3qL~ENYIh{2R@jU@THCE@5WC5>mS)4^R=o!4&jXyQqut`&F{3oAcY&lQ-nF_raQ@5Zfq#vZwUJiLu` z+hp{L4%v^IvgIb+U3@QWL6LO6E&U=UMYhZ*^jZ(p=wQzesO3c6{8h_cYxZ>-{tv?CsI$oM5R4t` zC-^z%XIKwsz$MHIE*)Z!b@3`zd?ggv@olKWCTOB;{`$9Sy6xMWjc`bMZSGKjXpyG8 z@v~S|8{8%H))FN(EUAs0ZYtW*em%N&mC7vnrTZmach8`8y8>oaX0*D#8hnmWq#xF! 
z_t*qN{slb{XVBQOF?I(1`?tk=<+2!Lt5+LLRpkv0XZyaEnfCZ`=XHm+Y-s^Ce)Q@G ztS55o>B}gs-f7UDbXkr{r@wdzX-_%?Ca52#bh()Fg191`k^kr{F_qaI1xX`$*P9Dp zIH&A$v=4%@7f;oFrxqVY$ragGI*~PsaB^JFqO1JV-QY3ZZxc#o*NfNTL8qmNYm;f< z;)0Rq)-cb62$@+Tf^NBlp8cQ)MXa1Kiv0=Ago4i)%L10#THpF2KH06jyd#71$mbfm z6o0e@1iq}BNN08BW;eTYhN1VSWJj_-NWw5Yzd>tBCDh&zVmK+;HN*EY&T4gCi-2km zOu!VzIf^B4yVy=X&s_52QY%DAVMz8GEoT-H5IMr=Hqv9e8F3dP9q4^~i<2PtUlrQr z)kmHWQNJFM2kjpPUTTrnY~5Vi*~n9zWP{$lx9dyY7V7c3gx*>rOuAj(tNJPl3n&Fc z7(SH-c{j=Vj@+>iP88$(_MnyVw+p!$I1Dw z{k?IxR@QiSwWN#2+?T(K?8(qH2hISV)GNPe$i+0BcT7-n@Q5%qUISn`iI6li#l0X` z`km_{S7Ca%u>?v2tM=Jq(aCN5fDto;myx)<+dFFV#{abMSX~q4z-CPmTgmtIR&D$$ z#l@VzAU^iku!8?dH(vcPKVOqvl}9Sn;U^8&}bd&wPm#`8TZk76S=aP3_}Qu z$N=qY%_i;J)0c5iBC&ODqC%-GTsqjRSV21aAhFrWHGDe}KIYuHbEkFvB%90cN9LO< zJRx#E-bkP8OL8$??38TU-%=rl0!*4!Vg%M{-P+K^WJ5m-T6L&6<6Gpb4lhK4z8=7-J%$*qW0U_xT+*fn?7BgH=wmTj6kv1cuE<2!P?vl3vXaE zt>fO`08R#IM!ok}S5+l=yaor5eVMO2aqO~aJGcghkNnBekRY2Papoq%aVW>rub@Xr zs+0xK$R3PrfaY6jX=NI`rE749c#dt{2RgsbC#5o-$kGN99o*gi#IXR(7B_*^M_E1z zHs|H!3)28SbR^>&V-a`0+9VGqX%$sfgW%39<|!p}24|DPRp-SGJ`5e<*J<=k2g|#n zGh>1VDZK%!pSIi*D1bleQOB^4P~IfpkLnSdaTfVf!Bp`8@qy-K4g#f-v1j3*82{r2 z`w!CI$@?fhOD~SoSn}h6nI>((&hUEIJ*Xyl2tAyGM)NlzM}ub!1~5@(V!z$;=Z_yH zd|57SX~DI$Uvg8I@rt+iD5RM4qeRT?aRFfh;<@O=XADPhz_rULZAV)zhTmWV32;i+ z&r4Y%gVCeKJj}o{c)!tV*-}A(+4mT0^;m+d{FO7uv~0(v*WyUaDUk*Tz^*$(fEOR< zyJy7E{}oG+4kz8Z0`-4$uqgvmzHWO^gTQedcOd=ln%5b=-O%eL|0EGvo%VlF8{QU9 z_k;C|Am0e&I8woU`q&j!o7xGe6#`F|-*PMniEw+*w1hiPDVy3j##JV64rHm!@TBpW zYP4t(23#0;7CH%8$qxYwgH9*l`ThI0YFZp+23m4> zLYh36xGFi%Bu(d5~?;$j8D2doZ3q-m|IMYmgAS(&RegboO|Mak7YFpRWJ z@wd3FgAqOi2aGow=iLXcF&fD=)5O9kGC?fqEQ%NJ`utxlfYP9`I+2R`+@Dng*0Tg0 zUONM#`W~8SzY%^R7rHXp-S~v>QVs;cO&*5pfDmY8LulsTdH1Cd94ml7{a4t^ zahM~z01|dHzrRG93}8_7n`T2c*hN(FfX#-X*>L=2H1@QhM|9}2M&7QNMvBEHci$TUVh8=3oiQ+ zUH~o~Fla<|p($B0%Cb+NVblza0eAf-!-n!|jXnOMv^0nYw8rlwp=-x4G@SLVc5gz; z%q(e*k^|>YRP$~iDMFs%^Ghoud(|TIbKRSI;X)Z`s15hk);p*ZywQP~Adp#B1|2rw zsQa@BH!H~wlsogN7{d^k@oLt+WV{S!0F0Rkb)U&GQgy-57f3s0rHCF>7~>`Hc=si> 
z{WPU%^V+AxA7;#WUm8TXRRRw%qXw4l%kW=oKKZ~IWo)DWrA*qy>0jv<+9=?3jb=s< z$~wSA?7(YaQ)P{@*ykX+dd(9vu&f=WjxSdlX3ELfCLDMRY&+pb|_sHxLLRf~QgVvr#!brAWBKESTtEvlGsK0i~F1n3Raj+Bm*OQ5E3q? zu%3!h#0L1CWP}n*Z+|3*6hW)1J>tvgUAykQK_0uAjuP(S^lwQWLuI0+f;nHfNVA2q z6oN{&Zv^BuI7f7kH2ilo32WG8U?cy_)QvsJMQUwCHTcrBe$9cqTm=7`<~36aT`Vl9 zsS>lSc1(N^R#veyv^WhIvumuW1X{eY#y-^$5_BA&Uo;y%XKXeMc~sWw(&-qp0a)zL z1Ogp~kBgK%67u*eZ*qHz0kwV|up@ow_VkkFE6zuxFcu41`Zuai_8@^_LpK;Bv zU(>tUYYH!|JpO7QdTPUd{o)mE{`jMv3|3SRm4ww?szk-9S{@aT~vO;M_>(kHwwnw4dNk!-@_saL&d*2gg*4T3WfDOO0x z{3nskheGGU!g&{`E2egU9VLKtNc>&CPf4p@U(TN$k-Q8b%9{v`)DZUGgvr%ocF5uK zPbOdG7{jG-!EC2!&X8`lJZGaz^+4LdmM9P`_Ti)Dt4yiw+o#V40v0on8rO)iu6(~F zFw>%UI3bY^Tu}O*L6@Zv*0yhTS%Mni1f%*<&*_0;0zki|#yPheT%UGLly zUi}7gogbBr7xfsFg(R;ErX%PAoey|oP@XJ&B@!bW$j-FWLn7grqvrTiw`A-qzXuJ$URQi!#Or2caiXvZNfEfPwgwqM+K~^)3`$C z#Q;U8#qdIV!m^6f~_rPQlTSn64%cdRlp{fy|t5{#-fx_3L-nLd-U>v>kQ=@BY-iNsh0Tg49;TS&GUI%NlFB+X*sH2~y4 zI7zOr5QxbtBv)E<^D#fww$&>gx^DaSfBj=8g+oGk^%8l3EDAb}cP^1SB$#2NGrpGg zXUCH~!LR<{iM!_&M+bWL9PF#6r(2*4XL5~;qsWO1>fLwDus27l1Ha@@J`v%D^UaLE z`-L%^MNbc!qB0wH=0q7fq5=ZZ7yO0_gcJ_l(L->F;d_}6#jN#gqySX3=GBTnfBiae z>C#lgqpc^6D_y%QE^ds^K^DDGy?`ah3^sPiREmn2^fhl<5asa4Z(?y}gSS&lz6W_* zf_C5$r(<;k0lo5tpeyJU4~0QYf2F0Rz6B_uHPOAMZ;OuB&rd;;B{}`=cgRRxSy?&% z33tN=L}pdLx)>QrdH>KGHLzqX5O9+5K~DV7uidpJuo^lm=byO+(6C#0del zPN(8t1Iwy*L3Su<9FNq&9o)$%YG0v;iYaeX^azz`O7li}1l;jBTU@FP<2HIc;Vr=e zY}rx8fBM*SfBO2SKX?$kKLx7``qpiY(B3zK9&;bp)y#}Njg1@aMD#BZqFmF{MT>xf zd2kjZ68P1C1BO)*?xTs_<2CcNG<~I)G=?fTgkkeXNK}XVits4F5)D# zLK6`CqTIdneq2g4u3QH$V?IOs$PM)!$>pYwDfUfP1Cj_1VGXjqNq^dNTAlT$L)*7& zM`)4S!bcMBC-v^kasG1D88tH}O`hDD|JGp(?F96w{g?-s5oMc-IChL(aNS6_M~1+V zj0*QkE;ALolksKLD0}gSi=NozB#7ukJYu{k(jlr{Bme+tyM3xStt8fmz2^H0fdpkC zclt^|Qmqca_h#hz1d!fiD-i8Ms7 z)*Oj}M+UJVcN>OYqL<&iJO`1@ziINbo&&)YX&CI*^mr)0EYIiVTV_=YmZ8Ci$N4kB z_KTW_N|ssd08{Rj$;`@eM0(g&gs5uUJQOu)F3ZQ*wp)4?x~LakyBqdv`K&^DJo_Pc z5Iy>oG0!Qg4Rtjwc)qp$@Jh&`t9lW?dv|9uv#8jE{5h|aHz3g=7>(vK-iD(Ywpz_D 
zR?!=1b-3#4YEXKT_ckxfACvpr>WWXF%q{1CWN82$Z$5ZT^L=DdIQOxk*(iZhR35}< zl;~Ka$B|T#-hn97MHt;>e9ppt)%(%b(12zX{{Ukb))d>vYjd-1E2>{YG3u)G;xu7z z^oq)YnEomBtrAu_bAw2f0aF@z@$$&hh&u=A55z#=FKZ{yXwWIL8EP0RW@5dEZemT5 zWVAS4jij_N@cQ~XJ2_qaKtX9tBZx(|^fm6$GSAz&xgqoi5)NsC3aAOu9DcWvDT>J* z3qZI01q8LmZu$e$KInnJ24g=}B!53+b~$Vl+iVN~dY;ChOH7MTr%YeVbR@xVzx_ zO7{CB;{gazZU8igVoj%!Ii@W})ILj|W;-Lnii98hJb>9tpT_a}5f-X(X~frZdSuJ% zi?cKb1c#Weq6#0<`~tClnsMWjb}lgi$of4NH#Tkcvv>8{P3&`G*s8mV(A&3(d?7dN z72b30-_AaXYgeMh$H0kA=D*uL;xN>RU|iZJCZmCX9ZcRanP9y0ESWrG23SLj<`K%u z%K8Hb?!C>u--ax;*tG=QkG9pHWYFvc4ZzDP7J`njYQ26}(B3T(5g9PIsyp}^gf$A; zzipc`oWh^?H+LG`vHPTC{qgUJq5HmRf@aO((fgaDR=F|T(%10ZcJa4YR~smx#yII=@!Hd?Tk2~BdRyFF zeD{((8kj7W)&;&ocMvw1MEGs!4t@1(muzuTvjZizDJ#!z@Ot>x(_IsoVXI|Ki-%*# zBPD0ygzETNEGv=4)ABRX9j#VAMZ`AC5uRH{Q*JVSRx^RN6`XfMirBx#VpUZsfi`f#}!qfSjZOFP)4 zeQjG@GLm*=7YCM-z`zp42z@VyTVM7P-1w7XjxeI;rDTVJBp1&>gdqDW0~yV<);TO% zxKIv+>>27#xmN%t$|joLc>3i+QVN=CYHp!+Rgs|^5qTS`UbKU*^?;B9poZr2>8c>; zM#bYb(MQ&rQwpvkQC_WEa$U2h!5{<4StziOm%&K-aFPsKwrVwWTp7AKQPa*ji-CQ8 z!|&DaDoHd8;XU-ZW}7^(Rf_HW4LLrtn~f)LbFhyZ^WrVA3E>@8i#Y21 z*da)*8dSl?_V#quthUQ`SPQjw`=^nKiD92UJDS=pZl~Esf@Sy_W%}cpc!hy{aFMzP zpxLxVAcQ7TyW4*uxYb~GJNOXgZv>x9G!Fw)X~pn8?pGvdAl}i%#lyl6sSfO1o&$+M zb_yL}7(i2Hi;=>8a#Cpz51u9s<}D%tqto=CnE>Bw`YGQ{qnS2r03Zi5O6dW@xIqBz zq5N3Ah{E$0nQa=|F~{=W-fJVfSWz;>Y@(@dE(_is`XU08-Lv#;9@gj}WK^@O0~eMg zA}1x;wugwJBZah!I61~e0@q5COQV?*d982+Y3Xdf6y;HLKp@X07Lzbp$iw949Cl6) z)9&HV-yj~d@q0icB#@&pnrWhpUtT$iV;6v&M*A991AVfQ_I9S9^2;Ngon;B7WZcbs z0N82BoT7dYc^x!!`X_JJ?%^e^iK5wT17x^jjzo{hAm?RXV4q(P8_@~QBWrUoApgrv zJOm&O{Hhu}ZL4dtE{BW4RpH+5zAbnt?Q5L~nLvpD@&X_qQ6~;2kGXe6nERsD_+5h= z`a;?Vk;yTJ(75GVgK(FVNa|ZvtQgiRTOpxV-Q9hf&Ni$=&*#)Jd{8BXXjY^lxhuEX znqN49R_9+QQb?|QZkVi|cfr?Wr5K@A2c)do9Pd>75K|uV#G%-MeczaOs==D34RF)s@yGTWt#wh0qPM4 ze0ogap#_7slQM4z)!eT8emXif?`~V|j=kPv0t9!s0Id86mFr!nOvwPE1d4m!?a0ZK z&3T7ThU}xFGw9v2xtWuLATRLx1;^Hdou$)vY%bpTPNsQJm-e;!ZH~@0otoZMD1+&_RxsE7){B>-I<4^;*hdTl|Bt 
zmsZ?obL(idLl8RzMnTACZydeCKV;A1hWg(&YrmPrf8*N#)wY=8Id7g*!3$R9O^7uG4HNx>UpvCCWf7=d z8jYmnxM{5ptZj0GUB7mwfN&Y7!V=Xg_ zh`DccSKPx_SNNhZWloq!=K{JuK^B}gvnyj0?{!K7@4N%@XP+d6KBYzI*5;tJA8>$| z1a{zc!gKnyez`_FLdh4>6P3Z(8q|R9v3v-})GfcSY?zq+6K+AaHK|QwXcqdloW+GQ z6?smw84|wAf6koF5@kd+sOrc}Z~&Aidm}b#W}M;AEDa)C=7`B%4fFX;cU+Zme2&5f zF*kwG4*i?kT-F}e|Aq)i;%mtMgQjK}BMC3+tz=X*?w|u^+bBe0xNwhT@Sx${ma~Vy z0i5#(`C0?uMRJzr^WDp?O8YYLZ;@R!q7PpT#LR3M`TrkE@L%k}Utt@t&baO?X&B)2 z?|;t0u*8iKLg(Ko9{|9c{NvOhE&qfomsv>h z&h6WYbTnT=dRbc!L9exPC=kCr{}8<7KWiXPGr#>jM;L+~37;o&smB6$XZ1m*AgZcG z?$Btly=S_I+Xm<;DusT?0$YdEzpK{ci;v9Ld32P-1kF2M!t=QZR;j%L3XQm-Vn24zWIDGZ%({Kiswt;>}7*O`YU^XNjlhO|_a|pROO^ z(vRIfpL6AR0q{ePoVi#$5)or%5JB;t^+hAP?}kUAJ#4AJr~k@kG`k{oG!NlB5i!6) zZ%B{`%QcjCS&6Xvby-<3-Q0Q;S`?h-h0crA|KpF#EGp^< zzxb%Q@_Cvztz+@0LnsPhUq0t9B|eGIKA;6%?(~6-8k3UY;W%gIp9*k&;>(I}-BA_< zA|;FWW20CwnXt`1o8u{vJA@`k*@QwRXX!Hv6R`jd-pUXPOL}a?Nq#AQ!))*tAZBYO0L5jQyl*j&o^;4&d;M$eDexIkcCRj$!H z7}?s;{Xl#u%q(Ud$?%rvPOo6yoX%ZV_k0=;)(8OfXc!j!%gnklqs%ysu~HQp^YVcr5@m4k*?Xv zQgB(`gj5aDO;;9LD;dl#Gc+1uSQwckW*(UK#aB1l?ZM`9`bD?OE7|^pB4#lgPl+b& z#9660q~up#BU>pYYx#tyWM^CL8_xt*Rp3CI4 zyalBc6U*lp=pv<8dRh+~AX4(7OD9cgO}5DhFKKHE7A{>n42qUCqXdGHef6K#E9t(x zvSw@W2Ks<=1GF7RajFS&>DmTWZ#Q%buF>F;9qDei&F=~{?>PECx(dowjrF0!h$fPh z&>foTW04U6X-v7SKB15O{361~I3Sv4iuLNp)^s8gS6KOJN$7Wrw+yaNxG{n7s3gl? zmX@+)t?7mA^oLx@KPpcoCkr9}?lK%)grvNgB@&pfR|E5`*{03S)kLswl%Frc?U;-6MlPajUi;Q3d- z2Cw<-P`acJM@%W@+-xbngkm!Kcqm(^i-}$6!qR1k9K@&fMwL%%q`U#N=iuJKXeRL} zoldAA^iYD_TQ}n7$)s<_3`vFtMG?Z@l(C6I?2=ii6h){^wMEH1HyF228i=Tj zWo%Lz3!#Xf*S*gU=Y0{2=qaWv8`Q3~QUJ?CI3c;4L87|%-Rn zP6?W{ZTt2;e+;DemF<)PADMHAfPP&TyS~!Ek4}||10iW6`&%Tx(by6Bvm%oXI*}CJ6Pr@! 
zf+ZXxSUA{TL;Ms3yL<9!v|#+2Pk1FQ7!EB3lZ3mf_8f}E?Xgg@0_zbksUn@Qyv7F| znVeEEFW5%6BXIuy>s+%5=OI98^Cuj^0J3!BulXme-|GI43y`0W3cYL=McWNdRNY5l z4cs8?+Sz%vYn?L&@Rs{=tT%n>1|ZwPce(iejg7l23}^66Jkk0Kx{_EIaXkkV%c}tI zt{;LylF%Nr?-6cYDXGKS^9>{Q+QH@!orLc1vy6WIp16i?RJcrxMBtf@+CRE%t)E{J z1gCpp+!1j&Ne1q_IxFL?Jp)jEQ$ku3t}Z9M+G2VY6Kp6rWOe@qIa z)efXmSU!nKb}-c5{Gyq)AK0?%lRBCc1pPkjB=xgYX6u<4t#&TE74h(G^SddgrJ_1T z%ceMw6bC~xTk!xe1C>gW14sZQ#ZKayuA~DP!4+554jj=|wT?R^{1(G7wLrJZKwZ@@bDl|l>sW|94A5m86R7^J`Z1K`xwtC|&0cKW zZSH}q%CE^as=tQzxCPu1ZT ziMwCNg`S=&By}Zm0F&W&XdV+&wI^IUhF?KUOyYp!(p6n=+4%=jI!n1f=;BALm`M>T z3PD59g}0H38Ppl~dJ*wbvHCMH`vbz=&P8J}s1l>&|`RDq; z$u4R}snd8Di*^wui>j(x5ifqvg18-e_$EDzNFYr9B(UmrR+`zqUAy~M$Dt+gc_j(v z9&_Q(j^P_(4@`<>Gz5yj7i5N5n%@u)lmek2?;oxPM9WZ@h~X# z_6ewc4s3F8=vqD?@rsX-y1>y!m zGn17W(Mx@N#4n)A24bB=fR-B=YQr|U`$Z=wzwwnd)a(@J;f2T-{G+3_oj!5Y({Z%+ za1)gV9>jXR2?5~s5$=7;xKu7U*vFlM8OMY!sg9#5dhRXVnmn}*oK1N`NO2r4;T;6Q zXftZzqPK6y9#w`U)d|QAL4i>UViWer-^v&WCt21sejf|#kXf7C%(ff!N@$;l9=LSL z@qRNL81!dcbJdt(RKnjOdl$1qU^K?j?u)noj)P2Lfb7RMnMmD5{4=seCjqnEa#bGJ zj_G>Ny|M6$6<>iE|9E-}S6kQi>t=(gt@s&dwv2Qwwos8rjnVL9(jDYRnW6s|+07#nf9tR$nH= zFWLy%uxb_^tMHBZ;+my^OWhd?TwV*uai<|Vo-UM3hjWw_I2;m~s|;TrOx2NZY}9QP z7ONb=>;*V1b%krnm}pl*GNs!duYdR?B_rN(31n(oc` zHq>Xf_;;^pM8J?`xU@sNy{b+i6^GxGy9|_4gk58RfVRmhnWKdT1>_R{;LChy1}J)$ zrHa6q@T{3u-^ojr{0um5k^!oPpo(8~WlK=pw9Ss+WK;xa5@R4thS zLr=by+LH@U3Gg|S{MUsW`S1`d-6c;>FbZP876EUjTYtAKHKE#D*bznU&n86oS39Gb1M|Vz#MS=C{a8UqR=Vq%1`((EqKxFR8R^D7!b7f;Vy0Ry=L#Nn>=6+44SP5I--pNjYu8+Q&2 z>*0hmJS+<0PD_pZ_m?$71-GSsVzy;kTT)i04C4CZ2C4{G#^N$1ow0`>79)$FXjJu5 zb65;QgaA;&$2&3-kxP?6A4UvDvs0qlf1|E~S8$;J2nK}AX;Q-JRb}RJ_34F09fixl zH@>aLs|w$@uTR~G5yOWUdTHqLLyl{c}wuseV`#1`qTOzAE$Z2iVQr8LTK4$*NGQZQh}trM*L){ zFdYcp{oK#?tgU=z7c;c`{MOF)p4CAw9s}HpS|gl2Cu?HN*YA%jgU|!2H}^`;jT=fL zeD!BQ9Eg2Dp9?7YdplkCq}|5B5PICMI2zYhq}bsH0THNwBig8;^DB+M$fkCZ^&tb+ z2$Xn;vmnvn_%6-z?>fe84k%?Jh4W{m(GwZEYd(U*A1dd0al`lP4L0DVbumI!fkqH$ zNVFl(B_YB&ZhHn}+du-NTcu2TcObnPH|w(JBOE@(3EYx$F3Jetr4g=^7cMkXQgWU@ 
z-!5(#vOiUoyLavg>$G_t>$gFced6rZR)m%qLtSXDwQhHSu=Br{OSQ{N6Azi*Hb2ih z{ddbZ`pD)EpT1|VVq;1c_58{`l;h#y;i(tCQ>k6<%Ql!h(qQfRL#5<31(i7Vr@{Nv zSe67jXR&bMN%s|3t4rEglr!X%YWfL5%Z52pwgNZ>tLrCUH1OYQuyPDLLa1$e@R=@K zIHss7SvneT{&t-90ua#D|7}G@&!NH!zX^9N`jz!N$Fe-0Dl4Z1m(QtX3eetD_#%qnpK#H<$u7;${L%V!nZfzmrV9@3HeS-?v1*w-)w+iLkWO`Kcj z&OMx=HmCMlv-z=(3%U<_Yo*!e> z@8ExKR3N$^m`-|sF0+FY9rAc8u|gbzMjW98*}^y4VU2~PCH>bjStTE72_;;=o_|&O zJhf>WAc@w+K1?^s_f8cQ5pT9ObWQBzoDtGm1~1I^Gv7{+8nh+o2}L8KITbO)Yh;Y& zA5)|30)$xJ*OQ~d8=U%^`Ew|8o5_a<=zm1);iQBk3l`=( zjx!sPO_Vzh6m;yDk`dH&x5|Dotmehc`Alne;P`Pni-OZLhR}j++~lOzG*nk;yY~Bs zJ0;C8hpj6Esl#~w_6IsR9*nJBI`=T!ibSW0KGj9T5v3MIgvt^g^MSKxCpx>$pC3vX zd^;gSxj1)k2>Im`K-+*>h^V6I_L^^&aTkm!Wd&3mOIwC)-5Sgs9=&sRk@zQoowWA* zP?+3QhW5UMQQ__vEuz2`TkOO8*_;4q1_%7}tnoPM0Bn{te)Hd*H17Y<5w%?7h;1z# zyjqYXr$=loXpK`&iEsXc2OB8icH|HuMX+PP&umxRLvRptNf~(D%K3;6QI2b)Cd?j& zj&_fKuyU#&x9{c+Sz-vuwNus|)}x@gA8mmZF+>>&Wq9QFxbxbk3q+FPBuImWHsL+^0-@JwiTO}`>tJI zd*;dc;jy{q)pgw$o`38!d_?ZA=ik?TSo>k&oHbR2$dM+1cIytGr z;x!#QG*R^vXpcaj@qeOSVH){mCLWH7cfLUxd~Q|qWu`@<&0!YK@?ej)z(7cYe-JWz z)ibh&Jb+J~@aeLGQ$=>RKZH;_B0SvUXusvVnisui!6xI7z+6lS-tlC zU=&J9(ofW@BXvL#;V3t2hTtVL8zVF$2^5gW3Ty_n*QA4ZgJ_U+rQRK9R^<~BY+ zJBA;$j!I{&4_I$%X>P`Vs^-J9J>%_MS8q@-B#QAydj=pOQsR3i`Ot__DIdK4H@G_r zNFmYv*h9Lxk)0j7@*ojhL~_Uzhq_#UfB z6{+20Zn4NinZr+=ze8Pf#K8FDAD+xvAIxjsAqE4CZfrfR#i#%jPJa#*3-JT&T~0VB zJG$$Xqx1}I4nWNI>~ug2y+J8d^sbN4nYJ-`1rZZ6zl81v26YDqg1OkBkKS+=rWl2m z(8=cA)NQg~CWGL-sAxpO0&^D+I3M|yLc#@i`AXSqCPfKg?f}Pf@G9EnG@%Bx>0CnA*&a1a9TfC0*C7#wY+4oogVk#hsN5H+9LHrmgfno?2uMi>mkz_1%inr@gz zAD6D(=~%<>@7J|pOI002tjmvOFFAVX5PZk!H?nLtJmNeRV|2^+F&#GS%Nn?Yj;=wu zv>b^LP6iw;nXSqUX=%vk0#+?K|Ml0zZ*yi=o*(k=}r{uDxQIhjQ79)c@-AJ0tfM@SHK#A zLwYN%I>qK*>ZF?a3=MbN243rF`L11B^*VB zB4Du;k3~_y{*)mFvAyt=G*+_f%`TDmkNZO1eoz1!HTPYI&?-}ws5Jw4t^WG;aKn9+ z0->R*mYWo=Nmy-^NwrzbR-_kTbb1PXUe2< zh#h&ijDj@EO+{M27&fa=b15(69*%K#?pF8@K8TyfuO$@;WY2SyUEAM4E)%Jwp4P!* zYK^TH;)qHI{O0yP678WV7>%yCYugr{NbI%QkfeCve!D4>fCcB~z&jnou1$W2>IHf+ 
zW9k(RHRgNPY#mjMaU9sEz|lC#A!aSy3yy}PYi|dkZU3eM$7QCYqEsuTu+D8|nEa8accJekc)7Sqj@%Y}2sjr=@v z%e;(Ow=>{TpTeC_OoJ$J=?b$R==%3JacabFNw+HyVVH=>;wQ5TaPFPm`J<^sggxMz z!1$6Lz;7GauypVSLyETm8N6sb_KL;)_68r64$q1^*L>&lfct$Np2{D-X;5@HaqwVz zk_B~|EP!h3#fVfkpg}KoyjI1`pH@2kbEbf-xLbCHTF@L<^_(Xsj~`DXFV}qIj)d>s zdnf6>{0Ni4#J{TV-&cBw&U5#d;;jll{Ahxa^b^Gvx#$FIKS!!faT`Yj%~ zgp=d>PH=%CFsL5kNsAEnZ6}tVdpG=E8|ce$R_ln)(WS#Y18CRXW^B0 zAAKcY9{R3VZa=ChMudHnTP zE5HP$jXcPri?7zX72Zd`M5GWLY|f6|yEk%)KG(rpX~%Cj8j?(G=Kbn*0Vm=M-pna$9c7+DO9e?`v2nGUeJVFx#qu2qI~nbH9-hE=~A@ z@C|;15Lef+ci+BQu)(99(L6Tiv(~?)trhd<%2I2mnK8-a9D`oHK3|Le`)^kS=mPPK zs|T$}XIZC89)CeIwxQ|shK`6jT2q_|jb{C`Qy~GAnd_rG0iJIF4a=gbVUJrU{mn)x z#<|_h%%}TTpb*1Zs!?t6|*f@Rhv5BTcAzgWUtA^;2)48v;VzZw7&k2qLu_ zALi$>+EM#SF~cYKu!8WFSA6f^)j)<)$_`d8UM@}n#N5qeQsq^)U?x!f}W zF@67ZbX0*W;@XH~t*Q#8p)=G+&oAUoT45o<#NL(ekzdk~Dw8)<2Zkfx~5 zWg{1z^{D=@5FP;_?0ywXt#9n!K<*QzLHm({OhR9Cx6=9hnYGeqSYyaeI#Cwx$~*r1 z`5oZ44K&}kJBXqkWbM=6l6E0r#7`k3sV_YV6%Omb&XMZXTUD?Amlx#`k zBFiRBW-MF2T*urz*5@f6gJj6=>xAwIxV$KZ1wO**Ox6tD?&wCY2%fX9F8DW=e1{RH zmR7)nY|cv))bw|cr2z~bFr2lb2wrAKorc6Kyf#_4#5x3Yz(w&krZ3?m~ zijG4&k`faGm`oLlrJw3uq5k2WGs?LQn-g%S8L%$i$4mV~Pp-2J|9@!>DKp z*9R$sbN)nxEDYScMFahwPjNOMiAvz+npMO&s7A&EPow1#*e0kiBy&1k~iQa((uWs-E@8!&D= zA(|z>1F&BA>+3W#MeZR}gim4CW5%T8F7|zvN-A;#l!`VdcCZmXZKaV=)6)a2kDU(JPx*< zGxPh2KTon3#nvGrDk_B7GH%du{KX&4V-hSTi0kHYYO#=;_;>&#W$ZHFpKAX|kte45 z3pc`pQGVK5S{7(rR?nggl?*mAk2Va9qG!=KwAc{NtH$wPRH9KKhPY)jH0{tYKW_(+ zZs(Jg>)P8-r5ZzBJ3rm3YhR0v0HS~cwrgZXEfq!K3t?TuaUUs0o^)YsN@hbSkwvbiejbp_1pq#reY62eP0}r~FVuJF+sUS#A!7=6r%Y0Dd{oe%!y7vkJgJolj zemDbk!eT&NJSc!{rCJXS{3ahz)q*+6W^h~DHapMGU@}_IM4ttFYRb45cp-qXY*ARu zcmcuRJU{tbFp5TD(WE2gT0Dc1f&#b0vebcO&zJSWbwc`qXn!&SYWK zg#2g}pv37_A1MoEdHw!b2YlP8zC&tgnk}kFSS-?!I{8itZQ~)GHfMG;&11k~B%K*w z+74XUI8B}fa6+U4@)-DO717QD-il)Mq#dOX4npNdRm`YJ7)s2&4z<`ax@W!`T+N<+ zBh^oNd3f}+JW()Pum5CP4$^6%!gA>#&YjEKh%=F{tri3ze7yH%7b90Ki0K=$7fCyA zF4coM7-D5J@!hNI=FHJo#)?pl=b7IMbi@UG_|)R&@e`aQs5pchcw0kV-5+q+m2g$N 
z9q7Vxs|@y@KQFihVR(ZCJ!-h&8PHE{$BpYTP9&IsxzXA`2@?GQx0RF<2V0wpf1=g& z>3?5u0Oyl$nzQi^{fg|#*$Ubx%vFYkL$y|ujbNa}Cw3mO`VUtld&Y}Y)OIesHxtg) z*vA$Sbr(LQUTgv;D0gUUAKH{4Vq5ZP@~b%Y4U^5SQeCucIj)1Xk3?PjSAyscmsY}DK6xusi=(Gz%~`IZ$G70C~- zQ075-z%s%8vXPFC5(BMstyjMwG7XY#)VGe63LIUji; zhxRh|LwsND0M^tUmw~V(?i_^5!ku(?!Bp&_XL5z(Zi%*6G!kjo2ox93gzQ>O<&?6G zGxhxHn)VpHckl5YP7C$WWY5L!?im^4z$=J3-%oWvhn>ZDcXGZ6VntNWJwv3Y9KLmL zkco*7R+pAzm#S{LFHS%rDhNyD^vNPs7|-_iV4f8i9HVOGr30%xm}gUZSmnaW zsiy;(4?8t>`Kx3gC3x!&9d0{=5;ccSvVTgV)nl@VUA7QFU@(0jj;#f+CcBxK7Qo5# z$`W2MX`psB8I2*C!|xqQhznPqs09Hvwn%BjqaPpvW^|{LqxputA{(@0^uL$+_ z&YkE}zjBifF-{@ER^sSW+6qUh4eKs(uz1Msr$s;9$Cro3uM_HNZ52upVrx^xv*R@S z(NSj*$yME3bu#sbc)lcgf5GP-CjDPrU*DA`e23%QXlkO-QSgY7ObtjSwRHlbjaiT= zmUD1>+J;_l_}#H{=gtL8bW6X1QK8n%$ymi626WS%+}z0(1rDbu)~F%fD2UEArS(Iu zWqfI)(3f>He8`IANT6ocDphWR z6@8GAE0yox>)Q4Koeep_MU-YpDb3wq-z)!J^Q=SZ5L$V3zPQN|u@|EiqGoeE86D68BC!H8%pOJx8EdK`O>>pwYHb-yfOM<+P2e`s2D zpp`j{9}?KDM#~u!tX#8Z+^I@h*^V2=lCFdgr=)VA{r!gz%hmuh$!)~DPlt~-m16A{C3~|S@Wh5#) zJ*cj>R(&R&Pdr$+;wsj^iYeUqj@sJd{`SzHa3IJ~Tsljb$WtjB`Q4)TIX53w{YP{! 
zXKzA_@h;73VYZdnyCmeC!Pd=+&#cTZLa+c{PE;(FX*^`pw0ea5rh!MVUfs8_E@)&5 zaH%*d?FO_KiemILY|o3vdur1~h!L}yxpDdwPYW(_uf!n&O7J)jb`u*Vwn#OyTu3H#ish-$&yplGaAJS8#zL@E?gq0$+CW);v zOlSd*^Cs0=t(e^ffvSw#$hUv))8IQW#9-n2y3dWd#=G)%Dy70}7&>x3A|Tmh;+4h( z>Kpb*+hn3lvOFitFpFT})m$N+rZM|6n8=b9Sr1*9=kxvJEa9`QzJ>uLi*Q>=7d`YF zoyN?5b(C?ai(3hyY4GOPjg>A*n+?W%;Jwhncua1WuOnW=+}VB!9dIAxCjDrAsFIBu zpqzJl^ym@p28ZtQb%pk@{I5lewu*$LW&~C;0?~y|11IK@51nEHx`A1ny{iOFPH9#h z8K8*q>&(U<$^HmSee})}nPeX_c&(D3qH9nX3q6$4gDOXeaE@$t{iwpBz&5R0`04n_ zA`;m{luW0tja5!mLFlAE>9%XylV$B4tkaj%H!F&aZT9tbcRlo`Zf;hMX^^krx!t}} zbu$d>5hWX4q@*8xlsG_`D}qOJu9fl8DV0+P4j5qi>I*%#@D$pg!JfW{xp`M`YF%A| z&qjW5;7j?}MU>_y938#g>h;iNGASbr6V;kG9|b!T>_%%3gOOveSxY^J>J7fX;VV-} z*3_5jrgLV_w2zi8-0*F&m(?+DI8S3g{3n@+5yI>13jyUg=s*Tp>**01o_N_SYVC`O zIaaD3eMl>z?|>5ofaHPVDt$@!q$5uVRyaB4-z=ar34L~QJ zp5E=-!@bebldc#jr8tkupqem13J>YveVdxkp92}8SRSQ=34SVjBsj~Mj*s`g2ETx) zwnG(j3=868w9|ntI3XSFYtI;gLUzzd;}zqz)__)36rm^c-WH|sl$l9LNJzK6y<495 z?nQPKe7}f0p9#JvbP)X;0Gsq{IhQh)HS2s5KMHACy_d;s=-fH=W8##>GWqfqZ=KO{FY)w4XRz-2{cK)fNo}QMFN0)B(dLNj>%Sig_o> zBPk!4)a)!67Q1iX_$K{(_G~JKlRs_7j1h7+D$ckw)m);OR<-m7f-b>LJlbdkm6zza zYB}{{e0uN96_{V(tE%6?Y1wPJ6<*T8^$ov8Qzwe;pE0!jppj;b4=RAhv_-~op^^fH zE3X$W&W&(wox~9VANMWV7Z~BOg2Ji~JCujYWV-1H&%AEJiSB?u-5i2w#M(l*b`@!Z zF@NTu<@^X9`IbUd#HpK1n>J}8W_hQ=D?{2_Tx7}(b!6#AIDj|!_;q5`4u%nNU_a-B zd6}wd@EPbhVRn#Wdwsuj2Lp=2LMN+MdQHwX?=u5F9ZmnrsX}~5NcE#tn$_|A` zFQi>~WjK9il*&jha9})lf4c-QF>u&R_bmn^j(1x>7!|Z_D0LU#pd|Sz-&;0!E<|iI z2AHTSojfeNmXjQ)YjE3ej-12X1P2eH@tlez^~2|`{3|2I&S$DkuBa#5?5TBeBt-o`-y}GwWD!i`)Bupw)vo^m4&31GQWn%@_A(a z@1Ip(BN2AA;p%Q6d=6ao9Cs+u{4Ny9&EOp@0Tzd8)EIik=3n`Vl{m{DogUJ}FioL> z@x3gCu{>eh%w~z0iW%5Bi5L#T|jUGNp(CO z1MBWTicy;@=1esNt(|IyLrI6}#ttKaO;wK+IQ3JQODQE|Lr1Zc#ouR~{cIt6G+f0y zg%aKxk$QG4FT0en(p;M&3QvJ|D|YJZ>A8R@XULvS*-YAdmrw?P6=6RIG9!~rVQXjS zVx)yH4lW}Z;iYGy)|RLEGoMOAqqaTcm{7jxgV5^)Zn6C}v{bN7JA(0u zn3CDcP9ksv)g{cwS*>x$>!*~|U%%=p6*@g>LXAkx!Zh-g_?u2)GD#=$bD>5@9 zBX1sypuMpcQ=K{=NZ1Isg%dZjvNmd8k_3fL+`kLUDF}(08JO{?gQ9`TIhT$Flfr6r 
zoPt5%HHG`)ttVTvgb+Z{>F&>`G4`wVPP!8KLc@MO2oDC2L-Ve{S|VZYQ8>(Nw6~BI zWVF4SbYjYl$v<-sIslI#nFu9$L_{}G0r<#9?u{4`2v+Fcf?|Fnmv~%)v6L>4p_GWh z8Z6toloo1g{rMZ`m4SrI4OHAqZmeKk&%Ifru&;hv)k#+cVZPV;kedF znoe>}lChE(CQr!+{@u9q}S$Q2-+Z~r&+%`u_M$LnZ^YyA=L7HS3c%>%GL{g z11?_%tJ^KT%b`}2BG&Ea^$UDO8cLdvwmXP4q?yGFkKz887!hLk64AtnZop~vdykRm%rTiRv7vRQuN#-OoZmx0o z?OV4Jh7Lf8M5Yj%jU9h8VrN;(uJ?EN!2(;If``)4V{w|Y2k*}Qg9jh}6|#77S~KFD z8V^kE7L}bw8oZIV3@CT*3Es5H4ffgdYE6b*Oq1@P-QqKjaguTuo`zujx!kALEMTl$ zLHWK}Oox5GbGI%bASpc0^C+U9?zs}+KAq$dRYRB)Jn4yg`}R$FC_kFfS1GMeo6PDb zHdF}iR0cx>*%|moRYk?0-dI!g18imZa_Y+E%PwFj_F~F+X_DQb%Q8mfpbZ}b z?@XW`O@D0Z1tih%2kjr{>gNn6nGv=@rbs0iL%m5CC7&R7dR%?UXx{+^L}_qJpKTd8 zFmeGNNLj)093#MM85g8A?%%r0n5@bg(8-^{R>qG0U02bSl3!>Y70GRl>T=D^$4$(b zn!;Q54szsZaHvz8{Ru42NZu-;Z)DT%E!xab&-^=|hSrT1`jiPQZ#Q1)&L3=}Z9&JZ z-;#1`6!k$>vZp%3NVk#{IICErdOz4l&_j2-at?g{ZQKw$uc&&b(c;AjT(_+a@ahKQ zc*p57n3PAlh@u8ahe7($|1W#t|E4rFG>YQ{g)F_OK4i=1FlLd+o4xUF8F>z@sDyIv z&_nUS1C|-Eo!?Fgczpd(mSuB(Gc?YTjpS=B?cUUr){>hOMCHR-&{qYF~rV0N^)~@GPv56 zMJ!Ci++L`@_twIUCaw?KcZoSPpRh8{X2ZSQ)$JLhb&2AB0W4|O@)$MY>Zy52jYG4n zt0amnI03cV0C_BpPRZ~~!bWn^#7A)HxO{v_oRRz*e(!7go@FcsMn;EDxrojKW?GQL ztdt67I1(>-E}w>~{b2w{`x)^iO&R^q_CZRzK)Uo(N;v_jC= z1j_VVWULbW%qqb!nZzVorvRtm!O?~p*Td+)qoZNcG+(MgFDQ=NdQ&xl6emqQjDi-8 z!|ubH8X7b#R%V#?cOcR{rieZC-4 z@SS_A2+YrsRjEai?EmA(YO_(`h_dLj=mZ5BF*&82eh{Ybq;@^VF4NJ1BIgVeuI6rK z)kEVC?k1)Y-PsM0HV{WZuK3RTqH-Y^l{(Etxg!fQ7t?k$2DdL+!5$g$-*y`Cl1(8@ zIy__f3q&Ed=;`z4oq)2X^VkpW_y~Sy@fSexA?MeY+{Db``}jd)HYcOm4yM4>-@xU0 z?t`;aM(b5Kj^Lnqd{o=HbAVw?e+{ZJCu@VR78%`%9b*Z0$Bt&9nLo^&xYwiwHC3l; z{d7&@RTlwAi9|E{p>$Vt^mF4IXo~BJ5bNqwxuLawAal&N(tOs%kFnA{Mozs6`MBK6~z~-=pQ&ure-~p3jV+?F_^?#Os8g>Z%#zWq$a6- zPa%_l(ei?NhGYL~-tQB~O8E=CyygQDqY%9`8gwL0Yv>Z)y~DWO)T)-%Y|3?|{$4V3 zKWV3Z+mNot?QFwr9%9}e8ylNZtBXy@kKxR|1a=&u$ya7^NYUCA%a-l(410(53C#tQ0}~bjOjSPp3AnP~&u_AA_jNL7Vo!CcEW`kdJznMarMv|SH_JF#jz;F2 zq%XfA^wVFiF*_+@G&2BFSS1mfI&|PbP4Q|!wSv9U3^%d^aaMq(7g%dk`PnA2hOJNa zJnWt0L$%+#wmYq`h{n`qU;?=;qwf#UFct-Ey_EIx<$mL7VMn%X-+pwWQNMm;$ltG- 
z;=Zk9`H)zac0sxgpBv)P+>)1tg`s@@q;q{o&)GNBccb!iDh(XpG7ha^vdf?;3#@== z*eH^o?m0h9%=z+)Ep>_OeyzSej8(9zfZL$~4Opo-yH1@wdmG#{agJ(eJ>8$72VT`0 zSTsFc*oS#J<*N-n*RUl_jxTSjtN5UMqq`ICvzY&;1W@f+l7pHyk7Y0@BEDO?v#pxXrymW!DzRPGt{>8#YvD=NBRk6O-L#qlK>s#i`YE zNMqr!I=^LLq7XNrSFdK`l|%D$-4|l5$dJC2@0&iY=6_s(@$ggqCAB1Z_JpvQ0zFm$sHNB zwTFMTbU)E*gS6<6z^R0G4mTmc_-R2w_##nw0ZqENxsCEip4yoF7N|#MQ(0Nr|2R`h zEJTLKZY?m?e?L*}dV9sSls=}+jLu)743R#Uxqo5QXTUQ~b)TR-6gfev}EEm2+3@1kK}`Pv#{}?Qa~+&Y zY}PyQG&TW!B%Q|)gPWt5H*{rnTt=HMWNqPZ+I{4?K~5c&3X6F=ORFHfg`1V+I8FcycIMtuDqw1M{A&~JblS?n*^DW zHf_OcS&*>4;ny^!otyi*MU+i|fQaZ%r%2PoJ|j?Ew#JMZ<2sP0>AO}K?^~7&1)i7U z4H9cjV`kuv^bQ;5!>$!>3FvTtCs9?btFN_O(|U(t&}c!wun+Qbl}O4gtGBuo&8)F0 ze8y#VgQ#9}M?7QtU9Zp zml=J+>l@IrJ^qOX1|h0*i6OYJsl-qh3w=7t7ieOpl8)5X0xw!bQ3q%Yb8UK6 zA(0YS{BBFn5%9&tG4Lz!bP?sT?jsp+(;zSYO;QnGfq!msL4BX?Ovomji)zOND>NZa zzg@;f6$$Fw3GAG}h_IK-dg{UQ!TXRXqyE`bpulP^0iO#yVsP;;=ZlsKgRDt7>MJX5ZC+%=BXsNy>&tMc(6xzJw zmhE>;QP$qcUK3)*k3w*6Z!=lt>rEDs-LVQ^Ay9k%^_NG#h{{{-P0T67f`Ws6dW}PH z${@JGe9Cl6s~*xd4x3K-Y`p%rAEK?cfCleJgQLmP?zG*qi1zB$LKg(#$}~nzbn?q| zD*s9g+PsNfnQJif7<=mdPf({qo43Wq0aQxMH*Sd-Ox)EAAAyMhsUW)}pYh(;C13%% zcXR{&Au6+^{7oF*9U)96G!680byIKO-aOZfVxzXUHm{ho%lg6&%mohicMR*Ik@m;Z z0nNI}j1~Tcx?hV_Xp1~JJHjvZ8t@?t`v z6`ear%h4nflD-hRc5h|rJ*;HsRdW&o&9{fLQBGGK;CYwUaxV;;gtl7xm#3!%oXtD1 zP@g6wCy&AiCHfSc1(s&qf3s!M=)l9QVV513|3A**wBos50RDT@pYr6ytwg*!O{S4udoKX3lgTARSpOZPZOpRFSf;ddICAc_q}X+9VG6Uz^iCO2q?H*=`Q zu_$W~JTvizGAbd^FuAZycmeQhH2qviw=zC%Eli>8YSy`Qq{Ogaef8>9@%!>%%U2jr z`oShv7ib{qO$T8~dk3%2toH!voWO%*cVFP)oE&@1gul#WH*^=$Voi(nzw3I^>}R^4 zrN!Zb*q5*S`C$ma!ha!QLob^qCF1j{~G;j-TMM#Jk@MmPdPD;wETF;Zbu%I=cxt&=$nZ|NC!8D?kY&>>1A&_N7;8Z zM@)JqlP(Z7A>;pe#8ghu31!g-=w?lMdD(Z2>$#~+u`rLm<8ht-kbyO8g(4!w02lA& zyggSS#2Xt=iq8gWiP-T6bSx1T%&Vd7l$meoF0EbqW8X+`L8|gBCF8(`1-XpOsJ+G$ zPq;CZ;3via)8HXYbX|KX%AvbLEMy@9vh4{87XR*iA4l)Tw@#r#j?Lo0)cua%yd2;u zFb`~~Teoih&ObOaGHrS}6cIn8*xDbI;_%wW+Q-$dN`54AhQh+^l|5}cyL4oR$B=u| z*|UHBzf=_e!66!&TD*bAsO3(s!j3jwBLYg+w4zXjHH^BASpAZ96gd-CA-fNdY4AwP 
z?RJpbz*wmlb53dwH9uM3s9t09Eyo}TcEf;;k;wfQ zBI-w`yYB#~iJ*S#Tn9z=e@INGdsTP9=fZAhG;r-I#|+PST7dW?M>aNfrqpLw{Si%i z>P#SH!_;a2S%J~;g4G7HAJBL`16@do@n4cJUfjed+?3lvTRZLU-K{_N(O6h8zN4*d zGpni~u@Sww@-;BIa=$bP0u7BUQF?Du(IZT;f{BXn1ux~2YX<;$H+;EK27NzK3G83;pOb7%%U-L zoQIgKxqRdIHV4^^uF`pnLDiP>jeG>Yd4eUg{1!BlJxyVR3$aGai^(ig3rqi=^IC4s z+6|B(C&1DM0z8MTylP96102u5FmMV6?fGw@6QcTy$hA@%-G1vhnlDOsU4uu|5s<5* z>cuCWJKS^SGx9C-E7eoMJv5#khj&3s*_t1J7z7!!1-HA@X*{m0yvV`RWxPhG_4|p! z@7Tkl12IyBHQbehx73}P4QkezvC#922@v)B z7+$A?(R>=CP#U|p4R`zAn)P7mq2Jd)+JoDqav`I#k779k3swYEUpO<-;JIZil#6bdQEtLbq)b z)#R^fQ1}>odlNRV>2t{7n|Esd5Jv#DF2fy5V#F8!og*Be{$H(|cQQYRCmspDGjqkGRqab7tV`!oCgArszbiw7V5Yx= z??U#O*_MIqR>tf_R9idO^P*G10_^g^Td<4?0|(aEe);mHh!;M;MiovQRicew1HIg& zcoQz7AjH>x^S6jUpyqe_9iSDTm?!*+q_C@Ji5_54uu$ycPN08*nvpk}!LK)dL!pyv zp+AKTmbI0_iTEVd0k7)l_BP2;!8=Q!TiTHEYrQat-w5^ivl{WGsaQI|vZfRik>r~qLaq=R zmwRuy`uIjQCJp{PMcYA&uPZ9tK)6zlz}#oiOjz6$Hnx0uLse^x$6U**SqO=M;u3j3 zZ^JXx9V3RKW{}`u=}IdJf?ifXB5}nKkUhnJjvrU;XyWBuE@niWh^Qy-3m0x7D4Ct1 zDQ(lPU8MF^p1L^Pj-QIQy0*S<;1D|*GlI;j71JfiA~ce7xR21Z$&9y}2>`wZ_ZSGV0S&O`sgo!dsJ&ZQNf!op+S$v?zQr~IxTYcn zFit`Ww6)Rvy84u5urMUdqlmzL#x9`NYo2j+8ktr z($uN7U;?r)ym~*zD`~gb2om$6SAjanH{h>BBZJ)zA@8|e9b~~)5qg{A%hz~ZqJ=`q znxqbn;XW_ecNq0PG51Y^us}Js3WAGBh2M#SC(xJ3&Vsa?Fs4)SIm3d-|}T6_V1t2 z|172hH0WmvYuB~{Sa+6Oo?vJRF%v8>>$hydsTp%BU%ZUS-#t5~)w5o9C}c_p)@Ud(KQ`7!-Q4k3F><5v~X@bsn)& z8FpR&H*`lg>D6pa`7UXkbJYdJb=k<0bc@PGQ{UeC&+!1vbY~jLHyFf)=p&#ba~cz-TUpEfV*pkRN@)9$A_|WbAOz0h+*IWRT){|WW9#sHAk`I1uI##3FwWo=CGp|of8V@EU zI4FgGd-r7fg=)HqgXl-9Sgx{_09neYMH#>7^QLf(K803)FIjVK)t4rGFO9H-ghj?D zzf(aCzXN%p%K7R)v3SccJ*M2 z)A^EBhLL~bt&NQ<)er7fj$@kHL6m6<1?**E*t1(7Pr>5y@~nlj{f6W=(2tNNTkcq4 zIAj?_5FAX37sBSpNyRc%wr0{&&+Cc7t*BrUp2P7W$z+AChgTs4n#_+}0I2kHkF2KH z7S%f{cbCf#xk5}PhFj>UHycB*9{^q*@n_AqZyRYntrKF{O0l)@_U&fe(HxD;g6?UD zh@sx2Zi^rdxEG}60N=}gy0z*Y<>#P}DB%Ad5Cj8B0^Y23^xqndc2O$dRw%&9OGeSO zaNSEQnDwznX2(`zOo{lMGEpsDtc_Hrhjhn$a{9lfO2b!%tIh%5W=Cd@(K@C;&ljI(vvb2Vp}>vhG;}V 
zyFt2)Gh`a~xs?dcSS>q#BMHFEc^IEWv`bjM?VoELIaVQsRtSfb$Y1pHHauTTTR4*l za-5dz=k)@)Y#@{+>E=?p->` z{hQ8v3;!$h5aRM+v;`B1Vw5;DN9=_LIC`q?NCJWwjA_SmH3XN*UBEz9m?;jh90WPr zn?q(%*U*6N00EF3pP(!hLB5&`)rS32hR(_gJA0$zh4Z;(>tarzK&je$dFAOCwA-J+ z=!rC^XZ9Q3k>&NDpy!=5)&$$6gKWWaKT!nfW#vvd6oCFRcyflrX)dXn! zhujERj%?B$eXA@9cQZVPfs8nr+=PsyLCl)GMH2;Qa3J*|bfRBK(7-@O4vHWp>m`4FR^)AUo zaIfDJ-~vO39Mz4T(*6KN`r-Hyf?>F53U80M7orEHTii@*m;J6^g?H<|vrH$(u=N8w zhsuK(?aBI?j^G8M>m#C8EaF zPPKeGeAQ=Va7h#?u3v`&WbD1MZuYOft_y(QNEJ8V=F(#+_?tZA{zC_A?Tgl8taEDu zB^Xoxh-c(@IjvKsJovBU!vD=#`1cRRC=z(>jMq=fFJU8e6JufUH8CmjO9u3CW`3ro zgW4j95};L>fp6%hzCl-A&t=fp;UGMwf=?LANMd&u-LBxylbND>x19l*uZEz|+jTmENlg14HM+X5kVSDR!SB!@)d^g|? zq$gp>X|gRLCF2x)>Bm0DF6S-&WpL{vW^vGREb;E^`}^mshRBxq-0RXCsb*2fr~26F zcyk|JGB>wfI+Z@lnzCSSD^BaKf`MH-bqWLTacJcWrJY5cN$1W`6$HE{gKlE_3Lsef zo=A4bh^f(%9NJ{*YdcF_ZF`w5k7bBX4q; z=FCp*y%?*C=MVFjJr;WX`n7^LY!m=k*Os*QFn6e#*$tpw=lQ|{#FkMhfpzl`Vbs0Q zN?6idyE?Cz$>uVm0KNk8n)I832S6FK+jR2u>AO#s|HpKOipgMxOge;>{n-|&jxb&0 z;#q*RDp+(Z4B`kxJ$xp>+i|aZjc?aVim|WLX>{c@(EXo(!rzB@$bD261@ojSQ$~%b zBhHEM+{=7MUBmrq!(Z3auL$GT!Vt9szv@umU3VsCtt|)W3A4@q^s4%}V2N4R8lGN&~cOzJI^T@JiK{xsWrJ$1pVHmds`$ z4QQ9ad<=%tGqw-qpe%+7yv?93{pQV00qe<*SPr+I+XrfZmr?6b=7ntB(VDCw@?*ytGagJN47*Y8KkR(s*s*Silvn%_rmvyr zOc;d2kW@b%0SDl6=(2s{mt|<^5)Fo=%~2TAuwCZ%|3WsyS62J4s{wJ8v{98y8;%8P z&J)dloO~+4IzzbI4v~T@#Gt$IkcGYgOb4MLeR@qLA*|qlCCrK#&87L%`mX3?zHX+@ zq*#W@Rk@&>pU5prok3j$|9bclXArsqhrj`pbE@<61XU_8Z-T~DKQaEmgqSU`QlTDs zPWvyDr0X{xE+(S7yB!#YeR1NMHRu1bPoh+p##etlG7MqS!fY_S8yXT4p7$A%3~R== zmTm9WGN?iQU3Bnub-g+H1q&G~yQFWlJ5>Vnykbo=7;lt!ue-a}{-(b*xXZfth`b-q z3JZmuRBuMDxNt4t&Q=Pu0}zEOOaOOlfBg9TdlxoTxM)s3LjfiWw%<3l!!qfwt)4S` z_3L+&qZQSLRz#e&bPY1%*7#Jwf@(q4vCWAmXvX+%tlb^z7-ZU+Sw~1Qs?oEV5s`Ov zS1%1)VKnG}TmTB$L|n#S7K68I^kQI8z+GlU6yia_j&b^^8B9PEu|tX3Vf#F|iBtF( zn1@-sKZC(SLZ!+UPyGg5jq{7n%)A^((f_vlei+C?jo|CWJ$&(ER($i`XkcLdEG>dX z5nix~rFC9lqLZGml65n;Zr*+v0~TzL-FFtE7s$n`Nkq+iEe7fAupc}21`VxtUS{4( zA16qy2~bRwx&y&dAdbs1UW+~9V3-q!HLf^QVz#7CH&Y|7Fh2J&{inb-kD-Lw5bRW; 
z??kq;NyrV1)_CS-Z8Br(RN1k){|=`^grv!Td3j|3)EpnRn$sqp2-|Wf#_o;K%>4_O z@hG0^S0BppOUvH1Tdz-yqyJfYOmY&HiuP6hL6%SM`z*XH?zhuN9?hs})Am?;P@M>! zVb}p#fx=-GmZ0)88a97^KM!o3G!-h&0a!#IF?qSa%g_gFeUw z?5{9j*eH+Z1if_;@xXqw)Ccs?`s0OQ&74-u?mBQ}yTOnTxC1+b%cnep4-A9bJskpQ zWX77Yc6I@9C=1>IR351$!!vJp#Va40{axK=VQ4ng&q}L?UI1Yr}bGb7b~7rQ0?{ zBKYN#@31=$2#{C2<%9l>n`kX;m)i7Ww;MIGy9ugf*g?Xh*H45~D{}AnKM);VSjs67 z+#PxKn-!z1taN(~5zH3Q_hcjPN)*-h`NdLp^y%|@_!E%Ac+4@2j8H()rjmWizVzkh za$%7NLSOs#gsl$Pi3HFQ4-Tg0LW+0zh{MvL=6O=j*@>qNCYrL@WK)-Mgdw++rUHRr>@f&DXF_ThSy9R88taf%_xTBUdd``+y0u5NG`uI7S9T4U zvDIvmoVt-`SU!!Y+ci@bmjWGH*xRe6g+s#Gvjd#Cv5?X)T+lDrk4r6vnzy=b(;nD_ zNA2~HFi_S;l+3gzO?l?Zb2uLe0ZOV?aMaOw+nV47+erpQ+%dO9=rA*Oc9PDL<(0G6 zvQ1ERSpKaqWQ}|0`Bb9sRSn_%*%o{Qq~ZNr@8zSXay|h=vJc@n-V6wI1HgS1gz|xB&&!o%jBH^Lv$N9tRpEn?4(uG zJ|DfP0%Vu6FImQlD#mUbIl!*+dgf%+*5g3pWSiq~-FBTrW=BA^B(}Ohl&swjYhs@Z zQJjvk)@47o8Pf3}32+b1(CYXpk5$l00B@6XslMvhyEfmEz0;Es)8)RXk{)0saMVr!b{0xSonGkgK#_i6FIL!~<)rdbdVDWQPYLHL97%IBqhwna7?hImb z1=IiVLUz5#j~_Ns8F9xPaZ6N?CC~g#CYEAbL*uRc&kZ!xdfgBIZ_f5N-EtOJ%*mup zsjsz?EZ~8ElHZz}eSrIQI1zw1of0?tJ?m8xP!A}f`ZExr5iEk}c+BYe^DJMDSub=P zqNAcx>2Q~qz$7F5zRb4V&;&l=Ud44(@DM8wmKquuh_6WYYTCtOs??=3c~C2%SXRA3 zSFr&m&w&dUcBi5XmRZ8NW% z?O|J&5n{#v=|T+Uh7f>h(o_0@{{rBb?h6BVMbg{oAF=>*9;dg1`u9(Tukc1wgfiey zp9&eFr?SBP@G>+Au-0}Y!oAMon$G{pp$9e^gICKUicnSHoWsBNMnzcwFRZwaSt+C1 zn%A%*=(|1oqbiX$>7=`CErO0%aBey6v$ULGuF^7aI#YcH>O`2Wt}#H^rR9vd9=F%O3oMZ3eys} zQUg#e|KR2?LBhX(*Irs*F&lfdpC)Yarz`whP$O^v zv8hQ`@#mdDS0WVfX%jk;$s$CsRpvon))}k2AJ?!;B5>=E|J%B&2}QgtzEjR1 zi3$GqUtt=kq4UKQ$AF)>KRwN1P$Oiq(EGoMztykrw2w51ubXDuL^vW8iUWtP`CO(d z%BJd<7;j2J{3Z);oPt3dEvL}XHslg%O&Eq&$5N?BCdD#>Vj7@z3qS0;gV*V3;Uyx{ zRORDzz(8GxADE+Zrt}!y#FH2!b3or2*ZgEj#+#X}?-J7{qfZ=-PCa$Ll{9JG*pk_Q z>9~y9vp#je+KmkqYU4BmquWlu37+mo^ZHQybY*Mhp~}p1)dQA;k!tQa%XUSN<)M9- zQ^0l_y2RRbw=63C^zFlpw-t;Ha9T-I8-`gNC-VOg_U2JN@7@1*B~DS)Cqw3>B0?pZ zsT8H8GG%O#;s}|i3=JBTM4 zu63?+oufYQ&-=Asdq1cBay7DpJcK$cZy}k7M7UObkKrL>zorszv&43oFV-HMdwEm) zQL$vAH$?`&=j7?rr}GDNxV5xf@7ugO0zp1ZU;>n7cqe~9ST*Nfhi^ 
z(5T}n59CGsrixVyA(m8fvB8MXEK>Y5NQ1}hdW2DoCMkgTdSX0fhA$#}(L;U!y4{j3 z6Tm%9645}2Z4|Pv%bCtBMudoeQz;T>Dnj*q@-%Uw81<7Ey{4x8tn58TDn?n|w}5&Z zwryLhpoj-a-HK<#&)=|{IE-w>G3tIm?py{dC45!bqUW|na18ZedhiGv&|aFGLeDUk z?Mox2tQkQ3XJ}(1#ku#&@VFs41dX(()P8=?Fwo)LCvp}z^^Lj9yuBsj5EOg6;pUEm z!?0jTl9$qLvYe4??{kPiWRbGdT2iQnXuv!$MSl*G3kKgpg6hdrr@ZNy z;vY$l;=)X2^Qgo3u?6gr3p`uAj2Z?|pSgKhTJ8>%Kzi=>9H z-~tDo&@^Z!KUmPZP)QW}9YUX2Ks1H?*!5@_1~I|sX3=x!!=t- z*#{Ey%VXP8y>avA%K!of22BV6PA-*y6 zDQmJtgghKe^SEgj1KR?&#p{0Vad;(eI_jM&A2+3J#?2SQ=QnMOj<3B3;ta`s8}yDL zWhR0?#il>J_uoeUVTOK++fYFjSu&sazzyJ={Zi?k!a=xgPuJl9t38Qqo1=4hFz{+? zSthB4#7e(QQQPndEBzay7Uq^DbvPI1i(DoZ>Z zjUH2PTqjR9HfwYnA!O=En|n zKt3VHpUWp$3`Kxm4lPHrS4Dp0yCeWG332I1%JdY5Dfv^akKEiAna!2!52pF?C*z|3 z)kjbS2ry*v$fJ)9r!@ai0zD}9#As8zwH$St1o%ZRs*`MKaKAzCD6Q9b58h$eF{AOj zQC7w;OVr=|k6E1PxF|^(%*u)z=e*TSS#qHV;7yeNf8C8UDH2di|k58dYzwMdGSTZ+Z6~07@GOSSQo|M6m)8UwkFc zid>;@)$(^{QO7;h&wHQe*Z2x-r3+= z7n5e+LftPO3JRe{sLi=4EfUjfxRpL7hUglJ>XX5zrE>+Xr z+*H6rAIY3Y`)z%xhh=Q!$as{?(x#O3Mv}?(_Wi(U8S<-`l!Pn@Z-vUk!{VvNQfgN6EUSnRD36cUZxJR29Dh zixFmie}?!P!fzCOouRKghp(@C1}6lD@W}mlPtWzoqmq~}DdAxSAY`-mz*u43b--_z8AltkO4wWg%Ox)V#!}WC}Eu3X=!FbIF z?dCJVI|x~>@PAB?^d`M=BZDPJY?>Kr6!DAEGz88Dzb#-csZ|R7;HVn~#tBlX`&#wq z?W2?W3~I;d0SMqyTEYIoUM(4!5uR^A@7wV!tl+~ZpV5jV`0}Q;@!5vsmuaOqZ6S10 z#Kw*KhtDF107OYV+jPxK5jidi=bPzP1l4GOKC-qijok>Qv(x8gI5*iTF0cWK90PQp zGGtFL%B>B}VOzBgZvo(shA(%o3~hhR?}N;E5D`=MqQRoYJQy+*aWgB|OfvrZ2xENS zogQ0Uf?p<6TEh4iDFWpkD54@ng@=g z5o2?p&w3fA$Y^kSHHb&dWgnY!iiuNpOixaOxCI|?A`1Ltz+&PVZg-cw3%y>wZX9#w z-H-*}F;|GOgwa3$iTgi{Cl@}6Senc?M!5Yc9u`ArcTI2>%R=-hpCj42eWzF8xi~Bu$68Jz099!qYCT=R3igr+z0wOHXD_HsKk??CjhFdjp);4>mxNr%Oa3uGQ7Lp_K%x5F^m`F z$6jjtvF@_K{K);54mTM&%g_V;c@xi$4A)KX^Q9PW*Cq0%sXz)_Tb5kCd^!9|;QaZ= zLWfmB?|Laga<=f9Nt~dYI zkfFYc)hGffREAS)X;RdtAiBe7Qec~g1XP&6y??v!(T`|*%ig4V<6$-Rb+j`}`3Ft+ zz|)j7q9P86eXs<9m8r^Wu@%==Tc@OahV@rW@!l6X+uqs1A&27g`aXM`g85>9yg&$! 
zu5L@Kql~|Kq&f(kvoK|$YpgdHpTF-hw4anSzR4Tqwdj&)?#n*49_PQ-Y3sTttGd6l zGZ5K^2!1Lba~ee6117jI_ZIE%t$+Wudn}n2;7Dh$o*+ODpMFXedB)v%2_4)cN_qtD z(d!;Md?m3w6m9pvpFsnFW}4Bx{(8j;GVK1IQJLi%E8W4Y31!aX3rgIL1jZA6h>5m1 z%ArDsWOIX7@WuK#lSoFC`RtfJQjA88mPJwkoPPbm_MXnu=XM7t-p@FG=7A#a`=iy=}|3y8s+PUMFAuXZ5$<0L54L z4(BFjC=|+hJBA;1n0Av5YyZd1ZP+ZG04i}eZ5j(mqgcHUcVYFlK9|xzUEtR+X|yqI zf~pR!KPKfIA_re|W8Ot8tbthBr%9*Qt;0|cR`-*XXvUF2cbTx?Jvkfaxt3bRHNW4L zv|C9;EGgUJu~ycU7t}Orq9Hkj1Gdj*cb;4wK-;B~IRY1!cVD&uN%nCpM==O>zv)bY z2V`@rsu!;w#mjG*c#UB0!mZr7QpG`6RYO8*oL20vycVd zbX-{;OlsmVDdVnKY`YWw1~uOnc7QCJ!i6*`)0Yi0F!|Cq_5D#??93Q2!CNQh!TFk* z+4UUwH&%SmjUc&krR%7;3?xO-o9O|`z6rFX!^VsF%886M)6YVD7=)td{SiL#*RfP50J$swJjb4b6+ee=$sIr0S#R@)9)nVH1Fj4B-aWM0jEiaB$GPqg2S zof{w{Rje4X5teNE?haB0)25zAC6WjqP#mKQUOqXY%22bd8EIB_U-z2dv4#ioaRxdX*>&(@4cK#S!`0*Z zdZ%dkxn{)8ebhh}8zOtITY*9gC^X?S0F|s~i=4m@e^u1fWeO4(>GG>!H|M|9e(`_- zCG)iYN`;=aL8l4~B1gPr{4A5bZYO@T9(=WjF8xQ_%3>lV6iE{g1G+Tf+^GDPLT|fG zQ%CnabcOiMp`1tGwvGM6W@`Y>Zv*mrT_3}|i{vGYk>|btT7_FvGtCi7ySPm90Gr;O zpB<*-;Lp4fx^5L*z^_*MEh~Fmx+W$<*Gf@)PXH5K z4MqLe{~c9$L~|N^+hgr_5t!fAk9ZjdEwZTlFPc(g@7AlvA`WO?_JN!%>a+0*2F+nC z^D%L4ElB6JPbv}h?uzYOw^po{^-}$&c=vt|H(+2njnmH3^~6UHx{L_J|FyIEAo3p( zhUy(ogK|KZvfBeX1oZQJC)f|xQQK;IBls{vB})PAyI z@%7^3gE45ARE7Aa+f?2=V4dJp-GhJMSC1bX$8o}Jn6ciB7c_jyfA==6cUgal ze3;w&Xdc>dsJ16`WU95eC@7`K($vFrHT5<41)D>0m!Aa{v+@2q1N_WAW(PqN3_H>^ zb0mYW;zgE4;wJMp-f#8dHfZE;$*`K}n_%2So{O*%JjT$%YyC9<8Px z3!OQINH2_RvD>!Ul_@%PilJLIog=GRF~+RCEYbj`)It83Vj=Xj^J%X3bN;imo3TX` zbY$y7_#WAaYW{+YT-H)H`GgQ=q1EXJx9Vn&i0@AHWqn6(^a7|Y*Xh$Wsc?~7dh_5; z1n~;Up4wT5k;W&>b^Esrs?^*U228!YYgQDLQJe9Tb|tEPc{PT9i~;3UC$h3Cf64B8 zNkwCByX$(HFl^YZWbBO1i33b|-M~M~0qL3yMJz67opTgZ*>WU05M^kWzw#Gp(y2AA z<0lFBh;4IQav8-*(RGS#dQ0mvKm;{S%@=FfPf|#?X*`)Ah+Onv3*kSuzsrBETeF7< z_5esLbf3WGT@^~T8KOw!fp3!CG0NY{@>ReDI25YFA>?FW(>R8Dn4&BT6t`y} zZ}=GBdDoBp(uOfQD~LIWSwR+1A=E+JGPFsuzFWoik3ar^v(vH?%w~TWgo@gfr%#`% zzXnuislC(azXj*u0HjYVr40c7(=rf#2U7K)TB-tc?RlGT3zl~Kb`}(e*EU8RnIztE_XP}bZp&w 
z7z?^zs;KWfR|*KnS-A-eQ`n4I8)C%#8PW+!=hyp8MIVJ~wq_KgADZ#e>#-~?9>Vf2 zb$gUOq0c7GepcX?#0w|)io);>@QnR0ywKVN?AfI=I2MFXf0D|pcV_`TsoR|+WQ}q$ zfN0z)9vRai64v<6sHU2y2J8^o1u8Gh2tGFAoSumh<9rLwY#vR#asSVsKJ7owmNTF0 zTB%hGn$k|y&=N-Pf6GWEhG{XRT^kN1WKj5eDG!iWk>AVE%dlPl4cD)LVUg97ym=Rb zIbotpL@ zo7Am_!+p}{x8Y>X3V4hg0W@Ftm(Wh!E*^W>n=Z1Ftnr+AY}6->d7~uJ%=#~e)xnMI zmY1_gy2>Jx%vd+iBkJzSE2X$B96Xbx?kD*3Uz<+5HPr`;S9VOyh+P%D7`6y z9@nl2_*&3GN6wovym>V`(jL6<4>ABk#>xTCyUpz{TcRZ=Gg*&|nrEv~1SUhDoO-8C zlu~}(E%0QXpg~w740k|%1rW|cq8vsH2h+~bZ8G-ua|f9miJs=YJy{^g6@1ng9B7eqiuYt6w!#}0-p3B!JVoas zWer~fRC+WR+MJm*}tl&R|yF`Q23tV)3E9AsX;>+86h8xHNNXG=lR z_KpJrlzuZmwFHZbHlb+<&q}tcoD-8em(iDUP!pOp{qEQuhR2O6@2uL)Xc*$ybVz~2pw7(zxDFxlsz6ffKLt~LF! zN%|jS%3+)xaC{sZYI3 zD%cMju<#2ci%ba_k}obD_S7~B_FCb>-s=UXP8 zE@r7^^5Dn;=RdVW_yb!|k)EMJGMu;wwp9~!gykpO*$a1U-Q8vlXYvUs(?sa17&n%A z)%N9DW(0}SpdYX9%)eEC8GV(enDzQUrZrVkqO6=(L%*Te9mGf>`w~)6BZos&c?fwf zR+1H2%!07H*JO){bP$PV=!QOQfV&T8d;?GJk}-G?>2(x!|EByT6pA7FgUomo+s8^W z4b|=AVrDf>v}j(j09HP}hYlV3gCioyWs|`y8x0K)k2?v~X+zJ$!i$2f@6nY>^Kttd zsJ7uwM^U>#!*9R1!F0+mUPnE)VHBmoK4{ZEXp~7c=E`+k>pkEr&Ciu~Vd3G@sa@aj zTCFup%??N8trVR%s!!puD!#$c$xgJk>_=;b7TaLrA>fJ3{7-^?P^L|yWFkfJzg zgQ2hJi%l&oLLmip+Sa_o3#*D+Jaa3OJam_{JP~SpHo61FZi+$-Ot>bAKUehV+Z_RV z@JC#IH`(GM<=bIN3OKF#83-H6lhFG}X%XdSoCgam30}CeJ;Lo=`Zckr15b4LwvD@x z%9Hr)ihCk6Ctb!hCBizq-Bx^js;OTqCze6{?_bHSPrrlycoemwGhI_F$ zkth#uZUa?STDR_f`gD7s`1!=HGMC~kv*-5v5)L5CP)?rQ<}eqPlI*HK=zTNUT13x| zwAD3odg3lHigg$eu>MjO8V>G-m#bUX$ljDsNhtl)iMG=Q;@VaI{} zc{j6yt1BOk20Wni+)^iPcu`wrB~mR6KAeBh(5qb_Y`Da)XFkHE7qD#ENS@ie!ANf< zPO?htzx|EvFF-nodnVIZa^;bV`6aq4wO5|PA0i7@3iqR<_Cm;`Q46pjB6MTC9yh|`pjICfygMja0O}Wkh#h8*c^q^cSFfXpmd4w%}IQqFxdJW+=;g0<}%`D z40H>a&%s!Ooc`)w7qwSNdRk%D^5ul>BB;w7oG$(?Q($wxgP9u^(?GLcq+6}Ig@rAa z=%A&@M(NU>J+#}j5$^G3vFz#4(z1G&=~vRpd9?PJnWE@HK0eujE~W)@@Je5ft~^BR zchC%HcHhG8EP}EtSN4^5WVRL#EVlx{T7(*tw@4BIGOID?ctJOCvEj35dk83W6AY_q zq_K&KB*jfWQuFE42*&D}dBjJ+4HSkhm0z`{#_PNzN(CVF0g)rpLR)!W;$G!!5N=be zK6+j7?Y+sLyLGSoKmU@v+$HR{Y?-&5yk3BO)wyzlsDBa=;(3OT#u?l3qesSv(X%_R 
z2@GV@lw()ci1q+7a0sNq`tU~Ho_T%a0m2)2x#0buzJd!o>HScz6@sg3uu zgWU%cQIU1x-G>h`5C@;yMlLEO_60p%o7sDaIPr008<8)K3Slca2&#w%@ z=NO)Iioe5e)!}qpe7r$M-e8ti!KAZBucG0`wQG5NV8fSXXyG^ha5npIVo`^D4L7rP znAV%2V?NOQ4VBoWaw-}k>;zov8rAq$vKc7VMND@*yhT~ zMg|5V!MXeMU}~zUxW3)MWz7N5s0Ic)oR)ULNqTnaZ$j>gJrI%kz#YtM1ssWmRnLaT zjDJY}C)Q5)odQLX=TTcHCwl2pvo3!FIpOmydBK^GdC`ghqZ}553}~`|+}xzc=uRif z#!QB&`^S4DBayT)$3(iB>=e7G?&n5x3h8v!-1tZ_ zz@ncf%fdN!_N6Y|q89b-0Nc6YveKrWj!7*7Xk?wV`IaACJk?Mr+UGA96o@xCT9Zir zvQ^W|B`NDcS;p#b^M&85zw-2HM{aAAsT;S11S`pgAc0Y0mJ@LTmu+6QaZ9MagLcRj zI~CX)aUYo_OE@7_(Jz`dG>@c;hb~{?rLm4BB`@cNmhB_zzZU|@R$8!w1>ea02LRfO zT(z&jGNUu_W@>Usf1D?jRfqz!Dkihv>(fqwiO}b&d2`H+ceke_N*zqk5M7tssqvX% z#0BzvXZIJ?A81zP9b$tFtd1xpq^Ls`O`7(m4j|RisP1r|x|$M+K+Cr(+lEfg3&Zx^ z=kS@nC)#$t41=N@ zj?%eSL=^*?k5PI9?ZyJ6O$WkfPi!^Jj7FImZsv95f(1OHNLk@)w&uqYg*Om_9pQl7 zCSeWY4!FJtcmK;>!wK9VK{^Ca;tXfA(s4@L0K!;AAJUoisqQu&eNl!=$XW|eW_n7S zj}9RQnqe(E7e~)fd)Ih`5K}BVbz7isTZRqj?&Vp&zPW7q`>}(CNBI5So_8dQ4O3Ee zW>op-ip)lWJMT`#MD^oa)!0#YQEYQbnl2{FR1bVKh6X!IF5KMCmt_+T@I;ee zG&9}|V%OKoDm!|Rw>J5)Ui&T}KVOeQzt2t@pXK2IF`X$)At)g8m+dD6p$lQO$G;P$ z1VlKlPdCeD5&ooJt4ho9s&%{0UIAR-l3SMb9}pl%*R^xku+>pfhNN&J&`+j6@K2C% zLH2f!_Uo?U@OlmTE3@rwj_NP_NqlMK7#IC<(=oe>9O&hSXLfY#cD*5ZRHSBNYQ=Y;2u{0kAmKJfo0COZ|OeZPMEn6m8a zpcUuI@{lj)WLDL(O9E*%4qC0Z;Hv!fl#A zIJO9afQC%W`sdmXH|hIm8#!ekUFPmXr0k>JA7Y z{6=wE1Dl6BP-04}G4Ap@nvu_T!;aYt52DS)hnFw^!7bQDhu0;H$6=((52yG3%{ z>sYkeu{(E;etZvu4`s6``Ug-KMbc>iICU_Md-}jiZW@Urko7W3wbB}+5!To{`_Gma z1Ic_6nMdO0&WS!5b%xxl!)lNa+0g0*P2j(zc`nrtbYrA$Z&A3p^Vp}~rKkhXM1IeP z3=@gYw-y}-NzC&^J7O{!@kY2TB43IkoNhi2Wd;Mzqgl9H_*{}&mLj4l#f>pY!}Iva zhC8E~VXr*DPU;KiZ^(WAUm3O3ZD9zL*mPB}o7gvUUmqsP-c7jKfKypKd(;>*&<)-Z z8!J@8#mj;@!kIe-N;61>u>8RP;<&`xPa74jn{%JhE1a=Yc_uv13ERO;k@WN^W3g`D^V3!xF8~LiUCG1mmBG|YoEmPrkj#=)*hi#c%YsE@!`{^q8 ztU>Tfu5+sD2tP}vMW+PQAZKTd>#aQoW3wkQB*-E9v38-L|GfD#>gmWG)5d~{TxO#G z?iKyD)CU+P|5&U1(@P~;YY|?M#JKiB7!Lm!x<(J5cL+Faw{^KwQm`PfxB@-o`9wCh_k)eKJ?74yolU_uY)3IgsdB;JvvE=!w z(;z*{MpPu>6PcqCz);B>z3)AAXh+anpMfxgL{DDnJs}#K55fROf 
zYBX&2Zv!q$nKmR=8r}I(U$+`nWc9GgQ>F+QbmbX?5F^Ow$1}LL5#R{PzrJ<*_Ojb< zqB9$SYmm8b9ZFx8$I)J~j!kHqj4Oq0Y90l@kgz|(D@Mdlojh3`FuF;t$GW;!&|Te> z$SRcu5)$cB>F@8a79kn%Jn+SD9jO&?J&KM?oP zmED|#vPB=C3~CS)Wu^snAixthXwEqIeRG}^*qwR$zqJ6&@w>_;#dh1#hO9YboSaeA zSjiV~Rm8K;F6?Tb9UARydYefPNJ$97o=utgJGA=3yPEk*KN9GEB&7KJaz?D$+M50O z15(N1Qg{lDiA6uBdPFGqBz9*N@tPd!na`W;YDiJ;r1!`$FpM{J9 zNT2UUu-{gDJ=;aS_v~h?gzU0>-xfaSpXF7~52d}3gzj5356)C%uSPFr6EvtBKgd}_ zJ8F|{-x<^o*_8UMn_2z1q)(iHWB|d1r`IKYj)M~5TS7ea|06LN#N1q80dptxm`Ps- z#6Re&aMvuQzn#0aAT5IlD&8W za}mCX6(2wmQYKzQjZG!NQ&#?~5B!x_XYg9SI0h?Qwt0BHrxF5yLCm6RO_^;(QxL-O z%>{RH-?N!b>)?OZ1w3;JOjN09Ft%V~+@3&`9sUBYP8f$yPda?vcI_DHlRO_6 z6g1`*b}9sz6zFBJ$RLW3LSO&R7D`FUUCh!DBRL4>x0xktFdKfzSVuyH2(`&!>&#d% zic-ztp1i$BuU@J3qvDMUV&>IID7B<=uG#&dq<8T%3O7_|`9LuO!W~z+&fcI}*0S%P zc3NklWvCV0Un9=x4q0Iewq0vNqwMM9V>>1@VYBhp=kOk+)0?YzvY4&Kev_PU5~^ij zWc0E4G<%l0cJJEN59m74qJN~Jo{B$kg$*K=$a`Rk3z7e;Y~zIE64FT`bX{GR19{$o zTXa_=Hhm)rSQ(#t7ygWRxr+-}kYvMmln-W(E<=azuD7+wi!7PNJ@W55@4CGVOV`2x z^a??gYtzU;kj3ONg^$j!>`N~{DuB>EKcYRNHAm26JXz4Ic*gbOR-1az?c9AKYcH6F z70XUje}#FgQzK9(1f^og8@w@qt0NzV#70HFlDKPVGBItcYob0fc_3?>sdggVvVH?H zu@KqA9+7c^)LKWzReKFD9=Ha3r_Mrt65*URYk?MW+4!qm${>KjGlzcuP5*+dRn8-e0VC!Vnrg+=iCeavYc4t49(XY4>M zBmLgvCewljb^3saMY2V<>JiaRztU}>-p&mhS}Z1Liy^Hv%YHy(txhNkxA*ZV4z)=; zhpW!d23KYjqq14^aALv9OkLJh&D46>g~0D_DS-_{5|+_SC~oYM6jdEgU_UO1F)MHK zY)RtC{i&m+MM-xPEVyblt*Wd$sJe#tNc=^i65b_?TU1u^a-cWfuxwF7{TflZ&dmuz?SjmXA316zK_0+f12SVw1>v6^;`t`#ePzWGBm^(gs9TpH#ZX+?`*sDdz!Z`2 zrQyNqJLyUHk89ReEm-i$a^B(ym#`vt8{-iv)`~5+t&Hc~4OM7v?5=rWd^?MthC5w) z7>)?DS0r8?GOqAK$+x`4(_Y=G8&>u0+xMHpVuqc#_H<3dv88{L`L%m)FHOyh;Rq?p zVKG}e$!VljxPFFc4lPiS%Mv>TeAF)HW*7B zLbVWeG{i)=)kaN%nrV7qo|3(wj(wK2s#wre%gACH0;L=X1T{ClRS_SrJqR4{DLgCQ zuEp(;WKTRc?TRDoVHe9l{z~6>d()d!7~>F#H7~me1A=A$N(|~;9S*3)GeLHdm|2)8 zUsdg^zq2X7M3D;W7fFtH3S={HB-5;e#$WgcjTZMvaIBezB5RW0%pX_6(u#b+5n1r{&sZ#^fHfdgN!U@qab_#|Qp0UXHDTTBg?g5lpl zW5(B?pVimGBA;hD=QwX46Gkxg!Q&2<+9ZR=1?Lz6Q}(eG%T&zX<#g}Ns1k}=u%-Zq?LXco%h0ZZbj5`$R;;LAj8+XbZg?_F{yOrbjLx0%%4>p=FLVEL 
z#&4WGDZ{E5*?{pv`PSask)^@ns=&RJ@5?lSZyp0P8TxwA$!*P`J{htgHJV>3^U)_- z#>m}I=vJj~`}mF3bhu5;?&as_OLDTLf92fa+Nu8UexJ;_G$FtDGzy~Uzu8bbB@e0; zpS*e~TJ8~StZjAkt7`C>J3RhZtBFPH&4s9Ynh9AF$*_kFV@FVV*_s0m{HUghLNqOtxQ( zC`rW?5A|r~{O#RC9a@pkM;S*BA3Nsz?h`q!+&I=p(XOLIEqry6#cr8J-VNtgJhM5s zcl-7*NObK55$cvIedpmq2Nrv$sjYEA?<l)Ll7+KIlS=-w9Cf!dE&35NAx}UmU zb)f-9tdi38?fv7OId9oPPk0kX0yrAi0+#Z{3n1Kbtr#a@@~%&l6dhXMtR>#qfiQ%a z4+GkP2i~W97BLgVowqSP#Zu{?&b8t9+{}5p6U*m`C2L#BYwp};NPlOz=eY2WuwRnw zdm7RGjfzW3{EUb$jv~f?AF=~gKksJYMcXM{X1FtE?7;=-MU7UNM>1bp5nioy48Vcy zST79g3Ed{)(5~BY7$$nXc88Suc&m~Br`~83*Bc3J8vz7Vyv)pPO#P&~TLkZveGJT_ z`Dwk73IU_c3LtMHly>XdP5t$!O3oNLC1hu5jd)=9CJUkX>pO>943~W^<%nQOTd0!Rc+%4tXs{SW$KyF)QWDu&c?zo)H?-7K4O7A9}60*f%bgw~%kx zezle&61i{n!1nFhafQcxHO#DNsQ-2;K3;@tBaY#yMQ)%@JCWH7q?4h;FeHgzhA5mS zK_DlQWwPli^4~D-?EwS|b&LZuleDvu;VcT!A5PxatN$gp3CV^^kdkTZ$PACpXNV-j zcJ0@1g12oHuV2tozVmd*`HOpIC$;oIt_5X%nF5~>8iQ~IQJL=|4pSKA^XfogNSVZq zW)iRypHJAjSVPjfYqD&XFGiTgxEv92Qp{CP5{@gxHP0*YE^M#lJ}D+_L=rzS8E3nr zx;;PG6!1!-5yNPhHqsSk-lb!conYs_Ve)E0LXxG@1u}-^9lL}kqB6n4OB9{5LbUCA zNoz!slR4R@c9mBpFRR4)xlhi>%7R;H-qTZVgq}SsS1(z(P~^zUFM#kQpHj3J4rxHQ zIPc&5UeHTxx<$QC7&N53&NGhCNuj=-Y%M`JJhn^637lj%dCo(crW#1Ln!emEV)rxo zhCL#w@?riu%x$6?=-D&)i;hM)VCe6_!;=6)p&Z1ccGN48Jr+q<4g^uP7U~gGBNxFR ze9y;TwpOAW|BimUfJ<`rH}7#NvB?n&UG%*$NE!*vzy1v05>E1{F&j}GtNw*V4_V1S zwzGt-B&Z|S{sRKX0P;yy1|j+2=$o27y13?C{_6`MYBjS^Rhj6wD@$vFMNk_zTj?npc#`)f z*YPl|`BmelDEDqW{~R(5PH#diS$FnM))L}RadM~_y5>hqUb==mYf6Tug{&sVhNAfq{G*l&@7-|a0pNBZk{_L>TUo|L*lEAE{74q25cXG(3jpjaf?8=CHcpjUG?Eu*43=lQk96+zOVmU@>^q|(D(2xwQbpQRJ zA+QvS?b;~;{702N#mx_bs5%b#Q>1d;=b>05g)`sz38>+pi{WiUL{nXBmpJLQz0ue` z(R?CruDd0%bavA2Wiep&Cu2Y1053yx%0?i})<&=l@07cEN0xkel9;m$Fh{~;tUqur z;l3NGD_?Z~&-mF6N*JLBB{ltV3lw0jQu7H7v|67E0wAI9Ig+-{Wu-dT`@lxEg97@b zSEI1ZB^ZmnlwN1YMc+Xz;rTrf`-H^cHj-u7Tv56aur^|rC_4|ROvX)yK_z$(u;<&Y z`C1FK52GiJ5NLZfQL{7;eO-0B?)hig`_Zqu(T{>|?b*g31i`3YnBQr|mM5sGvL8H1 zC^kc1_g^E<4aKi1I8)5ZrY%}rW|%y?%+16>eB(j>fW9=^YVCm78iAq&v)pZp<%ftq 
zgab9%#QlZ-zOV9G)_)@=a)0}d+gj$?`|F5v}y%W?1APm_37{FxG`=oz|U<(C zy8BIh3KW~l>3fk=B1DumfQp&ul#tkZRvx(dBWf=||4f8W(H^)dy$<~=5dTq(+K_~211ezR)!LE9%EH)*K)8c@@b#&uZKhh*0%BJ~@ zo--)^RFc{L{sup{?A_afnWdg}(Qn=vd63EWtFBJMeT*d3g-_E(H;SAYOOz51dvG=+ECr|jk|L~FV!pEu@^laFIebS(NQME5 z`>Gp6#);Q=s4o^F{&+Pd#?&ybJo6(l5fRz2bXS&~{pX)t1aMB0XMkQVH|M9#4pIWH z99T+~1NbodggnCU*Sfm<7grvvySG$F0KU~fcEH;^_?c!i|fC`gU)DOX&EDD#NulB_$(sq=+~*Ts{P7U z89zkAxgvq4*U&sTV6?OI_Sw%+W{CZ0pehv zx|87hray0FPqRwN>PtX@x-|XSvuD-S)yWNWAg7@nBg1*p1mx#k86hU08JE^>LTvG4 z2hJ`%+MRE&na9N|6?YFH*YwEGx}W`fHdgW3zGDXpvS7Vagkb`L-JQRVa0leT?q z=~v2(jTk+%Cv%5* zFmgz$Z^a$e_BDZ~WmQ#UW3eJfaqZ7=!C&f5c(v)@oFB#08(fF7XD9BmtbO_&ZXn}4 zAM<7nWl5c&BfXo$NUM?Nqa`{P$f*)T74Cu!Q0JFQQ79m`F|;JZhei{_56B4oiAG`)6r0Mh!+bpO7nP z`UsvG<1)~vNK2Y9jm-cxy*(~oU$o1!cR>lAnB=KN{(AGr*eOmu&Q@Jgb5j3_Wk=a}2pH*M}6lHSsCXjk`Vru3Eo{MWbQRlZLBotL)kSLQx zV07eSl6{m5s3^IMgk(gG@#Rg)Ey|6Vn`rc55 z%h5aAvR}U!EA|2Q(~ZkR_3=B6JrO-k`jDOzJ4U_gvFEhzv)(@KLHz?uxv?bQ+;|D5 zpZLXX+sQ&xSrh*viy?m~f_QZ|B8fME?0)=$a0N)n_SAhOL?}9>d3yf{rc37QQPg3? zx;_sx4jg#CPWiTopw$m69eCgH6BA#Fvwxt@ta(TwdViP8fS!x&phn2+6tNQCedbI; z*%5lv$Vj(w7v}wMEkLAyyNpw(3b`4&w~+W-a#(fC!~#n1`=gE7u~D&&8CYf9G_;3= z+C6Bgep$;g8v%OLR&j@pV56YMp~I^-E^c+@c=x}f_Pw*oHDAQizM`UlV4;5DUK7R- z9xy;KKh>Eqx+?@79u{anaUj)?9_E)scc)cT=oba$o#eZHW0n{nvc?A}5RV5%%?_V3BLlhUT2+y0`C+K%3^=0-EG z#lWzae=iXO3s6<&*Y*|&mrAmHX_n6K&NCpNLlZM z8Q_s+T^Cp=@^%4pOu(176NYu|^11=5h^AfiQf)u-KEgRnE*{-1cD>)7$g8lsoX1dJ zR+haon)V1z_SIs=&XLMV90?H;iv>N^fY+S_VwVv+kwF51nAd*)@xz-JNJC#`MMSf+ zESHe`cPR941$Af0E^vGVKRdGi!&fP3p~AkFNW%)5?lq!N7*7_z+&#{k)U7+JqeLK?j}t$<7Wv&KLsaNirgd?9eA?1_)tLiMF; zsTJ~`dy=V@8H*zwJ>?N)ObYn`Q~k zFr+_Gh*S)LN}mb%&M#2YX&#y*d2=gg^&zo7 ziZnciFTM@khM&3U-2~tx9C21Y=TtmVhL?oRAn@p{Mte&Qu8+H=_$a-2BjTqZS(6nW zgZ#MIrdl%6itpKrf|KuE7_1{odbN2ep! 
zQ|sW^LbaiXV(&&Ac!k`O<+g{y#z!x0sqXMI!`WsTk|ATWU(@Jq7+I3KkaO(-qVKlz zB-gsrF3U|N2n3Fa&iqAZc{gEbawa3d0!49H5BI785}Z$Srn?tSd<=-HqQf?J&mjI- zI0$%&R>(_c{JmZ%G}gnz7@3clSFi+L$U$mVct#h99(@9$?fRgcaND801qGWUj|@AZv~ zE+{pS_?plEtRYbRmmmMuub>BAd(LwX9>)#e7>{%5FTAP#Rf#wN1kVYprHB$lqG~%l_0ihy(GR7j}s*W0X<7>OF4T z%|VagN)QN~_q4gwbJz~0F}Yo6$nqXOOq{ZVw@TDuxiyl3pAxJVkA}tm$}Sftv()lSVGN9jKdx^t2S)sU|YRp@!}9({(fG;`N zv_C)f(*I=m=QS_p*YRe|hv+cOA||~#J#psY>HjRrfk|6AiI1#=&cpnL_nn<*{1_tm zyMJWO%ZX8YC2J2q%x5{N5#!W8@N!0&w|!AU85-G2x*v_7s>HlqW)Oe4=5IVwAhyE} zfwSB)M(5X_;8@mfwkvsr1C1{0#N?c5>hn?|&H#-~5zrZFF&quR-x5^CvzDdJ!NE9u zM1jLA|2y{va8qczSKfnyJ#)~TW{_Lmq!LsVd~wT-rEE-L`IXDGY1f-(QwR)R`Vl7Q zk#ftHK7hgd?!J!5VZ9r~2re5qlHK_8n6~b$W&A-#lJgBK-)v9^$JrV~NdQ6jQum&> zLlg*)M$DV{cfNTTS}n0T`#i=wsR2vV~$+#tI=F|JP!>W+>C)q|SVxlZ5aGITCzx>VZ z4HH_JZSrJvt3|SH@XuA?^w;}wf@okEe|D61744u~n_m1hD9YO&SBT=nKc;Go4vClC zAUy_a8oSGn#?iRSe2jj(?P)Z5x>;O33sGYuPE!m9i)NMd?(5(#14Zo`zXExH5L9OA z-~wFn{z|)G(C~$3mvrB5rncT#Sb`3P(n?nH$_^<-WgrmEfl&-))aA*_)SKTqdNUCY z0nY2{v})N>He}n*z^e(4YNTy}b`=<}9ayBF3Q=p$q7oV^y8;V%`{s1!HojZ?AM1rn zOFQ#KpRY!H)lcBUyZpMmQ4zyo!?w-N|4A^8x=q146~D;iyfNJVA*#WEOu$MG8*xKb zB6G?gj^964DK&WLyi5JEXzQ+IGvYpQG7h}OH6)_X2I;Gs|Dqr0@U`coAAHxM&S_E$ z+D4G4`D4kR1DDJ(X@FtDdf!EKce&shhL+V48IDw!a5}Due^0TSNn>V<_x-XBEo{a) z$3Wb>dsu$HS*e801>UV#mtsj;PEjqJ2rE#nU*FS6MMVIGCgZ4QGoon)n6PBSlExBp z%-5q+cv!(&mK=PbjOy6AtfuHnT%7NXopvsRm_>sCCygU9_J8ecw@BmW>D&JM z=QlcQ8|pVPa5O1IczI0GDy++(R*icayHDD(a%1_0;Im?Set7mF7WkO=+A}*qz?hQw zWj@loWnB6OjBPZai(B5OBOB=KXwixLLklu#ruu_t&t{}7>4z^7a>PFEc~n$X;Xw3 z)^h=I=~D|25@6mZa{g6LuD3w69Vtvc2OF3DM>HBz$Jf_+OJy;V-mds~*^bp@c!tf` z^sOwelBv2xvruvViVZgr+G>vcheXW9sIZJI8TpWrLBJ0^VS9@U$9N_O6>BJ z6prnJJ)S|7wr9vnn^EIjT$JR5NtB)*CS^ai9xDOo^2KLtU4(sVz~I4g>ZoZX<$2z1 zM(T#Ts&x;iWl_uo0h|_TzBM2kw(-o8H;QgbO!(rtIfVxzM6`#okzvYAmh;C=y%;zU z(yjYc-C=njb+0eclYZ)i;=kFc$|@FW{}ff}uK zb5rm&Jy?>-ZUI@E%_C9W#El_wk!zNcMhM%Wb}&D))vdcd{EyTfv|wPd9K=Ux!Q1bz z6_0_(T`v>Y^L%X`|NqdbmA!vIX0HWxU(wO~z%5`8-NBHY63rUCk%`KswSn#&CDmU1 
zoDrC;<3ea2z!ri_z5s3&h*Au>)x@C)e$U>Wi7w(b0fxvZFNfwld)A4(<9H<$wdD-H z{+;d-lTYb!$c)gQU-2Qz{IuSx{6{V|KPBLRjyu0_Hyn}(LmUbz_;OeLxu2>)*oLio zQukZUGP7|(bDz_m@vVzPEq)nQ4{y?^FG%+zNh?Dx6yk71X?Kb_amzuRtn~99v<6g+ zj{}Xmb*tp?I^FH_KWockQ^}eM0ddP&!PS86>c-%$yqgSyqYFr>;#!F$$|yasfsx;G zJJrz(;1|V#o(}DJ1P0EUf#@qca{PX=6God^N3qR=!QA{VALd<5)$P<~qB>|2IfLIW@P2gfRAu?YhKfJ`tfvlAqyu2!;wKCX)%Yn~f>KzR7=LG()5O zWn9X;UZeN&$rL%?u`uT1gL?CeEn>N6V;Mql@Yv#p;tDXV(ZWwZ;=&UIUvw}IqbwNW zMZz6+k`!bt6F0XA8~__?s7J?d%|IB!+SXp99UYHYf597s7Ub|`PHTfitcl;rW+BA} zZHMbANO@(@wC%@6Zfe(})HNorb z4Bf5CH3i@4!PNQ5v(!hWZEESksE;Jn@5nU*C*UnCh5)R;JrSDyL$n#jl^{20mQ#++ z#{P#((eI&6U@!Od^_%9=&s=7dR|{<@J^qHYNh;Cyzc`5!VmGK7WrI*ZvkG~)B0D~% z4MTk(tje_r{yNF4+vrjTm|saECD^jX%Uc|DvXRV}FGRGO^=om?2%o4|SyraRHxpB< z)%8gJ`2t=hR)4jpUo$%M;nB&S+(O2w;DDh#y~{n}PW17Qb?qn6gLGW`kd5bNXRj}@ z9-yV%BXs_u3oTJ#L^hwnW1`|0QjAtOGljaJTh?pd61Ff%+F8{_KM!#Br?6DTY7mvT-Ztr9%Am{i>0{b|<=f-0-Pi7g8ai;` zfLb72H2a|s(evK=duo57 zd{c`dJF6%2&PX!y%{zDY5ADT%VW2lf_BoQite>NoJp-$2=!8%WA5AX5nfnZ#o_2ON z1G#tUy%b^@6jgqpEV`3a@tkoWU9xJS3hx%;@LaPIkR)Jv<&z_|eJWnbqu~{ly=*_4 zndLe?NCTj8tn;=^{!TDAVC7drXh4d!I%1VqQ;Xp9{Oi>E+|nV zE`wXwb_i-{>nGc7RDN;kNE)cwJYr`AW>Ex^O$hd)ec+#S=fu)mGdkKsQ8Me}^DDx+ zUfBTcj52IoCv!|H>^gA^Rs*8?L#*x&K?8<+5fhsun|GeMVV>q9csUnys4`x?1M!Zp zW9+u}{G6ilnx$i%Xns?M7!VKvg>#wey|Vv?BzvW7dc>5oXA?_)fIgx?J-8X_Ru&W) zF0?7WbunlOi9mtQoG=Mam^Fd1coYauYCGvQxl5r)uHLd`NSASST6g|*ZV&$?*7${j zKfSOK;WX1)>U5dbnDyQ}m^9MhW}BX#!Ng+p_XiU_?qrlr!(qO(hBb10ode@S~`NJ zThf2k+(2Fie)pLDui!0op3A-q=)r6L(okZqWm5H_!Sh5X;4?ckLoNIwP>awd*YDv; zm0&ronF|(drY>v8E_-`dU6zoNnX~Bzy@?niEHKH@B7XKDowTv5!@qNFk<+o&P?0+?=M0y}eY>qZfk>$p%=6vv9N1(e3&*MTW4y7U;P(&?S@&fCBL zBq>4*m2Fho5Gs4tc1xCG$lgZT%TA$^v{_Qql(JP zdmP{6ci-0^*EQG7sL$v9el6$u+|JRQ9WajZhfgn=Q4dHX#stiOAk24%V}AL!4O~Tw zk95xI(9CBm+{aS^*phUxv5bzw59_<-QHl`p$X^o0K9^9(U~&uqny!k*)j^MD*LSkA!3J zbidS-0E2aW%uiAQWqOaL@J*S^> zC{s?dl!fwrvfa6AB9o0@2Crc}gW=;af6-dUSKf1E3jvMw*w0 z>FEWA-{S@|+G%}u;p1TTgGC{yPBuO65oaBRNr4YJ!sW*O{*C7<7$>#g=)3Qw6sH`_ 
zJ1zmotg|~zcaOd|8LAPd%VObIMtzY^1E9rN4;O4O)&0lu+#RFBIa|EjwG5U4+Z?PJ zbl#6AbU;x6qVleveVd4sE-YdQz8#m^jUDUp?l!3G<2Isx1HF|{aJLU!&v-Vx6rYL! zk^_YlQ-dBu<;`8cK5AjNXF#@EM>w5T8~##AHmF)pFvgxgHg9guyo0H`B=d)NP=yh& z|98n4q+j-kI!+ysu71FtYY@ZRm@$K@rv%;rQ4o5^`z!bweR}jHf9CK~+Vl?cR;sNB zFoW_71M~bbpit#I0+f1*;juf5heW8HCJ^z;yh9T?d*_-c9u9NX7uXE-Q$pu9a35^! z^1zwY?{sekwSSFG3>D=@L9?D8K7W>Zc5F47s!;!(xx~mXB`l=rd?b4G0Ar(1!$bp? zX5Y@hd_Pj{8)r)|qKq6m6mvMx<1Rl(H7TB%F=U6xaC!5Qh~Pak0-JcP8oc6N2+1ez ztbVWIa50cQ%@toGq?Gxd3D-b=@r2*_UV~2CLyT1l@;zCA8g3Cw!%VNWXtMsNf9l{<+8b3S3%Gi0B#+=2J&2f6< zQ0zT=G?Km8;@}nL3vfzMER9~6Oc2i@i`i7k%z0`%fldCKbSvs&ULo{a&_(e*4L0W6ewhcE&F>q3WlkZRU8M9$op`6lB1-ol(D5G!J%<{8n7@?5Gw$G+C z&in%SsG_Q>zktfCHR$G(JKSo6R*W!T%1^6!acS`2=TzxiSSt6{wCZftmfc>q0Q3A_ z3Xa#?xu-^%AE5)2v*9x$^?fMYRBaJ16QGdrIQQDM?7Tb?>w9(P#VT{~y?70mNqlFt zYAG!#{gJ)8{tiWrPBN{hM~|B&3~HIryo%Ik-&k!T+S_}IqZ>hrSb+;pr>0udqgQOLPP`m zzbP$MLhN&CG78Ka}$7|GDv`<>Gn*n|Hf?O*tf#%pmyAZQmSPkfwQ*|tTTW*|h zl*%$Yd&fkqjh*flt+$gBsmVfWaE^Xe^yy=fx?lC5fRK>NX#2r>lv1-U2aLM176fw0 zx*xH7jv)1;<2D=!IkI|j>1XK1t}y4{XPcc+U;PSg^s%HCxpiDsQ5V@M_LK$|lR&{f zy|^?cy@vG-?X~4|9Un}9C#{QGCgX77s?k4tK3jyC3*cV$Ota~|>aS7MR+e3BWBd?q zRIvVvvuK&ZsDjngryw}Pfit3(i-`~}qm>$ZXQ^%fx9i}4Nf59ike}F>Y_~t>AsmuT zaI=Iq)`8ZdfL`x6z>uRB93WO-{mOE!nOfntfWuN~$j(3RADM`(Ip^dnNv38xxaY6` zyi`}0CgOLY%dF_j(m!XM+E6)#x<)x@Y+`?XW^*)|8FuRFi8c3`^S-a6u5OCmXGUlv zwG$NkM<{-!42%gzuOa95!=wkGmsyd;1?>#8aWj?7R3Z{na(0l{$DKOi8q`G)7nXD6 zU52|#n^C-npX4}G=q$R6W0@g@p8{0^gcJ3xrp|cHR`$*Q^fwl(@Q$=zn<#Y zy2atzdma}hNyj59fBP?cC-@(3D!-($P*Mz(Fx(lHv(xouyxc9K1r(3zD~?wg2_6*5 zuH_t6*_~6ZqZ!DnE@NFL_0-YTmE7NxuVm^jcmBQeU<<7I=mAs5Vk)Bo za}t4e3P@`3S5_N*dcw|3Xhr9Z66Q!PDbkr~61YG&R5fTQquZhCl}Il5_WEyJozp*s zX^)*`;~{yValTa7kr)oum4JxaiKyLE&TmG<>Ur{ylCERNYROyUnjSe)lZ320gT*z5 zCMg&T#))JoaCr$;q;I=X^r@u?4um{w%svo-BJ3MumdlIfTddKh#h%H zonLw|r*auzNYaL;WK@MV2sD5)jWreO;=r9^s$OV%4h zL63y%^r^FZe3GfMCgYO!u8TOm9#nbA5*~vnmiBxFOe0(|+qn;oFHCpIv)fejAt52= z(SQH*&sA}{4hJ5neL+6S_FcQS&I@(`=nOV#JW2cnwGTEaJEd!vUrIh=omsAJVvpvr|%7Xu3b6lI}trO#JG^#Ki2U 
zPZJM*;n8K0qcrV0`%Oyibnc5e5*MEN6Oi{@cUUKFYO@_a#*5lGY&yX96w9w$;6L>E zLoN4(rD)8t^*;x5f(b{i4@uUioz>AQTn z*LVpXOAJsN*c(kEFR6(K#~w@8)Y*c}8bsRb<^q!o6JOxR46qsLtD{|_P>`0;sVOZG zEgz@WpwUO5JppJ%wsl=DRSVU2?+x9zq)YT5pCg1Ax?z9P6bj80 zHJpM&qNmqobItKyZ2j*M{Y@fz93zerTeNO1j;U8aHCMl zy7H?rk|Nsp7dgi~5xq44gPZT)pOmX@5qcCgj-3-xZW6`1Xg%L;eSK}_6_InIrAe8@ z!R490x+++80gnMXG(j(k+Hk|AYv1}5LeKcggtnj99XgbjmYNPCRx}$aR=1CBYwh|v zV+PfR=Q}kh_3W&q71V$5!>UR9_wUyrx4F3Ynsl`_8{u6mM0x;A53_21qf=VvSFa^)>{NI7Yqmz>I=Y~^WQ>Qqu zdh_yS5Vcs$rKt#GQJ=r$GF5v@*d!_WO8by-{tM!k1Bsf`*fles2C4{ms(PHC zCL<^221)(R#yfmEkr#j&E|ThLp8(|dk7&_eie~G#V6e8cI**?`Aw#PJT8?ycn?SSW z-Z1qad_LoFr35ilUS520)p#4-8R#^Cs|r$8cfqmuN{|s1`!@V)%r`?~=T}?lZW+vF ze-%KdZa0joFJO+$YGt313Ktk~oe74KFdDbXx1dP^ID@Rb*wtnXjjZl-wSI8nItg<{2sd{|X}#UX?FF-22(uW1Cql+!86JX} z1;H^G{GO-BZG7(#xA7KmotW#i25-FEkb#C&;U$&cBNA4`5t%p8f9={*>g9mjRJ>ia zdRiT6)}!~%K;E_A+q*`)nZv3HRS>TZ<=%yYac)07Bt|ro&-NPhj}J3E1mw-H^e|yI z=yN3WZJs^J2g1H~Y#$r`{Gz)^9;RPnRvj5KP30Vop!vW^S)hT=3uj5NS}JmV zQi1AzW9ly|KDEm)X!^t#`J#-71i1u&4g*#q(7`bL=Gk4k2p}O#eFZ~e=lZ(Xv*{FhmqWTZ)PHLZ;=0TID>;h?QS znDgJu014`Hz`??DBIWlJt8uxQm$>$OlCC7HCKWwCFHc76r* zfFHi6T<*^ArM#gJ>5VW<)Vq=~A3Cn@SJnC|2z^grX2eYN2uR@z*OT)EH)H*L8K-*k z{{3stBGo_geK&<3g@ovuG(AMF$JGUO7~UCJ&jNM9j>RbWgD?u;`9guBS)EKWc(~|| zYF2U}sJp#5=hu{Ty?hzr4OzJRDr}kP8ORe({d*#4D+PA=Kg-}$n9|cE!8tm=MaQ;% zBCkFoJ=d4BRMFkpXx^{iuC@)wwZ>Fq1=f<2E8$55Ej343pZgLYzf4yib{nTCHT;_^ z;TTb9jaIMZSbFVnO(1t_EsR4YhB~SCPB~pue|IkO=3$Wu5Xn(dIvjWaP2t8wN#Q99 zEGU@J;Sj_vZ@ekwRReclpG)t%}eU9-gq0aPuV2pA&#A_4KEBHc= z_V2IiW|r}YSgnDUjs#T9+=tGj?`6O^sh?8Dp=t8+MV%5wWRlfI{x&ej$@)W>FMwTw zE$E%%TA)G1wyP})%#&2KD!dOnVEvmjk&BK1W?m!rP!SWKGJi+D&B?4=FR)B>Dj9C2 z&;xB7youetc5U;We#lQ*asjQe1_pAaCTyX6*~hVns!i!{ko3*ui}b1FI-PN4{jQ*~ zDi538JcYD1d_@9(j4!xv9p_TUlP5dB^<^IlLFo;S(gMT;r)kXTc6-Mf%Ip=4JH{7> z!*UQ?)u);q7JH=kQ%9yMPKx@;s0R7f8$*1~sOwl68Y(fO++zZlZ#J@2!7OgLfWWIW z33LMm?ne9_{R4099@>!t$@m;9Z5yuhI}AGM$ieZ!Ox*JlpEO?{glq$?Bx+YDkJwvO zO(N(<9g{HQ;GdiR%eYm-uPRe$ zi!)>Pc(vo&nG&jix-uAx=DKIBw!I|L=XAps#YS`T5H_9dWEoa^yJd+bMmq^)Hv5Y` 
zEH0<-@jG@X5-WYu1vZBVHFsGtfR->LloC%i0t;HLcxTsb6A@*J-MDzw=v{>x&ICc& zn6nX>@Z!g7QSCQMWIWl-EHo$c$cPB^KXxV0DQ^V%>H8bmcCC0FHHln-4mLK5Jxn~_ zC%Qe^wA@yqvxR|U{da%>>6G;h2M_MZuJigL_5*ON>T)_gxoG!-h!!q<{r>&Uh*%~y z5}b7_=pAaj)eq_OWE$wwc!mkGNsPZ?d$a}SuWJvElZxKkG7t_q+Vzb&{GwR?AS0C8 zu?Wepc>P>LSir}7jHA_<^YsAIefkEBimy7u#APFv$qR{#jg{EG{P#fKOh>vC^1Jy; z#UWf#%xh=`Z0HpNhBm&N%(W7~1r}p0o}`4n$GKYN>EYx8e9pzo{4#_UP=% zlQM{A)E}V55Es6$Ke7J>p_UYVw;R|#M7s8jEID^pZOu_0oo2^OF|bm%8tW#fy5tmr zeRpME^OiVKiZhlCj>KoYh)>IYV3!sO#$OU#Eyj2l*oiJ=hJ~vA$Z;r_dRG0k`DUW2 z@^W@gaZC0ZExBAU0p4C-B>=Kc9s*yo0prREPfOvt$o-R}en01HtPYXRiT5OPY#%{# zlq>ffP|wl4wn-E*1N~0#4ZNR8m$oq^i zzo*raYSM)B+bu1dg-9+>exU-{&w@QFP{oyPK6!rJKBDp$w7P5(JhVknLatyvRwzS+1+FN4W8-|rjcuz zoB#1=_RkB&z3$v8Z&Mqvq4L7<^T+>eKh%Fnm+#Alm#T4OD`Xi5GMe;!0+iD7Pv!GfI*% zfzH#W-3XDQg(tQVmhE6$(28KTbkhq3?y9OmrWz4<#USa6SX}Wk6>J$tgp!fRuTury zsJ}Cyt!@wK)vGmHce6fFnHmDyab4H8=eFa9N%j<#72L&*H%I7ik%nztJCuDX@o@Qw zwqzXQoDcXX2U=N;HXH?;=YD-c{x_xgoa3y zYGBO?ph$+5I6BHmNlLL{*&wJ@0DEysxQ+pfuP(igDO-eFaksfAX_;G*TO9Re3&J+C zP_+)Qm%0Gu7@gC(J-Ki9bqtZ@u=%TTVqf*JwA+vee2klERP>&MhVq!`9G@NSrLfy| zzv-uF-9x=!jG^h}W!c>FK`l1$(AfHWU`%n9;%lM5QIAFI5qZ>>rUrI#3+ICJJq9EB z_TQ&oNByabRI5xa7tToL%iW(Hu9B#B&f3O0jkKHh*~B z9)rN|pcxWJFkwEZXlm|cP6+-}Bg48STKn}9iVt>zXSTQ`C~UVAN`C$t3V_0A&zuGo z0NJ36$+)uwROR#m7Nxy#WU})}J;0JukH+0H5`uE?JPnTS=hP`-| zLBg3)hqSAu;a&W@a<|W0BIg2$Jd2hRswM_U4a2^FB^SQsf*?8WU~i2x-#oQEc+4=h zqg>~BdrNlxsXYRRfDJ@F<->^+V8w0zg6RXhbMo@RcPrbYy$=#I;AMm*&K}yeoX)JL zAp9rJtA11zcBPBid^eItxfHbFzOKydM&}>Cn(k9#E=M;9f0@#2wnWIYbp(7j?q7JU zw(B=Z9R_1>{0ozYg0)wr;(a!~2ZmV!D+Yq`%`XXgF@I z`WK)VIh?c#Ck2|PQ0ZRHK#8PUdJ#@cGGQ}?g_lqg z;rcD{XKYF~(Ob2brxBEc@olT!UgrTEzt8#el_SYb~ymAC*Md>}7tnKoR^I*U0s z+O8bNxey;N(XrGrlxnS>Jc3tfzn6JlfUHMR_R83OK8_f?ZbM zq2SKqsPsuQ=+^G~GhhUF9;!Ax=NVZbL^&CUPEsJk_Q{|UcRyjh%=>Pf@f24~wY7DN=EjyoVF|H|8u^UK=_$ z-D;``3_%W^NT-gXYQAg|w8SppB9SZYUECjW^xEPlY_LVjy5cVu7tQtvM20cD>6G}y z=A+glqoGE->b;)eVTs-IS8ufGJ6cpqRQuk?&LjP$EZ)p9vt6~_vX`4q{Nh5zPUC8H 
z5mS*M1IOqjgW)~DeB#8iWh&yTr;i@#(0J5u1#6L=>bV>#6b`YC6xS~|$mJOX^XRRi ztDDzGv(+#bjMofOTDujF)I(>nEIS_2G#k13##TV9a7Y=(Ioj}-@fCxwTHOFZczNgjoTFxMq9#nod076LW!i1;9W zg+3aWKaFU^`0KtA;4MVXw?KPgqW6sMmOItNdLL?mO|Ku~0h8O*%}LEpTW2}DO(Hg8 z<|3!YK)zJ%78s!nR?NpAt_+d&1EfaKL`o0X@-96l>Y80a*Z-U86Bz|9P-WHLdM3`8#;uOiuB2m>m?_h-e6EQsiX=p$Wn`emRyw#fmmxm3g$CzJ3& zNRCYT5CrqodiPHT<$j zaEWHWyT;yrQ_k87_U-DmYfrW}F*0gK0c2u|7T=Iv2?DKb>;erQf)*ojA2BkiK7|IH zm&{^oL7RnRx6hGz$k1iRnR*CuqIYcM?vQMkJ8r;`^nr@|xTS5GhEp;QwE?bbqo$V@J?)S1 zT@%-9=^7b;)V+JeO?7S*6-vV&i&=y+-^1ty#c~N(TDmuhHNL*SZe`Def5cz zpq^DT)eh*K(d%Hk$!RP(DO3sq6_lkACL{@32}#J)5Uv;b*5g?n=QhZ%ROz5w)&^=a z8DgT~lC*!IKcyiDnlJ-Z2f62gZM-^33Lwv3oCZ`5$6vG^wFMaidJxxrq5$Kd(6z4GZpIn0V;FY7TtRHVCED; zuexG_TBK(0|DA^<2l~gCpeMqEH`Huvpy#kj z>YfF9Qb!dv!HN=sO)>hIOFs$~w_5;wu9;jO4h%7C4I=$ZqYJ)>7L-Mbzp=eg!nq^z zoJ(&^gz3yKVg)=t_>S%lDn`|u85eCJ z{6Uxcblgp38}a@XZdd21>+MD7uFc63?QbU^?9qmR9|!l4~Bj@A>QL2a?fjEEPTdk^|s zO7Wk4+%goFzkj&p!*R3-U_ip?Dz~;6@X^uj3+-Ck!k(r9RfE$Q;mTrt7lpj@+KLTV z8h`W8oIbZZ%>jfAQ8Sxhxu@$TKpPX5@o z#G|cZZ)YbJP3|QM$ahnDiP0E!xP~pudP8V){&&Vz))wxJNrY`JTN_wt_Fu)HJ$Lb9 zbsriR{Zx;SJWw2qtp_iB%3xkR6~Qwsti~#I#4L+AAvI^p#EBcFeGFc;a%CxH0h?!U z+D?(B{Eqh2>|=PKSd0l;cD}7a;R`5W`I_U6KKM}DDbjYhxCmp6J}ugD25bk)y}c!z zsTM4A5u+Z_6j3g(A))Zwx0NS@IBu>1=))B|PDUK5&82U$8b3ud$pkJ~%Uy8l?Ahu| zk##E!D!KY3k4IA967Ev|i#Q>3-13RPO#LM`EM_}vcG+c5*!cn&K9PSlny>|ET5E<= zzU9IPY*2CdY*zGw$i|ljFBx$iI#AD;R7SKOM zj=F=MF|Y6Wix)lVvqp{||JQ~ihYyQozl6SB=29D_w*SmcbPkpL$Ac&^*w-HI4xB$f zHF>!eD2a5sFJOV_{u~NqmKmCt$^;-9;XF zHv2MvAm+AcsjfG6Y~LP%`KHT5f&Jz2`0)xt!EAc?gT-*-YVUz^zXjZ@r>EYvntf6` z3$_Y&=vu!V%Ap)O1XUM-T`-B*nW19iDRnj94SOvC+aG6wE;tg8wc?nE-QEH<)`2>Y z(rYqmd5NU>SEn3J%8p?N9^*!cz-+FJ1tY1~MS6GltgeQshYY6`qVcUgU=;XeZ5_%k znLu^06VbQ27$wk!j9iM9Mz$oHbv7jKWBK>ddU+Y8b?wgFB1XwT?CESyd;5&6FhbrfS^Qxey71v z0^T+&M+32}X;NjG$3|foSW+4?V)Qn{I<6OupYTH~wo&QeaO=1K*I!;~evnrAz?-r^ zXRi<98C-nKASB4(ZThyM=Dw|d8=^}!eE(3l?%hY{=Sof~Kuor+oV8qGMXwpm7{VxN z#Um)-vX~PPDitm|k`AKF;)Tf6iei!BQR}zZ3x&h9UPs56ojbex?PKP_t)7EQyA&Ri 
zh*(5qdvQW{*bDz58+q$7iZ<&Dmo9Y%W(k2Qgbsb{OSnRy^tV}1y!xi5VR|*-E1Dfw z+i8WXJ0Z36tvm~L!rEMOp5^#>Ua#ADg*!p~i~vem5t3D;q#`ciBQ4umJ?!HBf%oeM zZ&<$@ZBCNyQ$`roUimKb%x;)PAo+U+$Haw%mRFY^^8fL+`P^KANlR4JG`Wr~R( zJL+T#1?;=tyGWa5PO*!mtBS7eoj0P7N=nQ)4KO9=*|Wzx0L;bP%z@-R^5~7 zR8MhZkSJ{9%s%O4dgGW!44Rd6<{E+({ z4#WH$g^u(NM(HdUna))r(o$A_dQ*?w13urxCnUD4IS?)O5E7m5p@81ZavS4w+P-V# zb|W$Bi>Ya462w~4K)>Mr2q{+$Szqs(9K9G2*~y$I8TOP!u91Eo$z!rC=mC{k6>^PcM4ynGMN>! zDy^ba=b%ktVZ(mqS%{D*O`KX2L+QR~k&FSW_W#I453Ub`)y1dgY01!-UoJ>#I2gyh z-~io-L@?+C>Nc))lhi>sv0LqsTwu|MJ2_9~Y=STqr=v{9cv^4gE52Hm=Vuw=fFiDa zOGp*Tb@&;vWs3~7NT`u=l7rYXfdz4QoTT#;S;Q2OO>X4km&rshc7oADYOZ*;VAC&> z6v=N3e*(E!LT=ypA`C%lU4|v~pU2dtJ1k8C07I zZVUvq#_*s3t|8ApeZo8A=tQC^l z#L`s%0<#eFM(3udVz7k33cA6w6BW+NGs7|3-kic$6)Z#M0Iix{J6r!BG5YdaDta)g zX~Pmm29t5NnNFqSS!m0isZO>vH&up6*9g0*eh1T)2eA>FGheYx5B+KnAhN#hwa$)F z{NG&Uigo1xp`wCyIEN4Si%B-}6`ydDIKpRR*{M1~a&O9%W!Z8bbtMB?|;5vm=!m^J7M4u4+ zruR&hKd+8y0~yJkk+zF|zB3ZFmwm|RA02$_)}1?lZ3J78A=oJtn95cpf>1h6>}X>7 zf<16@$F5zwG;^PW+;KiydeRu@$diAWE%I=Hg41JUogz(6%K#^m@rlbTG1+7hsHr-Q zPIVhaj!Vl`9F_Bxn=QZyq=e)(_%5*-FR>Nt_2~I9K0<9e@Gdvp{o2k$R;!bPV{kJT zrwAvGOV>D5Wt2Tu+1v=yfwCTq<6n@bj;iq19SJjLd*n@`@7Bk??PNz=2k>^JIFfGp zZYT(m2La5iu2TQhvu8KMv=8VA7$TG0FF5d3WvZP2aw-yt2W`?)Z9>;koCSn#jXEq^uh^L#hxcmqeeP;Pw@aJd1va3L%rVeR{l^qWZ?gcUZO5Zj$6us z>m3LU=*lPkqb-KHCyl5Nbdz$XLMn8ib4{KLU*1K)ZXc10(zRRvQ{kc=l#d;sQC3+{ zDr;Ww&^Cc+-ML5P0}t4%oC_zId!di&TR=G)yLGcD*+@W9)#FHXc6A*&cg(RLTB~yT zssOacJ^BEM(pu?DaV2B15zu)3f!bP3s9RP1nFdY2H1L6h6Hpn|+=HQuRnUUw2dy|T z`w;*V@K?-UhAhi!2f|acqhq#Xo%fhP4j8$6a}=k-Ucp=hk89q%Iln8_nvw49`G+;b z>ApH?^`(ZEkWv_3g-K3*rb14XZQ`G2XF{6rMo#D<2^K6c_EDqaOA)WKrg< zZbuL;B#mFdBi2$SzTw;+8nbgA?;^hjJX8jp9&W#jJ_n;;o-U|f8KSsLhj<(HM*Y}6Fjhex)10MqYu z8`ZAcym_Nijx&pIbdYY#d2VhQ#q-G2;{q$yN8v4(*6%YoG~q*HrhX*8g8AX&{H312 zMbF4xCzn@rhP5o-A_elXdRmk&g+!3>lZFp6LReTar*g4I2!b^(b&_LF9SrA79^{Zj z`Z}2gegE5=@^X@dCnj&$i)n6R7u1&LLno`p6Qe?+fWc+yr`XrJvqgo0f>~qawcK2Y z!QVYx8`)KZ+)FuHM27*3SULO(@mLaa+hShS0(Ltt}P1$Jbp&Q-%_{*Z7l!3r?E| 
z9W$Wv&Wn=Gytrw-38t0tRec_C(j&VZy>Oi>h6Jr598R$4xwf~iU(ck2IX`bct)+Ox zejBnn!zBg{0pY<;hkE=M$Z6K3%RpZRv;fNuSvZo}ckKum8WA$vO7f9bAz=MrgADng zU)aVIxa2v}5_OOZM11(L@Ly3H&f<8z15j1;d;yYdBGE}B+CI-VaFYOku$JYQsk%1e zt7kDUb|=96iI`|A16 zo;9mAs$8o8=hx~Bi|HDr=IHOgV)=4CAbI#0vnG*Kw|l5f07z#S$EON}0R!q}Zg*sw zLRX7i$CN7`&HnP86r9@Nx1@&3wvw1?iu7YgC(Pwsxta&ukPI? z`}4Cq{y8Mzh8J!#Tguk$5)v|wi_i?mz$GsivU3E_?C;whaTH~+SOD83qH)c9FO}Hr z+1uC`jvf!^kS&qej|~$E?Uk0=i&Rsv5AU*IHx0Wi^xWZ+>5f;*tZPI!A%&; z+K+(MLIIJ9E+!-9vs4L6THPBQ zSLD>3sG8kKEQ3n_t%*C$+2DBT?PyZij3LAPkY&p*>9K+XNH&qyBIiQQE7fRzImnbh zJ3-(yY%Xd(o4TIzC|8HaLgZI!^esr-D+mrwOOgnF!j9|t4l?j4;x%ZHQJ#$1l(EAf z9$vxIC7@>282W*1&KLn#jJL}z6<;i!nJBeqF{M-)%bSyu_pP0qC>c&RL2@q%XXzKQ zr4d_qaC|4QSsAeU(}ln^+TJFmz#-wYb3V&czTPbL(_==2U7lrr!^QwuNCC~N@e*z? zDyw}-k_02|X0gr#ljy)OLvpFK86`vk?c^&$35qxmn8>IG6uHlgdOHkl$O~w(z59!w zl)UFU2<=K8<%j^V;S<*bNPCWMB1(dfv5Hq<{3NIFSpHUu0$e($s<2oQe7emmi^JEb z{{*1NcIdJV3$=7~OrrBurv`2R^Yq!X`_bY7jn=7--Vl6u&)ymn(}w^5U;gS6co#gb zsvyKu{a{ZhMiHF08iQao3+tIWMwYwZ(GI#k(WtibW?c6jnfpPU#vNPk;b6qbU#AjA z8H5!0>PIdkxiYS#)1-p0<4+h@GT+_332G;o3bc^~fE>v&FXchF=$eflS(-rkY>pzj z!(Gta6xxb+VIl;ll>D;85yu$CrBXlS_)@en_iMz6Gf+N4@?7|>$-`#n#=#HKAK!e) zt?$5^+&G8YgSRp8bN=(^=mb7&*X?$iKN0s-xZR=mN@u=)^-9h@vvQ)i_MlMos=LfR zBmp5!S8}qTXjl{8yj2G!>M-TYn1_L6)MpTW%a+*~bc3m;+&F3-*o&g+?)?Bs&7c{s zKJzYKxFGl9FbM)dhpsCe_lXQ4)UH(&c8lK-C}57R#76&84_I!KK$s=3)s83)|a> zxJDZTnbIX;LzXWOA#vwUGHIdI8{VTFVvX$Q)$`lr*gZF#zcPY0H&+a7Z1_Gqpy9_Ks(~t2$xgVNqHuDj zS}ew^$ex*_e*Mm!YosMqoRfdC{_CUJYPVrWs5ERTkam$CXVUoNeebw>Bv`?`DNU8r zXU^bYODb>Rh#ut=wXO5uPze0WPryAVs~DGi)2vk5yi;VExL(8JTxysK0-16_J9CJ0G%Q0!8;+$4AhFMzo8scQ+)%z+^)xVUP` zFp;J(ukvQjQ$!r#&G!!)5winqt=1O^oRCIS$?%t-3qDgzF<*BH0S>SDSKCP4Jo zM)L%O<1fk{v4&8ZVAs4>%CAs~9Qp0osmO zXYsA1o!Im$2gYQbV)7Kx_Si*H34wTyRK^S2ik(`Ie{Bsly!nxK4ZRE!0C4O8fJVNO z!GHrDlde)f$bbx`K9m?Bu{AeAEUxlsD+XL)LM7dq`ZmnQB^1UrX<*#<+n2ME@sEJx z)s%_J3P3!5J7?Hlk&6|E)0>5x^7;l294LD?<2+;TJ+{8(Qm;56iyshU+tSugNkQMQlv2CE~z7yQzLMBV`(cy%8T8qh&8IRD;>)V&I(MP!Q 
zJjodnUZhnvel(z-*UHj>fthW)5F^bnb1S^Q9~!r-1t7<<>1VfAw{142$1GRQxP)ue zmn&CC1onEfu7wG*lFd?6`--0&i>B2kNS%K(bh|J}84P5;IbjbW%y-z!!@2KcWNhp~ z^BWKf7$+8()eQ{|Mh{Ps$RJrvyKW)y(%j<0Js`7?9uK7RMnY3q*+^&+1)(z9iG`X% z>mznddo;YUwy6lb)L!1d`kl!sflSl0e?ny;h3>gxGO7o>5K|~cF05i~-XYfFjr>(I zNrqW3RHX5*Rp(k)OqZ}2D5K!7Jed8mx3=>oau8+ZJw%@*WXRSJG;!rK`lPOrhKE<5H z({GG*Mvk>;y@>ew_cso!s;tbQDIgf8hsCyPWW`G5w853jCuU9gA1wexg~>9C1jy47 z9T(4^pT$v|&{It1R#rR5KS*|33hs-R=`{2`IJ(9$UH6q1E1 z=Tsr+SQCXN9uhtfP-mk+FCsF`$dZ{U?!o)$fTWV_xK?^C{@nUTl0K*@Lg?YX^w}`u zpx6blWh$cCHJSJTJ1DzNW5!Sh>j^{LF+eV2&NZu7yc8j$^fiuwJN&{GCE+6P8esl& zdaZLG?lE-tVP*E2bZ6h>7L>r%);~CkQGXaNs6;k7E`fcU#ov*f9^-z3T2K_#&7$vA zgA)tiGJ{@-p5fpnrtc{rvQ6rc!nF<7)#t6uoCVtJw;{RdloTUc&P|l8pD&MmVP~Un zye!PSb*onPoKrX~bB($-Ha5EA)x083INV^l-V#>^OnrDvyDGcBiQfpK`EUx~lmC*QiGw)kRP(wRKKDwT#iHQM@=A$zoWJ zjDQ3}lZn&dWI5ioav(uurMB{-S(F%~&%Jy0eEr6xFrM2c7?t>oNOX*){Hsnc0GpI) zV}H=JnkYWvpW(9|xzP6q2?5fHM##ZdMR~C_I<)9e4N^E`PdEftDSA+{d3XTpd-KLf z*1V^T3{u2!lg;p$N(J_9ks({0h{lD>HOG#&DF(`nzz}-Z@eRnEg@p!5A6+XcU^meu zZjf(*FMX8T;Kk1disq(d7Q2dhZBaqMBtkEbjqEumE0pG4LYMoGYtPIM09FqDxt$i1 zun6iObCD5I0LbbGdQCs01`KjavTsY#*(n8VsIO|?kQT39T{i6_XOU#m|9$~G&%nWh zwU)gl$C@^p+DCIOrr7Db|ELJkXne&Vtc9Omx4U*-)(Ik|m1*Lf2FmW?;B{%<@bT$@ zT{C{&d3l^{*&_ejzu&g`P*@G>j@ZZl@O^T&Pa?fS)#4)S zgX{&@8&BXHVWzHk2mVi{$=P}xB#l*~x!@N(fML#6{ty(c{_6P*4*>tK_0D|Q;F_$F zJ!4bl8bI<+g&OEC>1sA1F&@vUs9N&PFqf))apB6z`Z7WIPkH=CSQ_c z1_n<>P0jEy!4$!iP(dHqv8Hv*HIm|Dj3aHC1TxC_3-ei=!A%~B?;WsO+Px24sJe8Z z`XgKc1v%Gq56lIf<7wn@sni~)`vswO(jLK42+**65dI?E!vv*j_axtH`=ROal*;P7F_M<{)|WPg@ZN;St(_T&Q>36q&`eg*3>h@;MuVk!O9S-6mRAvzjbx^o#9O@|iii+z;l0fc3`Qh`9(=*; zJ?UVo^yi~5qJ|^mGQ)c}d3+?z#2;fMmYXEKg|ZUjd@d*O_hG3Vzaa3}mU!RT(oaMO zO!(CG@1+KH9s2sHgXK?Mqw#cY&K}-AMYYYK2^6p5^(}tEMFrfj!<4#F^u?YMRsf6N zt&5m{k#mMKbvpI&e1r@OA4&!`pp#H$-XCD2;Njoi%bEYX^FATkN$$_PI0NWeex5nK zGr=u$l8YZd4rek?|7k4}{^S~8x81dCuTR%`z`lf!H%5UnGp+Szi${p+<>`(d2Zk?mYB}jA1BTAC zUf8-HcNr%6QGtwo-AlV*c!CZ(xL$6LdyjI34sE*+N1J`ryDZ@-r98Ep0 zGb+94Obsc<6tp9wW-asc)6aFGM4+J63k6-$?9ad(!uzCAA 
zH=$5;npCiXL|OPsGG-}%=OlmS2i`bqx9q6vV>8V!+)s_4cNYG?VTS+y?uOQ*H{Acs zIpX)Vgw4GdAsrWxC}*mqvZPo?o zQeu3D-@$M|yQSn{^H&0ud~PE9M_`e0jg*45jSYjxXi-jZogTPHI%x)`jKe1;{{Fj! zZgj2}5lT{$Q%Nk-6#0*07GlT}UigshbW~#CqT=nem{ztGMM{VF{9(fz4lE$C==Q&k zw3esi-SirZ+=95pcQg3Fts#Sy6DZz*!DF__Z6?PJeO$M*=6(Cx!J`#Nx|$d(a(GKv zq~kvkc!3c^bStKq-x162uRODzmr@aH5iONONT5=dQuk zlu%VrM2(lZ^_9mse-Ts!33uLsDTr|93}HQ*%{Zqn9f86Ibn9%NYu?({wgW3@d03k+ z<-nY?0P??-3+#jVS&|YmoiGw`2-P%aVkV{~jg1a#*IE(!KW{(Pi~}~~rm`eeZ(Q}4 zv4aPPQy<(v!El}t+~(9uPHTB?DVPC7v@z;4Yh1>j_J2gdB=NU9n$j8Twr}tJc5Q$Co zEOOME#N=?7nw5lud%n!t6mWtMNF?qYMHd8!^tt9up5xbWOrSoIe;biZ@*A8Xk&v!7 zFcOL7l2pNlzpN5Hp+&NN-)X;eJKDwga({`WToMvOGf~Z@1QbiAGKSlbB9MwfHv_mg zB{~{*XZTaNQuJ;i+$?vN-9PLsH%oXt&7DXnZmsf|d}RBQuwk9t%lJGJm|pgn5+3Ym z%%H^hjh~pH7;uuwi4=iVi{U>@_;fYH1r(qFeOt}xE9n%YlT zy~P?G?(<|lS^_p4n{i$fqCfK&X<}nMI%eXPtJArapD#nDI7$ku)X05A2N)1Uaft`W zs0<Uob01~lM9mit-5o{UU+@c3~zzqegL z0w{r^`>D^2&3eh})duA9YD+hOdeG5$>~g<|lDDs4Zz^8@bIaIKpd^@b%jb|&5bC4; z(=Y6J#fWxI*Aw1Q%k_SrX@d3{XR$Id6(wzrF{i9(AU$bAWRPS3C17%e%X z5hbV4j+r2bq1?@bD;n>-i4+YNgkSz{YD;M^=2mj?wvF5dYY&-%N8`0W!;Lq?i+|lk zNzObUz9UvX-aBM^docDXsdyrTiEb}r;4198 zVaFaLP7M6X;luEks31=IeYg^E=P!J z1;G9$>fe|KomNwm3qRo!k{Z4&iEj#(Qjx)7Cqib*^@+C6sLVl=AmZ)gd z?{8q!^O(zK+!J3ZSj_#FOP-!-1@)nMEI9rnt`(#-?QU%!3{ zd!nR)&@P*EDdAN_xPi^KDU8P9S!%@+6Qw~PoxBB1TJpJ?{-ki;O0tBx$_m&$&=}O` z1|8p*l{E#AH@<<~B$)fXVJy@8y zGG?hqAqd}l;lfTexrs?HaVo_Cq%7ca<6JaM0(9*8Ny)alzSd&%bL<`Nl<*68Yes4* zRUqc;B~a@&;wd)KPg+igo)g`<$@%*aAC|&fd}!7UB|pds;@0-r3>cLm#fjqW>NP8U zAUhOH*ao^RAu!;Xsa9;uoPIj+!3jI z4wJ6itr(Xy7Jm%eIsQJctb|p4EWcjmTjS5q!MwzoaM+8L%}n|cIZD3 zUldeg`&H4VbYXv_O}zSHhCAaWK3&C2mG_3ByJZ}XH4Q=$f|2^9(b5{WxmvBQqZ8w- z-EtXsO4V>u6Z95zo4hz;>2cUloGg+j9Op_;@vmoaH1B|%`2%)wJx3eM@Firny=Gpp zi#rJ9P4Ah%7&sVaFeCFW3r=*--8$j)w1>4J&azw8COhv+Zi7Ly?9DxKu?dZo(EVke zeS0@Ksf!$9RRGhXzg5F!hVrI8elAgqZ5bQD9oP}H|i>}&&aa$1=s8B56jxC2_Bo!pM=^pA_1TA&NTAG?EXr-+f<33Js 
z0Z_*|?S1K0g80uK0dN-#sB_-(v^$ZEw0B*5U0oT6!|kJVhniFpn=&VXOb~5$Jp}5p^{e9P(03oC!z@a^dmoPNfGs{v1iP*YNo#l%{#tphKYEG{lFU0-_f>kr`iPV>RC zFe>OGv-)a*`E+!=+?W7^#>;3*9C*VWXDVrR+p!acUHEB~_q)uts7|$JiwTIXD>o)< z*;CckJRoZpT&J9_{!|{~_*gSpSZNRutsC4V;$Nv5z}BOk@rt=FU>Pw=IyyS*PtY_$ zAZ;!l16WPKpP{MLo7DP7p$7>fRgaAr9_fPgvrNVmYiiTdlK)#a^f$ag1Tk2Ew*4rC zSHvbCd20greeB_Z@MWX&^m0;GM2CGPq7o_IzAoAd69X>0UhMg|tw+dCw{BgAtPaGW zEn$wD#bcO5x$-x4b-mQJbt1F>`K{-o2~k(IRPFirFiNi7zjM_S>9Fof3S*ERwsulw zBjvv0rr}gqgi+O8u$kB(m0B6I3p8{653=qZM=JA%4K-#%KuH}x+GJ~&;ajOhZ?xqeZ;B^BH&{;3;wyXReB$d#%J-^L zxK2aK=LkUqCPCRwBM={iKzLYW(iw?YU#4CKPe?uIxZO5*Z>>e9P~m==6tfaIZ`=>- z(b1e!VO;+t2mLwLp2h5R;0pP|>{K_dW>K(pAziqev~txfo;jAi6eNU4MV%ClFfjQs zW=3n6V3xR-4IF{|Km4B+K&mzEfaHsiUutbg{pT|!ZPZu#&1WsXL0iEFbfw#K9i5k& zQ#YI76N-1krJ}46r{!tn_pIW-Zf#(c4nuoijuc6GDYJ8a6n|;>zdQ^7%gHdj`N6q! z=ib+r$W7=irC5&{vDDpT%&9vBm~S;U(6z9#Yllubd$6JzzL^S26>-ozHFr#`JDMQh zms!m^F#@9Neo=|w{Q^>}R%d|3?NFJT>w9qb_O={-+8~|xD^-SW^R1w5m4VFd{W*K3 z5$!*b>jMIFj+<7WF7_D^Ga-;4laIM({&|@pJ{(sz!Hvhk=JiidbW;d2W^^;2LJ)xF z_*4}2DFtp%(<+;=exZ}-r@2F|KMBkx=8)LflpISMASv+X>;fVHrHz~R7+a(;n#+r1 zuH49k#6(Qa9VuVy*A(XHD`Wvjh$U#fw{wKft%01y*_3=5sKouw<7*L<%a`eNkodnc z6^Ms-Od7sn_Z)qV{7pscSa!%8%ld4_T>%>5jb<#27c|V4@>E6{kSAraJ*;kN@(^@`&=3JRJ4(hN^2_SRy6Z^ z+@@qK1=2Jd^ZVq<9u%iCx+wI{ojYy#y$7`4QXk3mw9cff`}+}H@a~9wb^cj_dogo^ z!AiZR4u+ZssfnE&R%MtRMUKt7@CO&AoP9FP;`l#goI@2U8KBgSN-J!(t8IQ)MzE^1(QfQ#)$X)^hAX2ZjoVcb;`I%ETPb9CH)}E%)<5OqlF7TDC`Qz!mSFxt#wi@kMS$U$Ji@Djhu;{QlV=|tNL?&r z;D2HI;wAP)Ky76cgoY*M zi(aI+M^EXrLurulfe=L}R@;ut)48%ZWW3XNV#I*o8K^lLB@b7W6JIk{W>*h6aQJYZ zA(vikkH4F()+sAeYFvYnkvAu!AaqqJC-RB>P+r!06lP&0Ui2Mzbk{kXMdd z{%{+4b@_iP92`#q6phq1F8>MzEPv*$~^89>j|M^*0&Q{&2OS)J2)@R_ByCZh~ z-gklKxNe>sN9$X*?KU>=`lNlP(Jpo>s`k-?VpS5_buhBqIC|N@vE^$US3g|W^YfkW z->1K?`@E(xZ`wb=CN!Q2)rZv!q936}g~BHA+aaFWu?rYtg@R%3jTMpxXH z9uy$zpwUW`rOD;2_puYg9mrf*!Kg?MR1WDe&C{vnXLJv+DE}WV0MU=3htkrt08f;f zAub_4!1yTUI&l&BeFXUxf1xruhi4f)hwB~~Rra%ncwx^bdW_y?ssxp-CuNsQS*yFDo3PbejMkS{-D(ZHZVh%FdAmB*Vox7NRm_$S2^j 
zWf`oE7;sd*$g16hEZT1o^*R%79RC83Y01s@&SOhd6n}kl^jhcT2AyO+HdUy~!yP+z z$iG2LVC4(Mgwo7!gG>|Wb`2O02ug!_JwWDW$js`kW<)9iA$pZ@V-KvviloGzmby47 zE-WLFs&*0xs3Ko`Zy#C7991ed@RhnD&6Jd63^B4Y&+T28zc1~;$~V$DrQ+a_^b{sX z;{ClVcvK2{&U&?gWk5^?x1q1Pf>XK;251un&jr>)8!nsjv@U~Jn#r4aBdJR9a8-|< zpzLf|IF_LaWN*>(ie1yc63mYz1mjBehL)R#`uh2a&@s{^Ct#IE4R)hw`(5aa2I48- ztPyrx1=UU2`F0z0zB%?i5Bq7{rze0QZM@Rylw{4B3NPs}V%7nc7^A-Ll{p^|!|ddi z!UFRdvir#JSg;2zbHv-)P|-03znIWFf*6NAM}U_^BKJ7*v%20gl*CCsMm>7STOn&> ztVi;n@d>@G3V8GuzUbhFVf?=qxC~fd4w)6t0oIbjj26ykfMi>ye5dM)wt z3IRYa{A@7}$YA`L`0&G#<%FNdsJ4XwlY!NO9&>@+3eF1n)4V1gPTCR_%5 zhOaySsWk1Pb_m(`WxP7Cdw>TB`W>$9!4||1+x?s%8`?09bpB}@ZeW9Z_-mukBgmAR z3X-wcb8pmtx!GG_g-36MT!RrX436FK>u2$=vhSsr*{~%D)D98T8fU*b`Nt4qffWCp|yyN;jA}@ehfcU29^|Fs`U03@i|;hDzjKdoMw*EwHFOkwKf1 zWYPb3pTK5E>nMm(xV7L_7^>NQ|1C2XWZ#1)ot_H_wl^(pV(>o=pN9jb8L6xzjDZW? z@WL@Uz^65W@q)YwL0$VN9;^WufhvzyNu}Oj-Jl(JTW)sms4RS6;=FA!03jruu)5#b z)2H`cy0pt?wsWRwr+SJXcc4@gnc2YOj&Y}=>MLnVSZpj>kWarEhljI2r-U3#5-Trp z&VHpKN{93E^J8^i(!!3*$CO@JB>|db@Hw1eUjqWTPYWR9M+lfL5dh{uARLdG6;qr( zsXy4Pn6ctgF43DOooEz90Cn$yh$P=Vh*Ue}=5@7S#G~*n-o)4tf~1F|RpABI1YQm& zZ$L3mWGeX2G(f+o8O(#6#p`h#f_E zGHKiv(5s0-t%D;QeIP?~AFxM4DV|zEL$wA3J~!E8d$8|vG|Te+GcN#NczAkt`(DIG zzztY&fZR0nxC+QA4;ieYn&H6~nE!(+h<9Le^YCAPZRQLubia1(T33{oL2<}(lXmTT zJBj{IiM}@CDKLd#fh)&B1q!qNgt5ITz~R6jONVY|av2q#@+xbY>C8WiCtKhRpnPx{??_uq)%O@Fdua?y~B{oc|NmW)24 zY!Eu$Lw26Nbgai_U)k;Na!OCLWHeScy3bRsSZUOohX04J_m1oNZ~wn5l_E4q2#FFx z!-woj5wd*{p%St~2%)J>86P1U$O@H^*-%+g;iL$aJ)+Ezy6(s4`#Y~cuIu;v-EQaY zJkJ{%@7L=&j^nY8X_?%!Trktro`3u}*0Zz1eGmy(lA21lL*c>|O!MtztfoE?b!{-F zkuT07LZI;}+fKU=(eq+AIcHgTH5^n!z6ToqImT>x9@?YXv}-hC`ZVG%{t}Dam@$#- zr=va~zbj$uUX~WfpT~SD6bM0?_mSr+scROgU=Z2hO6s=AQ$}6visnruWYCkejW2(l zn0}@Sl!(uF9-s^tE_{kx6$A3^$-g-Dtrj@6+_dU)5?cb+&+l^rhy5HRGs>T6nME8k z5^r83m}m(EbIkBqVEo|*h_Oh3lf0lp<#Rvl*uH%vL0+n8OOI)9+m4P-q@d32xOpk< z!xXB;`_GT2alGa-82Cc2(m34}?Oj`-Mlm{eomDR)$3EX#NL}tkV_E_^E3{q&4JSE} zitgY5*B@bq*&fiGdt}7-$?oa7`&;?`ZFYb@P-bklnOE^iC29yXrlj%*4P-(aM@yk& z0jPd4nZjtt}eYDUDi 
zL0=F$U;c2eHqF@%7b8*N0uesyU3@;fxH#JSOh$$t9gB}d70aA+2j&rl2S&E6+Ml~M z=E41)=_G0xo|Qs^UMaLEYsQmV8jQMs`M>cz{8ysT&{5$~Vr2HR%s*nzrx)_vPQ5Qt zYm{a8y{NgwI@N8UBr1|ryQJqMyAXS=F;i4pEdgH;!K4GzTytkYP%#xL5TQkPv}Ma}917xU|B9S54^*tw%8Cq31dLv1 zVe|axU~D!kTtjE=(sV!4H~JSpuT{WUlJXD)0IF}2Zl^un$*%iyg^7VGSiVlm>``v?<~Q3C z;E{j<*G%!U;#efY^RG}UtRVEQ^X|ls z)O2NLH5J3G0FRPhcXl%Zi$A^L(%Zm5ffC($AhFxALpJOwzeMpM<4_uHLF_H)i%%xf zZBtWIJ?AEoWfYk|J4}e}q`zIw-R|9IdZi4cT#DJgijCsQ1xz`0;6MOdc>Mdb*yco% z`1?7#++*p|d117G&2b%5e(L;{z`>^d{56}d29?ahE%)x;wd(K<7=j8`f)q2(@;G_% zjTPPCo9(os&EhM6@HH7EklmYTyUbLBx?pzcV&`<11-* z2)?daJs3buXSgIu?O0$JoYCz-rFQgDUvi9V?4`uHQ;n!TT*CP+<7zszo~Y6(QUp~T zXbak*9};=)sBXBEdYZfBGLH?}@StE?X^Adga)edb?rXT)Kfp0AuqJf{oN}%dxq}|F z*rYHq*{(e%FX(Cg?!G*Km8M5d%OrM+BXu?V>|F|J8D2}zYZiHe(#f{ZgoCBT z%#3O#7Yn@7$#3M2N^&eh)5(U18s4AEyn9%^joZ}SGL8IgrWCKtvh`ocL%{cOpl8Oa z_x}WL{))zv+wtI&EsXPk6%fa>?oVc=R# zus`aN?*Jdwcd!SIR1@k5P?44@OPCT#(*oMpFtGFO^+F%PB_6^SdIjMFI=OzS&)g^~ z^94Z}9ctFXT%i`}8vUHz&L!Ni?@H_2v-wPtr6t^m+VsiWp7Fn`KUwdxpqt&J^{IIN zd|0F3sMIOTiZhTNjX$sr#|YaWn_ARZqIBC(C?ri%C&K_yOH*swS{cfrtg5Q5uRdmb z$S;{EM77#>8s|o+e=NID^4UH-rrJq3dGb!9ZtD)HPhoOGTb6O3wmEG=8l$n^iezDA zpSZLCBQ<3|787#7i%UC@SmTN1I&QC7rT^d4p;IM|y4vXE7`^=h$Afn!FK!QqA%XND z3`!3nJLhmB-ru9Ar{@A?Rac;{W9))Iv8;@3M8W;bGv<@-?Ezg2NBxdmX zv$^RfsZT#=VJU#rZ3|A?@&M_f9eI@Qk6BVq8b5CJ&U8UBkO6nwY$pIL9kO1!gjDko zl~*Q~+bZ|x;1d0NUX{#qh?|&h#6X@N_{LWvjTD zud%W5{l9umPgmn4{zpwAz~8@|!NKkRfn1eUvJm`S(&qeDbCH*>viBWUyx5syQq~L( zufm-WZU1rz)jukVglSSv0J^0d76L*seO% zqU}Ghen#1a6SU5Jtc!jj*==T?*+yNH+mB-%*eoA!&syCc-MdS}^Sh4mZ-C_^6RGyD z(vc_b`pB)ZdqMy8-%a6ULW()|42%DI4YsIF%1dU0NE3Je%`VW*XtH{?vPMp?u-~ov zj&t7}IDE1~&B?4Ebb!i_*x=+1jDRDdnRZyGuG*ipI*#$H5Ej+Fizklo9akJ*4`IJ} z;ggb*>)mBA%yTNWIvET@Uc^5~8Yp^X5VhYOsw3&}{8L-?m?EB7FAI(%O!u8HwkDz{VE@(|B0(qo4jE&3yVs{N3eRba4b}t>xTde znvJ@D@80$!`5n;1z?`X>&@4|==n@mb_2)t%8Sx8tNtWxBflY%AW0x#k*qunS1x4`L zqRRC=v-HV;4oOHXgnkj&Kg=kSoOr(e8y5i8FAc$>+uLa!#!>`^D2v`0Zk`H=gyN^*g9byXpKlg5z~*c4r_ zQXGtZHnSPBBgsKU*zh)r__gJP2TtirNR=C}OgYf-ovv>dMmw;ud3%mRcvF2U$MBx7 
zQTg#nb1v&60Ot>r`ohGdtpBm}xPzlyWzVq2MNN5f84;brPp?N=jl1b`c!XxGf}4>Q#S-ne-V%V?{46<0&B&9P^!? zq@#4Jo#8zDfS|{`Vt8*hPeD^xH~-r>+)7{8aNYB_E!pjz@BIWmp}?P-=upgeFH9iQ z6hu^GIx-<69GB_}d3rM@SQ$)Aw0*jqQywq+sPvOZ zPn?+h?j5j<)Kd>_4@L)#0g5_pw3R3Eu5~f6IGowSihz26lsY0EIJFO0@PRqdbXI;%0E$?o(_O&->od zlp!qbppPpz7T>!UKv|c4kFsB8u`0ERKBVny+B^1)&_z~GFOO`al z4yk(t_jsCRgxaWFhiMZsWC#^UiN9s1Yg*;&K}jVGe|F)`o7+5Yua<{R=y?Yqd_R6f zO$1H#b7~#$hWf#NBhkU4z8$`)^JXV66$>%#Wlsl~p;#D^HI-aePHg{8d~d{Q#lyqC zwf?<;4M>K>T$>nH*2acEYeG}gxx(`W&+yFiR59=FWOmf;a-RF}<=;t_-h%um=@5IM z8eaM7RrmqPW}kPPOtbW?FnziYsS@gaa-2MDlJV zTvWft2@NFQP=GJ%xx95f*oa7PE*5~4-in{|{3tet!`uQjW}L@>Uh?G*qaL1D+V$u{ zCB$UGNs00EVa7}F$Tm%*Che@KbfN7&3V>mBgDlE}mJs^p{kFyw>SNNmpFl6nJ$ZGJ z#97jxIn;>e2Rujnv=_h$n0vl~aqr&a5*M6CN(C_U)?>3q8-EsEacO>Do-3}u9fP4% z)oD;2JIa~DL0vhRMnXmlUJ4ed8zPv{wmR{!Nod{MG^APLN6os%EG8uG8k+d4DB`SB zhO$@5H!$1hws>&_>Yda#q>-wziVkir!d4+X(X7cK6;H~_yw4)a6D9o3$+<<1qL-fL zh(lt ziS;XiVpvF%hIE;JFT`s-;gAC9^cqNr9Y=e9%cm%|V8HVgR&&(@iW!X8JuuNeb=RBG zwOAL=E?>*$ug6Q~KbU(H>(9jJ-++02>g#qlY~*63zftd=8#6O{l(=PB(B3B@6D7;_ zH4Wls>=#LaG8wIX`)24=Rd2FD#TXht2mv+H^6FTp>CDW{M2w?FW*y$%h+2c-7uWxfSRoBYrkx&VmfuA z%(NV}1EstG^Ro^BvDppC#5#59`^j#zX%j++9O6E&+rqhU6|B$ZCVe=`5_vWf^v}EL zVkL?F$ULi}IRwCIYBzF{$}EGo8=3GbdpndY64r2SQqW* z`4^wrf#% za*4m)(9w#cCy4<>s?xCcr7_)t^?Ks1*-lQOC>pkTQ5uS}efkXL{L22Uz7-C^2FbiPMc>aIJ8f;L zmg2!6cvFqZ~Ir7FZ|l@^)PHPD>6B9HzEZ{qA+lxrd(0J4!0$NMP`m2i;FTyqS(#9rUwrLaW4eO_``^K-0=0L!xQ0`c z>0*JEZ|HV%;CuLNUP^A}Dhp>5pGkA5Zn%l8M>K=MdrwR33I~7oj+mNWvzEahgPAIU z1DKUOdNi?Y2#An0kG?iJYw&ql%J9iHVb5~qkxeX zZBSXtjBCXTYFVgwhneX}L}VZp9<;&dX*Reo10urNtbBC4%~pd4x0eY!QB8Pnq_B*! 
zoiiN;RPl)ZXcQkB{Hj&&+WEsmAXeqm)mRla1~cIiT~wD}N=orxbiNNuKF8gN$NtSA zk?}I}&zeMEO^P)4=MemU^on1^0IfyDAUJ@wDoU|wr{^^*EEPs)1J`mJid-=io_UPu z0zBaL8bA&$3g?E7ZHFk1!liO&RtZhaFv{>6FrIRWqNqoz&`*R^W$IOjQobd+IDD9p zZO~+WOPU;z#Ua7Gwqdho?f-RIA{8+r^d0M=p`!Nj>E(gA8hp`vM$Oa-n4i0z&Z$d@ z6QHwZRDa@?Nz9Ab1$aunJ@=Fp)=F6TveR^Bb!J%L_k3nNl z{$(pjB4_rm=#+nlZGC&UGjJdIiv|ZIDG#koeLbhOOc$U>)-!|j zl1?doI}-TVmfx7FP{zzp3i^UF%)+S8G63-lm_8eFh8xXp7 z-~db?VK}Gzohr_unh&Lxa(GPzk{CqM6S4^eZnJRYe)U6tYJE5w z(~E53>_@(oO>=UNi0rEbYU-}`XvhwUeaVpefmokpDlS!C)a z+6+lkpMz9v{loemEVl*6IA)&%6MDIe@B_L=9N9LzXs%A5-rnLxrhU5L0!NONf7(=6 z>nGIe5^pjw1ld`F3U?KUU)l&S$an_ z4x)i*wk!U5UVb~6Iod$ASY8ONa$!h4+@Uxz9<3v9K&ZXaowRGu`LMz|9IrZl{5Vd* z8=nL+Lj|}G2(3j8!WoNJSOXTkWdqkyv%8N5y9=XXxH7xwm*K?Ah$(@HrC^-AmSkL( z*!9|^Lx*IFTXC|BuKk7Ys9{A$-|s$8S|W)|CXO5#T0Rf8k#MoNeogwc?&hWV6h5QS z#L3J_Z~y`?=QiUsHVXenv%#{v%Dk)Lk|*$Q1+)%LUigHP6e>yE zG#5)jtuu=wh4CvAxVXiB^0B6xYq`n*F@g&&0(Vh_>rD#{b@R80g&5{yZ1X{)WqIGH z;wY!7>~a>?Q*$EDNQohep&3<+bQ1&H@7T3V6m0HZ)EdjH%Waf$5IvG=za{yVkaCC- z{I~FOI~f~qj!271zyH@BW{VEaF6AAe=?DVFD=t=hFw&|P%FUIxSdfATH1$A^9Hrlz zx){f7THcFM50XnhE5M;~lXw`AIp{{T1CvoAP_sfj&RrvSmJ6(e@(`Y(*wz{;W7#41 zp?I^1s%T2I{Up(#(Fvo`|5zj1%$`H*AvMJ6LvRLsfMEcog`eT0YOs2ow8u9fsKM~w z4X_7;=EGwVX5IBVmWAQi zA}}xYHaBAa#DiZ zCW(ilw^TN!iWNJM$(TD_hZxcFX|M^XXbtrsCI$B0jJLo?mJ z-$i;v6pF~lr-T`LYbwq~tcJnbx9#gW3R@vM4@2@hi;P+hC9p_!tkt+?$#xvr>FXfv5`K@7L z;u{a$4}QGm;{Pv^@W1Rlk%bMv;2v>O!)JG#}JzYkg&;9@$S62h1$v)OS9}B{K(*dvTA?*E(*p2F}})irDk> z)7*S-4k?aCZx_L(NFX!+kI}khb$o2Doj&#Cr&4AKru}wl!a%KdOtx0!MZT;IGoK~U z&Eu|=;r|jnUPP5xQv(I*U;?8S-?PADmGsemZ+DI%g3zoJ z|BOQR%u(DW%#P&#vAac(!m-^w;T+NW^NWjtEX2~Y;T1ck8XtF(E%a0kr$*8cgbtm0 z_8jx%JpisutfmZ0TP0)1BtYoy`1Z8=R9Eq-%;?KQN2B&e<}tCsb^St?@2>nJ&hr$J ztx~jX`F8runLI>fN9MF=gvaA+A$>|oHjNpe^UDP78Ve(3@EwGD=FHC?kZBX3Y6C%V zhfziPq?Ghoj8LS6NxhAl)NGuP-jbXxL^L`(E@Wi5&}Lu`Kcp0Ruqw3!Z5$fHZIV67{_i3R;$>4Y1ND1|YH#hno9 zbg!}$GZ>^D2Xm4iySIiKaa0uVar}%KxB6^1dzI6Hd1@&$XTTIvn`xJ-(p-~ggs4ZEo_B=v|73wG8!JtN9mRC8{ 
zX^C7W_v}0gG2p|#Ey7IWhgGH8nMo@^B|T1geC%c)dmvMd?1ix`GnvJKp7K(c3AD=)g)Rdz(UQNoF~> zEG}2=8ZiSk`+?KNCU|eF1Pe^hJ$d1QlqYpERYlx&3bR`-Qdb86(t!7C+GNxxm0tZpj$7^H&L ze=5({ueo_?k!IPqXMm5AqCQSHFd=Iw$3T~rftOwbt(6d!Yrcba9#-P$OD5J>b2-hO zoEUPpM{_ihhD(!Ao-}^rwPHo3g0YRS{?LuFDkmFAd^udKIY~kJ`SbVh!zt9kjPG4P zjFPESC%^b~ig5H@hD~L*6ie>jgta76qj7RcMqeedTovKq93q=}NbXS+@oyQ6V1?6x z>G8I@D^Sab^uY2qaeW{%{q~`7$G~Q{c^R4xiRIh(mhlyuIQPDvB^5ET%~{?#(-y{E zwe)&39^`WS!!COyTYPVDHFvl0r_rBEPj>|!TC0%f)wV{TeqIKsSsdcVBn z4-|_M=0nr#;SDAqB#8>}V_u&@VUdF+%p%V2U&G&-(Ud*d4HD)}o;kBUDOhD6#9+)v zJFC*$d=`}3q6_Yc}_ed|B}X~k+07g}UJ zCnqC5?Jl1(S*X1ZZo)`3)vQ73+k|Q2rxrO-AKG99+;SZ53wc)AX}SS)I;t%$6F2nW z9iygga58KEOP$!NBlkCMSHq4&}Yov zz1x@TZW(SiNH9Z7%c}sfZKA`S-sYs%Va%?5zICYgSs8#V`+esPN$JPZ+sFvj6@0i( zHSAmP1_k7YHQ<=)?Y|M*A_g%3i6B2Qd-jadn944&!|QG{7i$3y%o`8-4;Wy`uo6=u zds=Zo=-o%P{x2i%4O-Z z6nZ*|{}MjCnJ8ih3>>&%9HWznG0H3a%79;R%Ojl_-PnbM$E?>28E8jpfd5laEx-bi z7+xA@u3?kBjO(3QUrtBM&MaJuIbQlxBDbB~*V7A%H2g|Ar+Eh5U)*D?mlZEx=9%8Q zuIT>P7glmADlwUUT}G<}N^#eQ$Mi^jCu2zUB^XsMBHMz@Q#dnTTM9 zX@b`Gii#A-2nih-z|Kj3?iO{EUibW|_ZZ)(mVUFZ0-+8p<1yffc+78$Ns4w!B??mU z-~E^bDCG2t_#1SwxD_lj%~c}~F# z^ogUsc4B;^-K0tP8WiGlk~aMO9JEPt**I>N(|K!8M+oyF4(KS5^-u3G%kViAS1<~; zyA7!P6p?aVD#XD{Jz6{@)NqdC0}ipK=$+?{)IhsCU_k`FMTV_bbU?i>Z3J0%A)Dj} zNAyER-%drQ3X4+&C`-?kdT1D(R&o?-9*n|wJRV}1fFGE5Q2g5w?fPx``#CF9@(h~Q zi~a*XF)TslKRq?pv3S6*Ts%A+!UVt+lWvbvCK8J2J!}PYwnMm(&0orFKx(AwXH-wa z*iedbAwzO2%q=V=vpe!SDS86-s(0rNJ*0b1OfbRk4FsUx9eYj!KmbAjog)Qt2P)Sd&rO#)i(R|hiaY*y1OwhwTbJ#PP?Q0yVm zL4D25v(e1xd9pL?{Qr_?z>Vg#coO9%eVkI~2`>x`y*XC_aT75%#DC=j*P?~HzJ&{a z4#cW^dj>|gbKqAT-=byFL=n@)a~qYg$-;INu3`)7E0c^QA*9(e z#0#mO6}I38HBO`=r+O~tufWyvL6h`Cxsh^i-V9TeLAAWdu>n`evEatEyl|444lIKf zmuWAt9;|K79*rk@`hzWDT#|_0JdWs3{xvtRb0{M-tvq7x`%iLsoL4Af=LX^@Mt^=)7fIkFTeLe)2ui1*X04>w{k zd)>z0#HZ!@$=6%hWf$?qMs42`I`9m3vQc5GXBV=z9D8!ka*X+F8%NjcS{#OQkVU@s z0jWknQSQEWJ^H1Rr#R-u0H#NkV-k~T9&a#{=Wz`i-944_HY9l@2RErtyKOY@c66PM zK*Z^G9dz;8Bid)Bt>K>U8l}6$LwUwIgqM=5X>*^n6EoCI1Z&6HEP&0Hvi#z4Dio6R 
zHXgqIN^N){7y&o;tNDQT;3sO+A;(vsWJxb3#a>dXJ=FMJLira zYXXD1BSeNDQe$hZkUU4InQgu?@IE}F+YgyMhsZtPp*}F*Kid{8ws zVnTla#FKx+&#)HSc;l*W4{%`6OKvsALsktE43goQ5V%aJFte^*npR%4#}25(XBUsn zA7T1v*F9cyM?~-3X|Fr;`Z2KK|Kk_~K!XOw@!jcIijOl;Vut@jQIg=W8#n#}z|j`B zy>NmQUgpGNZQRYKL+xMto3J4lkZQ+BCf7Wtj&dw!vZx+l{ej`sNX?*w@3&%drP&F? zh~0X@M!v~!4tM+7GjY7_8vwLg}(beNcZPk6J5o<|tF<>#*iHHJ^euHxleV|4E@xtULk}dYJ z$ZNWl18kVRBnxLLWT`}$gIUb85h*Ebs+yfhIzE(`t9Ijz)o!*;#Fo#W4>Q|DF(i_q z99Ph4E!JT!iyVzT_JbBmN6~A@8U=UTu}sz!9cNi@6m~+g#l3|0qFT)zJOB)zh_Y>O zw@VieB+WmCv}}W$3P=0s!;%*xl5T}b3PuSwvAiTE>8c=~UicqO;))z+PiZX&iisxu zGy+y)lsw8{%*)@syXiKNim#-kWc-1ZOpB38Dps)aS4mOcybK|&3>1ARtiB{Qq^4G^ z=&40T_S6I8AiQk!o~QR8(}H4IxMOqLl#{;YPKgVa1YNPDsG098G+ zjdfTQnz)N2vokcTUm@MH%y(K@NuNgU#!YRGG^EL68~rKIZiK2lwf9jy9n<}{aDK@g zmPd|MU!scZTZ4hmw0X<1mAnl>?*h(oi>eRKD4Mxur_-N?1qJm#Uw0I-1_jZ5{nsFT76*92~A= zA=$j=6`fd0F!{l!a3ldc~gU9qpEWSa2jLu8b&m3Otly$8Y{@)(+G3>+{(P^SmeIS~Fu()Z9@i{fMtc|r0a zR0F8J2*)Q!K{f6P4PgQk22whp)ZkK59nR94zXyJzJ-@1?E~#&Z1$$>DR1?$dVzIs` zj^Lq_SCzY{W%lM2nXhQiYh*h)7|=!A{QdV%Jri;`*v0o_1pq>!{(AZ#?`j2L^g;c2 zuOl96eV2q}wvqV*-ee7s^Q3JCj~1N4XE;0a&-Zrw4s%@LJ{u9wjTy*eI=~L%%hj4U z7p0k`nOO9psTJm>Wd421UjEJexZd123_=bMo<4a`6NApi9$qItuaNoFkz2ORn-FbQ zL@<}j=~?V=D8Q`T+`woLU^A=THg@q?T(k}B^+qL}kVuBkpXtI9d8W$b&%LZAZ~4GJ z0tBcLMz*XbzO({tb8$;xa93%u*YyB4l87bu5c^j9N=$6Kc8=M?!&KVF*EpYFp~4>W z8o(y;21h&o0TT~3S0Jxt__T(q^eY?xgaQ4XJjBRFN?P?KrACd4OG|G&#C9cOlL-es zHutd3<6=P?HT*#W$_oy0s>A26{+FZrtp|wis?K{o|EmQcRAYqa%gRcBO8aQ7A%wV! 
z6#d}lpdcG~d#znai6k{`E8TNGCCt%PrC_EQd0ZARE_nA=+_vC6i=9|@rM*B&!2`0X z>=aOXvI$KaTnllS0fFiQ82EMs26*er`o^l^_auVur zW-RnGn%1P#8$cIf)PiV)ESmEE|5aI!rjDVQ&$x!H&>v%gWw?wdfptmyO@0{RF3%se zAW0J%vvWIr;%=bX{m<~zPHT>9*>_b>!zx^)JY*hN{fKYN9=@w%VeFr&cqR9@#4*u4 zMIV*g2MWwcAwXdP-T|3)&Y77?^u@fqT<4Xpt?|rUd(Ukn%$AsXR2u}+pi*zs);!8a zQxld*;Kv_ZX$|ZwH*)d3pl-n_o!-?=GuXw8yg|S$551}^{`p7!UE~ht(9M&Uu(e$ zmO6wO0~I>Lih87xl4Pdl26EsDC#pM(N0t`+iUmytT%hFU8lcgvS$}mxn>9f00)eXv zgY*U%es~2y;E8xnbrT%LaH`dSM3ye&1a^0cFnQ2&zNAA4jvCoA=o%X~y{rFUy5G97 z-ZxW9h98uaM_JYfiR0@ubZFd0AH-68j`%n^rdj)T+iTywTd3p+WK$5^Ux)hK{Vp(X zBxrg{!)eK{>)u_@f>6%iuVG69IdQ%S8<=?LP|!3t(wsz&X;}($BR7H52Tm>G)O>$Uv#hr~8 z(%zDKG_dnyx)G_3yqdFo(I(DI5a%AVxq5XfBBRTQkrCF_I@pRateZ!s)1H|+r3E(M zz?Op)&%MKulhG*A z&B?$sBQBxs9%O1R|2g(|#OA7-VRcHiU(eXwY2;M=xCHUv!dX_*}W2Zp3b%IkKWb6>Jx@l=L}Cj z4JN$&McR6yY8_Ze7PaH~{)ACQ?cFM7%!XL;M-C9(H;jkMVtum^?2#-16Kl3=6+}i$bm&&-ZWJDm3k3QnYR7Rj_L)kW z?m;gQy2~82d*gxF=l9qO`XG!PXj~q3=I(in%SGFBARKT}~y-ZCtr1noy zM)!c*-(sL-24Jy{ERY%QQWN-K5`v70Dk+Kf6rbRe7lj>A;jk1!`cWgOxMCJ^_ zto?hKSEy$BJ1`;INcYqvy(^tQ?BvZ#*rw_m#^D6Yp?`^#Gy3phzPkw)081j*AfTbE zYIulC70-e)JIO{@#Y)NN^@|q<=H}7eviRY&{Ie6=l9XNQC?!Tmq}Nj zdd%X>-T(9zXQMcT@%G$^Q2*UsX){G5msjiWN<3}(zYAABvMO#oL^v93(SfyY*30BOr{4#VwImXQW!+3f+LTv0Zs(B%LmxA8=( z*Oa|?>60jY-?SV4uT|)kDQQZ{m_*q-)!eIdh zZZJ|8-W8N~-k88sZ!a9S*Q(|wf}m4EW~=%HVYUs+k2i4Ree%(xerOb{UQyHaj~C^^ z{DH=b`2?wl$oRfJ3|NmuFPC)nUle`kTdvPoRP6OILBDc@&n&hY11y??>#6odQ7=@}1`nJ) z0OxD_$_P0q8`U3V%-CRjzh2)JpCy_rn*usr#Li=shEX_hWGi2GyYP~mq`J4wld$W* zDPj9~QB;eIMW~Vinme;9{WxOVb11v_h4aX=0ixU4i&=#g*Ep2wX^H zDE;zjGr$nD)-Pt34w{UvHsm_VFqti8FdLjg3~GrVru%kcYMb6OF9h;jTzZs#_kAMK)lN!s z%uZi$PYoIF860FI&H9pBGImeXnW9MS@*z}ho`e^Jt$5l zeV7hC)lF*^$U0R+imyl(!~$8m!_aG;p(6XLoDNF4HqoVJ`#`Fz#AC<89H+5+0fM}L zAi>0+V^`k$LZhn7jf3Vn`t1K+A5!J?9)w=*#)zR4CQOjg9Xg*mONnO7a{t9mh5>hC zuw=r~plKBh@u}4m=DCneLK$KPMPpP?qQRCyKe&LURN5pXi9z~3w9WwT!4iB~`Y%K^ z>yp76&E>P|F5w|^h?xenxa}`$B8UomnjQ$){yjS~XeM+^RHcAU1s-rX3nq0*4%_(a 
zZP(SUyN=lth%L3|Bg9vsD8s+VI7JF(wLnrp6(MSMGje($G17NZasun;b_x$h20od5w%ENC`Loejac^ z$8pJ{^w6iTaC(rMwCJZX#5t1BO%{r%sY^2OLKi- z`Yf?yfXxhBaTsAME#i*vz@JUIdvlf80TO+2?nhpJ{>1%Vzg9pRn-(Ap3?cYQDGMpo z>5#>nPN{g~R}(=OCQZG2c=ThM4zsm|6$}D3-mb8cU>A1bm-~P^R6@7ul7Qe1 zh@@i&=&XxDE~<2Zi*}YfP^VCme}1n%^-`fo1qcKL2rwP%SZR900THQkREQ~8jLLS>f)8&M3&EL=FiFQ4Gvb07lO z9YrOYMGonwe6SntiLYZL!^2r_17@_6#tg_!rxtKw_g9!fO#)o_fE!hDqTaBM@;pX@ z=8NBZff1-}clQZApXo7Iz0v&QE3E^x6W_d*&a+wDT;CxGAik17l=B&mU+}+e;+-;t z=YRGV|08PCU{aj7ABHKztZMR%-LAn$Tsx1wZVsh1_S}X40eUow;Qn`*UP1chPi@!e zE7gENY$Zg}L{d+?lAYvuosh>I?zi282pDpmH!pwvgGZ0T_(F$1&R|Q*5@IeB&pt^M zIu*T6iD<-uu)3&yM1D=$k^ODNF@~c1qjpa{BBDd{Rm3MKBd%T5s2>BbHsi!(gCU0Ibw;Uz_W0QLO5^Q20LXhJ2}+^#^%JjkJ67 zLM3V`d1zexU6#DhY`3iGq*bY-FK^H?-s~Uf^^y~Ihl<*6X|Sn4$F;E~=#WQ9C@up>-2a`az^tsom8RwcauhH;E zD)tu+J}q}ag_v^fa{MvqFt>=lkItuIZ{|ID@?<%T^Zs+xn>TbXaY`}hz6HhjyaZY_ zCD4+3M<`xq4qDcGy}PfD!fmcC;hAOw)_g3uNEn~D#P}6V6<--mQt3%NL86dz%0(RS z!sfaE1g>8+SM=;Iu12Z{l}*rwXQ7YSy-DO@ybp);bQ*}H#iGu7Q+D3+uY;6>;ujp* zOG1O0KHaUc#+NT&YRDj=P|_@+l5GQofB!k(K%YE;ZMk+-rn-yVQvFR#AZfO8n~y46 z%8)PD56IORNV)PH>W{&+#+?qSnke|4D;te4Tc*0T-b5n5n+Ar{KVhmlH z)}LbpA;k4^Eq(ohX5A{4NW}ja^kdH2i2!=i7B|rph&NN~Bat^k^Vm(rD98F{zEVbp zOr5O!r4c*_2*xw&@Wj+UvRXQ<&ex@+lhyU^(@gd$pbLn8>BAJr7q)Sv=7 zT8T7M#KzE6rY|(zL98WL?smVAEDAvbo1Gs4+r`HNp4RGe4BZ%zyoQFP%z?GV^a(6Y zsojqiA8@{6zV7xP{xcc2y(@NY>aocw<>*NS4Lq?!XmD5!iH*zKMSR4G)tuXbz@-vq z2QO4u^JM%N5!{9xNK2V;(*e}*1h8hNneq`8tfT9atZ*xWuWzmSgD)}ns7BL{M zkXqUrp>J1Du*L>0(?1U^bp$P5%_!kf67avonM9{lBna;&w$QgypyERMUvr6tdH_+X z%UG@hAkzTqI zUyLCXN;v%L-cxMDUW1&eVAkWb9+CDdorh&=A^8eiN#i#ihB`-MHRK%PYgd++ev#(m zE4f)EJZr&N^JQkLgu2E*C@$7w+keufY)ztNcG_TPUdIG7XLUZ;ieL59HD|7q) z{muDQX*C?PFFhx5v{j?%898P9*{yH)4OnGMDuS5$?rha7(5|^f&M{dCZUqq_T6B?u zm`K0maQz@4B!VsTEr2&!ckXPtO19FYh)j+T(SDw*QeSURp*Yg~9ErkrCZj;rr<>ub zbZL$MPn9Y{c_;XkKw+tQ)WtI5rTOEGix*e!-LJ3{Zta5xJ`x)9;P%H@t!^w7_ntvp zoGu{(=~Daam*hVD*o0wTn*DG$iYTHwzYIdxAW6EW2MJZ}*(QAk$xXqQ{0jau85!PJ zyX!yXH)$6D%t*=wsB^SeRQ=&ixlh)UC`>(NbVrpx?9_+`B7zXVnnB`h%^>_7 
zf6sP{aRl+hm(Xx|ULPx~n9${72FH@w;x+&|U}QKI;FP)a{5F(l9Ybmxl=UzfOEBDZ zTf`Zz=ujN%F}V&`AD=q&HO{*u2Iv zf7Q<3UP~{ur%fgEQEs|%Hv2&8bQ9%On?!1ZM4p^uEm{ycgJ#^sd6xx8)j#=uNl6Iz zH;vjq?^GP-CQ`YtsyJ=BZ*| zJBf-9s>!fssLq2v{(sj&fIxtDTYc)tkt`~l-GlO6{5^WKZrL)6sZsW~Xh%zvB2iNG zn9|Y!zl1GE`R<3(hmnFV;@3u7sm=<8FYV^sI(z)IB_V~TB}UOch|h^^9QFML8aODc zq;HYc{aTKELrL+~8Nr2>fLYWdmL@#t90s7`dab(*>LC*$;;(qUow#(V_L5wfu`f>; zlPt?OxmM0|7OJi}iWr9X4mN2lLQN7pvb()NPU^~C;QotgP7&Oq5>=ysDt)c?+0}fp z*abo$_ie%P!fA8PQruLd5{ zumO?U|Agnj`doQY!tel!8+J}c`rva^*m#f zJFk6E_52lwr^q_;|09Ne$aR;VpivwIrU#XMTf1C2f4&V(QA0}Puix>Le%l#my?=J% zklp8SMxzn!(lmr(mmC4Jsfe?37>jn}3!o;?z%^F))885kbz8Nnps#YD1%D)lUd)2` z&VJzFl~j~ZIIsUgN73d0+ng~OH!~zTo8B+YjEKH#{Hy*U?HIhwcii6Q38aEddEFAg zo#|xZ+WHJbK9C5cbk>Fqp-{f@sE``ZY@-mx$4}Xh{_%{Z-C(&~9J7Gs${KtOu_}iT z#UQNF#v)K*1>P?lw3X{_Aj`om-I11|v!dLiQvB^d0e>cagEuD9Y?h;wHz{~o7@Smw zQK*ZP`#~LSp3nmR>%j)tgEER?xf!{VTv&A)2$DW*R8tj2XcrwsClX3KYBF_vJkl7~ zmG+BuW+w8Yi$;-d6?V0l!+D3-OwsuaBHKVUO4#^R5Pn+>Bhxw zIB3rVw%rJnE1QoDUTh%7KV2L4El1EBO1z^sg0w54E)xHB38AdT=;i&??C<6nC-gJ7}!XP z9pj7ileni|W{RjCkwWR%Vv_#2KfA3&k}(_v{n@^fi!+!}mQw%i{Z!za@h!?o@ol!0 zC5bxL7l%vpAdx~aokgP$g+dM@n6>rE$;k}av98*dnK=~wfu0?#tvI{R`f~&ZF?`d7 zi@RPuBxHcKc3{{EJ==lwgTd@`4A@wi?Dh7p3ZSkZ%$46s%qSoydfs#?DIkZc&J^u3V{POdTD7lb?Qmz%#*Znr zrer!2u5-c^dBDfj`;qt|a3>ugL|$wOXxwt94JlpW5a;#}Sn?jqN2b#Ku=Q36dySMh zh`M5Vc@CVf=*HeZ;nD-)F&!TmxBPd)HrCxRpeXt-;}V9Fb%_Gd^@Cxa@iz>GSRCVi z)YN3lJGv}79?_SN?!hl$5>5z0FtUZ7fnUFU69H$ZVVhH|wj;og?Z2|#wX<=b4{YpQ zGODtFoZ{r;64H)kG++Y5C@gj(X+b4xTREqD{kf90PlsroZG5=u@fIcqBy|{sCT;zL z*;s2h_~<<~^+HegbSvk&eH+J=tA6i(Ruadv@Ik~}%;}|v(wZ%EWOpXm7EN6^ zTP)f|uyiKb6_goF|82N>Z%m8}?VD+YvO%k$Xscj1v!wuN2!~V;>GQ}CbNo{>Fx1FM z*coKngt`M+D{J=zj2!{L{8$FB2GgRSd5j`LmL^YJ%|5{yjl&aDF$_(y>; zh?oD&CGcggkURkQ+QoE`n*0egE-c)33vSqTE)m{ywyB24%%f=Sj}a z5nR`ygC_w|e>xG>fAFUISe1A*X+-GWDP(@`8tE96xr0wJ8hyCG4FxIla1J^rbCzqn zbRTrVeyOH*tC~-FaaEw0^`lcnFi6a706kdajr1N+dySXglNdw#G>+6ZCb_>X{VpJ2 z*n*xkWsei?Y%ZY&^QcY=b7}M_=0nQ~dfQC-ZhVp-Jm=<)ZoKTy3)e7Fn1(N)o+4Jf 
zN>c`lP6yWFNH>s?0gyu83+$^kGePrE+6-$Lq}W#3Y&s4p#811`h;cHTldmCHs99K+ zLHZIFl89$|d}Vo#ey|k;W#qKrOiNAM2BTvR(N>R^jR#>zpsbwv5KzIK+h*RcN5kcG3$C(Zr_ zXcYRak`G^mOYxJD-YRoH?uc+1%V8l|#8*{ec&weBV{?}q3>ne^0;7`yrafO?l4wt+ z9H+w!VtDP;JjBrZehHX>^H6k^!>j?|Sg>L4PbxU@uSYNtTe$L1mH(4lX4J#z*rezT z$^4<|z|d!@z(#E)azBIRYt~%jptuoBdOc{I-xh5UTq&oss4Gv~%~<5-ro@z2ynG6s zz0%?GgBdqsvw-%ZQ;!~qKASBJ0d+nyMkMYsh>l&T<%2TI@vf29@LxHXL5O4)$s8pv z<*n`+<8We8AG+UVIlQel@V&(voiKfR_ZpS2H8mAaW+1smp<7!fBmu~!Zn8XAy@v=$ z9nLzQS#wivD;YU_Z`{!3YKOqeM0cUDuU}$=r=aqzg{#uj`jgVqn3#m9musze%TyhF<4#792tno&TtzcjR7&%~h+C>qbd1499Zr-i0Mh9xpuM)SFxn05MG8cmC~3k(o`{)wW~D z7L)+*;xQ0KAUD;Xh$X_09Kxm|+!mN6{woVh<{@8{OiGklHr0@l@rMszyZEBKTmi(M zdRyzdT2T)N9nco3BMk1}xG}QQ#K3`V(9r2Od%jX{rwUP_ACu-wHY1}yk z?s$)3T2rI@2&utr81vK_AW*l`E z`q?&F_wL=RAUVJ9OAgIcpc7RUARUejO)vh-fH_$a(d=a*)P3@VCR>aZ1+x;6AoR+E}$+*-Tr}IlUk^yQSCq-|p3v+R>L6F*28qNf2(AJ~$)5J2fNz ze7!eLVt|7WP$hXxTZ&Wk@12SJRuIL_G#o}Cr37f-@FHp?{oyYPY6+uZvqk-##Ni}? z38EwGCe05Z?!5J9SOx7?D*wKOIp5+voxfBx&z8^_^*^CUD+X-Al^HdEa{6OsU(z;$ zfp^>;7SktF0sb11)9OSDkOYT$kwYDe9;;gAreW#Yc^8lV$(*s@?%x<5>XMmFc<9T2 ze_vve%CuEc>%vESeCf0~Q(287bU>2gJYN3vj076G3sC`-b0kaGSAl{H?aXvv#F&wi>@{!g`t_qfo`nFDOsDIV zBr;7T;uW`m(5p)ADa$M#Q*HLSuDku4hgNp~qk&z1W{c#xNy7}|sXETDU9*0&wQ4jV7>MBnA1Q&<+ zGmjRg)^hKjJ?Lz1CmI_XD!~fw&y8?5fmJ}GD=2Vp$b&)tE3V3#i-y0O>OP@y?ghH zZNldz7c&8GX?rtdOfo{>`H$}0X^y7MRsR{Om`Gv!Ta(_T%5E5yDSZs5*7&x%ZQF(+ z6w=));+UEmx!slU4?b+z zU#dkM4b?s(vZqpTb#C6Qnb>g%_p(j~njKYCMbAhRBlGY~nXF?<;^k718=)(eDUt)GMRZ?A9V*6+Nvqa+4 zR5^9|XrCl%l99=6fdeC1UNx5dIy;Zwx_tTagc&oMLf7?bp-1{7L3EK+4yUK_dE0a9 zwb=?V$xJH8DVdy%+IOF${u0to{psW9+K40sDdhZ_cSYj;x#2*+ivRDFK3eAvWgHik z{FsB8EG1+yp#sesF+Q>!v}5snwNL$RR_#SB^XysNF7LYsM@oX)s*XTcS=98U`E~2F zbn;)gX%5#DdUMZ>F(F&fX|i9%NTSpR>>sC4bpuB8u>y)Ane{>X)W7)0+! 
z&g-lq$s}Rin$1g4U+sZMVbZXqt}rp}-@lDyM$jual<|B^ujpRf)c{9#)+cXe>vE`u z84^xlb3|{VX2WF`C5Z~U+_|pRVmgyBg$L*8b7j0hw?Iw{j_=w!+PM^1iL;Yhv>%|% zy}Yhf*tcU!)ukmRE+BNueR;%h9-r%dW;r$P2u{o56Epf5{~_;K1e?i6M8%ZJ_I{UbCKk9fYbhZYdE?^p%rhJK2gQG15GZrFBqU)UlHF( zP|`L(ZXyO<`YM=p?%as7@P15_%QJshY!Lvg8l^=mV1)axJ~BQJsrO>@rahARo9YPi z{0s~Y{ZE|0X)lGT>$Uv+MB-o`p-Y&g>mn_=OumYZwJJ&D0zl2^JJOvT;y#C4Z#=?b z;Byj{WFXlDjKUmwc}LOICO!9hTn_ac3WkbfEXYgPB;oyjM^UUm0SQ#?`m z-&4UGVX%IFl#@Qbmv5+AQq@C+4giExZ-cw0U#R@JZtYr0M;PgW zTWH&tqbSVzF=ty6@Is@vg{?I4u=l5@gZUt*y+x#iNlpEZL=uZj-JO6 zADL>uehhg8+l|L0uI3KfGUNFZa>559MrQJ>RGd%xrBATu4(JRlB(_9*Onn)vXQ9(WYAS zbv80AjrnD27IWO(Jb!+kyT2GzgU;F0V@&!Rdvl3lM3BY2_boN{$$m?Q7A(NYSMss4 zQVD(}|D|AIw1$iK{r>s00I~du@HUJ_&|H;gPOl;FXzr68E|9 zU<{J!%`6AMN-*;Y)23a2?Fj@{TU+btJP4A_(9L1FcVcYR@X z)u{fZj1iJ}&E|&t&|dJQE={FDt*tuu?1<%u2PfJIPF?thU@#8%n~9W!{U=YEF=MpG zy(=ES2>4(5nM_r2(aE5@*SMp<@Nf)9R=#>AvQedaL_sn?TlpbdnYe}jhpzXI=X(GD z|B;HQ^ioFa*zH2)Xh$f~piWa|rKKTSCkY`U(bi7;G!E?{n^GzoCz>Ksr$R%d^1I&8 z=li)_e!t7__xt?u{=DBUuh(|o?FJo!Hc=*H@Nw$#)V&c zOGh6ySym6yoIhwRW^WG7`Bqh?gED6B{7#fw$i=Ovx-FVF@6`(YK3jTT(3<4tS^btC zfSe8kmx%;UaM1yHldDN)H>Mhy?8~)v-9kPCQA!*VpJpuk_-tzFx99{}Rj`T>5Ws`yK$KKCs?+Y~8a z;5-2p=%LhIX$o6Vs8zxMUDILNo@{S)Jo$y_y|W6Q=bc1~NxZGiQIJz1fu?N*XOb?% zk;JPe$nCv1Ydujn%+zT;I?0%xMFus!N>**h(vK_5=g*sW5Lwo#YVpy@gG=J)uA9tB zVD+C35GuGH>lQSGFaX!*=U>Je^_ym^u>vOu@ZYq|eUXkIS~uH2{#51v<~wjPvOo-gxuzH3k+o2?4u>%D!>n`I-_VG z5Qju=9W#nZ^kkb(HMiJ)`SOd|-6VdZ0R~um)aE#@&I4Tdj@uJpy=dNVD#XdcIcoYj z6}0nX=Sxt{bezJ@sJsH2&P2zhWMO93jEBLyLJ}J&#|D(Md!Kkk1MhK#N%hO6wAH^6 z+V%Z=X9pkR&o;kW&);5}kjwjW;+)R9E&TXx=DCzyS-Di-iEFLGjvg^o=7$!&>KVEc z={K|V%5m7Lu_!tIa})g}^k_;G)(qK@)Z}#-oJ0Jw7l|w?rI6L#aX8_jJJ>z*v!g2HN6GUR4LUWqMGWz148)zx2 z>AiUXwZq7*i+W$dlUmL6COd0P#3s#ivtM%JnIdP`FgXp9-EqI=pIq5WNLpINwJ!;e z;BReis%`JQV8Oq`mKST3Jj3V?s{QE6&e(yh z6aX$FxRj10U{0I&|I+PLgSS^)##Q;C$s>4ujgU%xbue2^jR51Zh4A8{5rlFd-v9Cj^6Rz8L$&`b|dKgDL-t;YAfX0^}m=>Ho}N~p25!>L0G5B`bzi6DvVkhD0^N+ zG0clrT=hOtF=9r$=kPyzX|;+RhZK=I)unNufsd|qxCi~nllfwq%Xl?g_9JTmMy32q 
z@yek}SbIs1A&rpECjO4<-8-Du070E5fw^aov#;DvgIUGb2 zVMJX=RLZy*?{qAF_%Mud+Ry4WWtcToa|$0oTZ`0m?qG;x@v_tI?g&0`_r|A0RI5sGHm|;>9(W1Bbcg4M1SS+0p@OyfLX=8}&4XM`A!z;+Vr^ z2`{bMO|?ZVYeFaDyN!|V)uop~#v_iS`v(<1dqpDPn1H563D0`67>Miu@IBq|=OfV< zue$pKBtsgsKHCfXxh>NGima*npynMYB4$havz3Thv92$glN#LPlUC7y)DeZ$XI;?& z)4#6LzQ}UJqFn9>?%hOHk|?coDUh_2$?vaclmZI)&Az4VPNWM^1=o+*ns z5(7MOm-(tcMySU5rD?|j5XM-jO~6NVGU(0BSbE;21|vJZU|^BF7S%{Xu&cj*oxPj! zP1XcD7ciKKq^WKQgo*{b#>S?K@rS2XwU?*m%!!F$HrYH3$tX$A8qxwefTJk?f>v=~ zym;Kn&1hw~giA`C&U>g`6 zNjinl>>Pg#Gerf3pow31V=E%NNc{fj6Lrll*|c!veg@^w=vH^LBaKGKvi&G%pn zzXk>ine8n1a=cp;I&yC$w!;|)5X@}8iDruaG=m;i0=4rBQEu|pen`Bx$h*H8K7E>J@$BCma*=_GpuysBG=1^kCH^3hMFuMdhW{bu>By zcqdD4&bE8UJD%SXvncH(@1Q~S3qrW!x6ECsK6_$MbNB=$HpL{{e$qWOTG~n=Xf!BrfS}<*Tl(lHmU?-T!}b_>D~uOq`<1D^0VZ&Zrz;kV<+YGVR32 z2lT$`>Ux5okmC0$oVW%M!FJkv!3o)#KK9ObKD)g9|9&K`vv`t3s$w-=X-FY#1p+Hu zA<~J~LQ<{s<7djM&jrlQ7mwiVP!+R9zgLFc6swWf>B2aeu$*3|WU)!df+eJpaQ-k6NlpCMOllR~va|;pYNaI}E|Ev0_{1)N<&`{_S2<>=sO#TSly3Fx4Y@8<=Jx3KxxDg_*j&K2Y_HS&$1z*fB{`7BGHHvZcU1oknS0WP7_24&L6o<(^Wkmj zvwOsMJU=T>O$oN=;ri%^Tln4|jBuS2u3p{aldLP3iEM}LW6;qtGwcfG zH#Jz=DloW1ZlfS4>DKZ?G9$)3drpVK%WOWgdDEsINKrP=7MoEBm{I@z;M1EEFY^H7 zxhwfkPKTUgfB|bgMgEwZO$+;s^`$!JeX{F5#{w2C6m$tOh}rJz}1DR_}Q0B*Ao z#_;V@x)7wRNSo_%?ROePSpXq8ESIJ0;)yhq2*A~}j?va5&N4s4YD~vwcky)AAbZBR zeQwDMLZN8Iqe1|x4fI2yjnl^s`xyoUC4)5OCkr0Cn?(XC8FZmJp`P##5}9%DF%5zw z>(qzKo(S$Grs6u;5bUJn0ikU7`vDqJ`jXTlC^s)CFwIyJNO1lc-Xf-^2S0PbysL;4 zBzi@+H)kCPyxnWs^UX@QJtH^~$46H~feD2K%A*;_bG~HhQloWjApY5U{8gUkce9x^ zm*D-FBxrIDK#hsfb0)^Vg;93hRqHXyWlE`AUHSO*^!4m{9Iawcd{BKU$gY6Y^K*bt zpda?cFBWuBolLT5Lq&IEP%^BszBLum zG0EiOKy{lG90`as#=WX1%1Auizv>w`NEkSK_BvK4$v@rhM#xR?mNLq4V6p_GcTE6c z4K_?_%jVLJWHas5r-u7X_|Ln+`iA;NO|^8^^`eL~YaELh>D*G|z*c%0q#mbcT-zS4 zh7f8Ayi>D${`LX3YrKeH7R+R9&kI0|^d=^DB>5jM3IgsN?Dp*1Y7sQw>`@BJ#gSi&-f%aZ3q+cc7VkLa6 z^l!>XxuEY#pEern6#^fKgdH5fc&^s;m)ZL5W^OK>q4wLXcqj5}V0LVDZlT=R9hKeh 
z9Og#xK{tEDc_ev5RfXKy`2G9uc~(*Tq)p!+t<PKdFYw^yke}kN6z==M!8{qHzc2*nStH|2oNN=k{du6up02pSqj7iPK6lSW=b=3X;M6`s|8iJBlU!k{L5+pvnb&pE# zv20Uz9SrWnj|pLKL)lrb1YbZsP&SJ)gRlcCWJWAkCF{wPT{%CHwy@1Q$8%iI(^@sJ zsYhIAbdPqO5SNvMp&9aw3S$Rno*jdpF+M z+Tz`JE;Ynzg?YAQl`+tY5_)-mVnJyoSM3bXyR6oNLph9*?&&uS6X~?YD|hmRHgQ5H zd1@z4Z_^ekO+pq!uOUEH1aHW?OdQ>RGz~sWC5D=#okBRk`9HPpcx!^eN8+c7?*lzR z{czCHIifpBcSuqyH2^h@65mkl1m&~G9zhG=}SBaqkQv0U2s&}j7NecMltm=qYcAr`U1U60;193#6%Sw zqmPNfwCP8zkuHjUz1Cj-F>KP-l2$1>Ptxl+wRf}|L0b-tQqg22qiCi32 z(sQRy%^uauItEtkN(8bAP$R|32&gb~ZJ$%VsuMne-WVAe1Xm?p_qb?1fd&{7VRL|< zKm@D0@n6p$Y}&(2@r(CT0-I}3^0R)`m9;+GgTr(ril_ZvkY=!sOW1w;Vhe!FqJ9hJ z%`Ih@kKvseIDptoH3p5qJj1VJx5_`T4PkYPS5VIQ>ViC)a%y?D#p;P@)pLl z1PtoWuiws~krO?CAU z)SJacd_E_uK=p>M@l_aB{U9}?d@Cqf`SBb!#<;w9KAZwOpd^2&n|^}1 zhYEDU=%WGrYDb@UkN^Jd(oXdbgVD(4?;W(`;$(1)qisFNB@qE8&!(dpUrh}%3QETN z|I4!fKXZSh#_)NoryHYC0EKhyGtjrlT$*Gu9h{eQ#W!zTO|GP3lY@nw;oPU3REbbI zzCdZS3%7if9uL8l=(Sz*AW9tYnon+rpDv434XD7tNWi8DrqH(^0chNDLuyXMZElG|5z`{MJVxi&T4I#4u)gICu&TkbRn7-*X-wA6pg-jUXofG_;Wrx#*&4(H0+ z?XHF0&wJDMUv-44HS~q;D@WSdEva7@cR)H04(2%r-r-KdUYTEel8v}P7I}>?kn_x) z`vVe~*#0t#D?u=31`RYe=yh}dDaS%@^5HuGMb!Cn2p<^sB#W& zKEaoglF|tcpSw_-C758fNniJ|vVvi#W(K*AtblAK5w;EQLGIZ(e(={dM9r-KDA9+x zj-WhEwr){1-0xoY-Zm?~friN*hvg8$$m$|5SD=BJwI+QGnbCpjgrz8-A|dodQBf zQ*Kbg1~7YWzo{zktHcW3`@y}eT!FEtha@R8Cz^B!TXmseE6AO0P*9NbKX|oz#^mV3 zOLbfH>~e-x*~?`oOTXFv66y=`u8lWg4U!hu@o1Z@-&9hFU;$j-6zt9zC8^2w;h%dt zRt_UaM56_NK~%R;JrxNTK$w-vc;KJ0>SI5u3tbM*V~a52rgG5v3|fnao_ElwQu2_o zrK}o<=tKevM~8&{Ud0%(@}n*r7})Wf^r2_78r6${BpDzm)?r=090_$(G(B{)jK*zq zMpvYKQIpT{Jg&G$*Zvz!WDW-`y0VO-PUs7PtZxCxpEBpw($Qnzs|^pNS}HDYVx@EX z?j>H2DYtsx(ABk?%w4Znq5anpPyd0v^=nrYk`%Pl@))Cj2}i{1IgA;@Wf|IBpa=vV zQ4gRJ!Z^Urjb}VT7^1DQ7(PdAP6k%sVALFwrMRTT;6COvFC0gCBrB))CeMls&HV1n z5dsOCe~z|Fl0^J=rCXNH*reaL8Asfa!%HRpX$_atfph_L+;%FTX)eoPjv5w^nTLVo zLJwT|&!TYuYEYoDYG*mdtcetS%B@@Y==VX{m$!MP``VMU*?NVek`&^fe)4>s?lt2^@0YpRQ+sq+7}sa|4e zn8~E>FK2!_O_)BPOtpbiF_%{2gclE!8786=d)p&w1s~Dt86NkG`9h&xhX#LUQmxsS 
z(E2E1X5B>?H#05h&ahcrAIhL2U&DB2jTsGMI#lpyt-Ul-j-)^$D8f15?uZ2G=lpmx5)$oLzHE6e2&!u2PC{%YJ z)nPDHa%;9^_DTDpLKAZKA9&XyGzH7N!DJ+``~l9a2%Oc)1y8Q{cY$54zn-ILk}(=B zRpEZi?%i8}^|Jadi-Tfn*q3*YG{7nj&FtH+Ut6eG)%!BSku8xRkwF7}c@;89je(i) zR;N{qL{?J$;xzH#a~3mdbFafOXvqq*W)7Me>PZtRfBxUUZ%T!bfE77~y`H%x*}M&e@xznunCXh784qpT91 zoKZRCxKfyHm+n{B*=qC&in`bH(*xRg(Aaz4fpSW(tPZw#Oh^jlVI*kIvOKh2--o^b zxytQcz3$LfYkl4~4BR@~wY%xUw$q~KKKje`{BbS&-G{?_&U2Y|p~r{KuIH!NF83I9 zbJu&H`$f&OVy*$jo1Ui|&--&OnGxV;(pctG>CU*CJY zacB1rWcG$C@vh4%mb7D)({#4&5F{Lz_m_6WIuQ_LvS1C+JG$zmQe+fif(|Sq3BaB7 z2g&&VbP<0u^IE(k+QefH69ULMefr>7RH~+O9o^m%P>Zv#IUK~Rz2!Q@IvcCQ)9&GO z<9g>J>(}c;FitoKg8#nDvdkFI^tHXac6GPN$mN;(e}I{{jG@R5->Z?B0#G0{3i-x>-1r^mE%7I}nN+?tJr`u8Yz| zibir?H?-4G&Cfovk*)=8>-{Vg2BHFGme0E%R@BOHbQ>vaU}>how-}(NbngV1!=xF{ zI-?pw?k!{+slrVL^1XZCK8tA8sRdAD`(41^q@AwHAd5wf;+>jq;g#!7i~l}pI`Ih- zqV#D7cB`0;8>e<=)Qdn}-bWIPm=Lt^o2={8yEi3Eb?y6eRg%R zTQ!uoMs$8nXOI(%v~XuH+R)@Mv%B3m1i91!-}&8iK;fwhnm;T4#Ld{sw{In=M>7>^ z8cnlTH=K}dL5mYbf9WL}SWT!7Yj$?^ai@I%09cP?;hga`Xlw3i`BW5SVS5uoGe|To z@4KPK`Q^FmM5TCNmkd=j9#LnT*F4X8!>BAGMVtk1nhMiS!`-8B)ptad1Ao#qpu&zz zrud7d<=QNsDhonuXBs5Ox)!aYjl{O2W?KcGKqZaK(0RI@NR_-$nZEewrh3|~M*fh@ zPTBl2=`F*BEI%l|LK#RTe6r*x^bvddJS`a>ds^I|O}X)1u6TZfLMszuJ*ik;fK-}B zSblg^jlDRwW5_b8p2_M>krf1wbsnarA4siXs>upN^sf%ptC(&>o?;|jaok%TO%!d$ z(%rl$lSs;7CM3yaQeEP?IsKdS2@r<_34C3?9SelKErOn!F>DfD-K{b~B{SjzZ31iTYdO#Aacv@RGxcq6<6*AfonD(15jpmmX zq4Ba68Dgp&?YNtStF8l;>Vycn{yL;~1U=;D!3d8fL4DzJ1fFobhEoyAY=90lyUsAD zNWXJ9M ze)KEf5SZ`4{sC~o9ki!o2L+)+wk(S{OJ|UC9Bp^@B~1|W-kKF|kUv1YbIu;);O3@= zZl>0Q8%M18iY!f-;4OP-Q6$X6djJp2MsR7%1hk*QaBq2hiydaz9>c~Sx1*N+Kotj){Z5tqd5yw+&%(Q5@iL@Sp9_VK{0RX~}Th4`* zo`Ny+@d7>QD{hHN>+BZe{Q97-E6!p7%26?G5tt7xTgkRoQ1($B#GhZaW#_}CXn&vFySVVFtc7kdnf?w!KjCJy z-&mFLr0Hof70I&Vnivo#DyZq*-eelQk#QTi^!|R-;gSru@uhThxQ4xM&;ywq9VFq^ z8tsJ7`@TTPP!yGI5J+KoS?r@Fir*$}&-KUR5*Nl8UZpY2HMrDN93A|KfQS zb(WiKj-d7ZagBqBC*Z{X1cFb1;#MA{k8A^EaPVqD?+pX|CC(C488VQa6A=-&=M->0 zPo2M=yMaj^#ho$`lj>7k;=@5)b~j{48W=vR{P=OZ-pc@NT9nXHCKloXy#NNRPr?8U 
z$sW52ub^-B`e9yCrW|c}2^fV)bWHP9PDufeR5(qlK}&!dPYc> z2{e`L|7Ps3wV9J675ni5lAL~gfbPKg%0{3@qN`dp5={kO&ia|Z`xXU*phlHzibDHu z2nn1#hu2^+a!(Idu46Mr!d5<@2p+0x39%T?inTWeScde`1{@pujyl6dD_t#%-QZxf zzs1FAy}QN%OHpkB>=X1tPHbRWjG#feRJSW}UIcu~!^Q2^&Ibm;sGsDL1LYutdvR1V zCuLV~)hMtQmnWYas?y5P&VBg_K8H8_&==c&UbHu#SM#@IRq~gq@?`NROmF4$K%~qu z>ee=uQ{f}orWRqr^!(l`VD4OJ~c_6;uGyQ+}wjp)#iu?P-i42CVooek{*VT|DEf_cLRuzMB`hiuyI}Xu!JKRezVkjeS(Ir1~30KNUVqDpWslfcB=h6 zPI3ahKv5U5kt48<+o{f)a9F9vkN^IAyIo4+GiJ*>uZ8WD zokHuTu3kL|J|R9X&cGt=_ujo@g9{kS?`&X9GP$6dq|dY^Z`-V4@)=n(W-#IXc-<${ zI6Y{H+Dr#~b{UGQWg{N!5}>Kd_wP+=&aV3Gqu0LuSr3W-Vn9ooEUp>4z5O0E#^h_I zg_(Rr7_7RJTYzk<|H~{|0TZ3m$drr_1#_8=Cl$%~#?rBuYS-G<9fb&{FB{y*fmOyE z9T>qm^$M|_$$40ENr}y^h5|sO=X&lGO4OOx|4de&#~CkcJ|WdcxLx8kK!W6|aShN$ z)Dl*2(L_Kv6?XYQJXz01ji?lJvX^q)pzl`KU;9n=A&G6qXv^6klTqFrHdpQA78jX9GzR4J&L_$3FCk0&Y?>UKe zWLWcmoCNjF;#4&wp#~d&tx?Ga>uCu@w5OLDdIuQPPeW*SGJzhM+yi#fI*6=uK>&Xi ztD5WjI>ahJFy-c}@I3agaKBFJr>1!Os&;J`;?!@Ax_fV>o!ni zc_&<0ZSUcmXV<(sDfycFxOmbGG?|Qi7^zTR5ov_-gp^>DY7JlfP;bNDg3t?K^I2$e zjvJl>W)`qSww#qUoa%FKF$QjC{)A!IorKn8!t~I>?O;C=nSmY(vqW+4Ldc zd0WE4NMzxRS}w3MuVP5wOwL@v(g*XnM~F2%?Mni(0JX!s6^mF=4$crlSJl0SN{Hv_;w;R^=*|d z$wtlj^A)}u*_w*_ucYT$@jIs~ zO6+cWlN!?N%ZH~s^iJ~+oSWEmUd{Kf3j{D)X^M3ed0NUf6!eO7uNM0Hn&V%cKOXUz z3J7`cchUp#a@995F-ZKEN*PS6ig|rQ!-L|}Q<7z| zV#lF9uC@%X>BE{eI7&r6XHB=Mj(*if2cmBqjy^5Lg!M>MruqgbZ2#pY=i9Xh`Oc0r z93kqy?A%=0EOXa|>qnrH;S`qQj3lADd0Itzxm3oKU`CF%wSppXw`Jn4nt=F?E1+Kl zP%S|Zu$o(SC0IrR>N74u+L3j>Dw5vHnhfW4fpYY`-jU%hQ89zj4>0E(ECcUI-O&T>?GUbY{}Y7Tf3I)(59e+zP<;Xgm+Wvs(GA34*@`p>NhfA`EqeWD!(AC zPygJ_%_tm4X|;irqR}!N!=I4=1PJ{x2vE0W0(+=!$+w?+7j!|ZBRYOVvNOQoRTtyOk~srGV~6HU&$1WLp`K$# zWbkE|P_{{OXcRc4Q{uTPsv`}zX!ZE4+Vv+F=D%oO4P1PJ>ykitM-ff3l-OKKx&=8T z`O*>R&<4JZl(ZLc`mz6Rk-2 z8odPBAj4`-p&Dd4Fg|FEoU-eRH*p; z=<(x2tk$Eg@ENT5;~Jt6%koegMPAXjH{#=h@@d6Q3-m#o%0)~nd<)I9P3aJu1U9LT z`bi^u_1ZP{r5vUkKp+>5*1qFJOS?_4?{bur-&X7!j*GR0fMpTc!4ai?gVg#Q7=C9%;jWkC90m|l z>Ude&QrP747ijX!=nA*4XZ~!-yH`?qnu{YbW}XX>S9Yn{k~GrgEkczHILdI^z)yV% 
zBMcQShn4S~In8NubMfLj$Nt%#5NJ}@v73!*D1@-!!$v?~Dx*MItsTcoW zD8c_?2aSz3Z+R2yLJ?#L9n{j3JOG-Mq(#pM2E77&kU)lzzYs1+E}+v9+vz4lpP@11 z&iNEeuseO`Lsi;(O=RJU?|1mt58lSK@83c;j!?r*mdBGlv-I>dP%~PEDR(6!4}xQv zCqG1XnYQc$W5f`9AXhfHQse?!Ff%G-Bd*OLT@)JjcxE6+-v7fjC5!-C(2$ zbNBt3LNSxVbw}HC`<||wgy>~b?)QRMwuIg_FqghVHci>g<(1q?U|z-(R-z=~=i=OO z?ZG~xJ3ha=sLV|E3=gXiE5wg{s2#B=)Eanqwq4noAZAXWfo3%`!mcw`i<9x;ok$B4 zo_ViNlDw(@fqtc7VLJEm0zfv(-FZKpu=9{YEEWaNpN|;{ zYPy~?(EI?3OpzSV>2)g&bTs8DSf$9%_C25%hgMRYY|aR`Y6m&^rpwcr&l9prS;{V% z_(06&<7fN$Fo{J3C(*b$542Z50}(FS8X;98J>uB2Y}20R2q}j19Ck0bLo|=C>Jee7 z;eMWA-O;@vQ*z9@E5|@rL^D5~OI`{-_we8b*C*ey{%vfi8fMigG7Y!Uey`oK{?#ol zs1sff%{nmw#l*ZWwjzm$);8)&Y-}qyh$*$K%8;;UZp4jjnFg6Cy))f?d?LPl35eBU zE#PntAMIW)|LA~EyvkAxx-2PoP)2tx14S?!SeHJW$1j4vH{R<{RvEZ~oj}0P@qa-| zm(ZR;`t%yGr z`RO{sU?7eJcRGjW^kOIv4z}jYX9~)Z+Z=v-SK0%7jXVZ1LZB+VHjvnzgkN&A6O@(! zw`)ZpR`}>q&Nntv-|7D}^tFIrh?q<~)21wQV|xph%&7SA#8qRnA3_pH`lNF<*n{;_ zr}x1-wrnx(`V(2Kslzj10}nK(J%4{%RD@i<3AMv*Ycf(O_~gx+y!IJrl+^Fi3|b)b zR9E!Ej$So@{5j@36Gw!2%x=R^A!537q)TR+oKk=)UWxu(UhHvw_rXS&VdhhuTgKY0 z{QYSZiVmLuFL|~@@7^7&?s$tK!0ALdms5=Nd6{qqth3v_mS&A=@s`lx6E*l|qUW15x zxVr~~k;L1)dwS__JMH0AP(!{EKv>_KJBX0r9tU0`hf3(I{Wke;wExs+j3B#7>lv?s zEbP_X$5AD@ReKw;OQ7o-alqHdrx`Y$vx^h`N@+s|Yv7gPbZ=vaOQIvE#;YT&0^!Wn z*uCNC*u5=KK=@2SI1>wF^-Un0Z#EP>-D z%QcWxC7*LkS_HT4Yln!EclgRnjZsrp<6VBXFX3{b4|6w zYItA)`OR-3#!-Fa+4-*}i>QOvOJA#fsA3ir{(0HL#0gWpAJA8!6YyH1;ijyIAS7t7M>U9Uz}4A9F7r-@$^90ZBf0fXxVD|MPJEkV#Yo`TSSR% z+qFXjHnu^p|NN~$8_p?em6Ve0m7f?e`qA^e)vz&Z{n~sDmCtqJw~zPi3Y1Z#;=9|5B^j)Mk8H z(NJpN6To3dfd@o)Ti}Tds$kw3@Um7gMc!qq*6r@}P5f9)=uZSHQ>6h8`uyWO^HhVzFj5twiWmf^<#P^@POtJ+~GBb}aBd5x&x{lx#xs+-^@LOBu8*&TnwwEFW^`M?B zog-VB`2tVgGrKK;k4#<9J(WiJp6Hm*U%KR!|KukdC)GDHW4!`)j~T{4?RL55r)vk7 z5J_9$Um9R|0#(79=9v*PCLKC7!WMEsg$~0zr#p^8z}+vm5qehF*7}?~imRKLDC+?k zIXO-Foxpptc>&nhkEyJuv=t~;e~5BM^9*8O##lplG?2km+uc)8r?Fm5iFzA)9|`qv zEOj9ta6pJ8v1Kw!NJi%TB(B#Iu*1&emP)`vLOCz5qqleB)^BT;gLg&|dD8yS|Lne9 z>7j!l57mOXS9=zJh|)SDnifu$LluaSh5;d{d4}DF!nX8AB7=R?i?sTzYI~hJW}#Q0 
zW%-kMQv_TN%0=tV(ordqlUOnMLct7jATjI&Djn za@d15`4!DpHiLq_Lmip`S=AATBi@czcWRJG7k~`aZHb$eT*%nqT^3#uVOfaJLbwbG zr?CKtPkau>Y-Q#aP#yCrwM0!zga6NC&PxbHOLH!u8hOo;K+WM0GR0FZE>Bfb;m-@a zA4p!gGRT~!8;1E{e!I_g{+C_7m9@2=gcwt?6INBce(erLqxuI(uTT)Tozb&RA%J3U z8C^vUCi9^62(kWy>|iBPkXAiu?!Zn9yJ1Sxp@J_72zWh82WS+ISKE2&v}u{- z{OK=4(1%D*ZM5CFs3o!Dha|nbc(cf+X$yQ`XtZukv$ioiy__e-3y}PaPzDgRadVHe zrliX`EZ(h|OPbwdq!PeS=@Xn(I`3=U3cuE=X**u-Sc_~^iSx{?Bj4;YT1t9bvqDYd zbRU)n&?qnY_Hl-l0%9SNw6Tl;Vh?sgN9%V)dcoS=PuCZcSJ2}RT4xKw)VV|Vhc#{9 z+}OZCAS>UQT0I)W#;}fV^Twf^q$~#X~6p<+pWN>98}(9d!db!rPYb5uymxL?1QUq!$C1b08Q14d?PDG0vdJVrWU=vCdkY z_S4~PIy-%67FB7WK4u>svs-TytWO)(1n?rBT|hg_%XhGfrZ_2EZ6B*#Jziyor!+8C ziw5xKXo8DMfuDZl(@_{RbgSNk6c+dA_jAnf1`Lj8PtsTsB&N<<6z6Ob>|_C1by~fI z>#u2WC=iVVSq!OyDM{km3TbwLUsRK6rKX_axmQ6eucoHvzAPQ2YI+F2M^JGjB90Pn zk%4~@9C>Oxd>%E&|PvzVVvC zK-Pz?XD%=s>S-H3>Q5eV7d##6&tTVNKj_)}p04ZaT6qqPh%;eFRv9?5_X`jW=|~sN zhuh9Tf$ca}PAQPd+1jyn_&vs@5aSpNeVseoZ*tQf-ExmawOR`aDHq!OiX?lKm^KF4`PN)Xt!y@Ol?FxX0m(~YUZBbIO0=c*0 z7R0Xx$1flyN4lo%pIplJ%q#yd{lNbs0|*R|637nhjSiKL_T$^5iskWP1KT~tA{b}J znE}lzi5Uo|^xckyX)AZWEo0UUt$<5Fjp81Bvx<(~_xaUrPmaq;3YqutDF8@wx*;^) zU8;rlM3-&qGV}WK@53&1{a-D>hxU6@c0TT+>e6IdaS zBZR`k$4tSpY+@^-uEF%uY!<)k3Ni8bfEJ_P?_IQD!ImNo2BA??Ls9&47_52=mIaSF zysQy+KupNDN%B&slnt!J$L!8p8JbCVcSdMcF8C@(bOWXlZ)TycL-doPiQ8U|)b)d- z|2U~)w+C`ol>Kir$b=K%DoV5xe8}>qZNDF6P#^e#cS52hyR4jbY0+Lg?OmwreD3WV zlaR^$DbZ7R_9LdFmsJlCmKI=t)qI}7_0iGMb|ZFjNI|O<5RyZyFXpXKiguF+AU4%J z38CM3af#wxZ_tlqF-gQyL!llDXyC-8c=5lPZ5BEj`a5>S_?M}cw0)z4()f+(`$;kS z+6onykCMNJe6DM zRg>^LQGf#i#2vbO6WFPTr)Mazcx*zO)%j_&Qfs(g{y)hkF~)?gXNL$~!jEDd$?^z8 z`E~CJ=4v76fiqU_!!M9U6;cn7@^dq1>a<360G!2CSI&F2{8z|G#T z>)KR#G@~V$StP59%{}n#4Z|f_`A{U$6DDLWU52^~53bW{G0_UOwD~1x{g@LR@2#a| zaSy)lrfK5tZ@)7m(YIGXJjVi|vG$mFl%GPSv0XY-v0oA(QHWYH|M%eW)YA-~p|JP|2&&T5$C8co_`M>JKo_}T)i2z%DriTMpYX49PqP~(i;gpU}v}0K7TQuMd2m{W(p(r-qm|sqh zytMXhRa#>#V6k>JrzrBjbqZ}nbLgjB{_;l~X4QqDtPx1wn9Kv4Q8NLeXfIRmNCU)!2Uv5sSMmEAKwF;;~^dh78>jTIECx5r* zR5?!2-ZAN;S?FlS;R+xU*QKxUy( 
zSC>z^QIGMLnSSplCpGINWDp7ZhbBU07&$(6o~_=(>yWj7+ixLwas-b}#wEIIWL&Fu z3>iGwoxOmshY?R89O_AgGakOaKN`>Ix9ecBYuBzN9BtfjizZ;5wyRcMv5N@6h01C| z?c?YuJqe;w9SBkLI`Fp0^Jp65%2&YgN9RqmyG5@b`G-Nx^y!XbejB#0OsPJ{Yp_JYRzjzv72JG&^$a*o4Lau9t*gcn?|h%o??nB;g!91bX>9Qn&-$AhYg1OlI>5uJuCDF4{6bha_bJlswAGQWD&)88kx#x>zsETCQfK>=d zN9_>)1}vigm)wF0ZKi3}G(h(YZ<;+D? zbh^yDSN`qp&70{oRb6N5u0GvVZJD26{LULBZ%32YG<3x3)z`wTp2FSJS_!9L!tF)& z+96>AqH|(wELU%1`s58LwgOUOCxfl0^;d|=yig>$x_rMtAMypnTUipsQO9XypjL}5 zfb$Q4Kz%eFzeu*mOFz?d`o)+Pv*sSKS80#e#GLh!(cBe;Wbq$n0j~E@VUma{!Rg=;(V~qg5*(+Zi-W+b2&P0N}0!3Zo(+Fi1;kPRU&*3jdy_np#Q8FJ>r$ zSlzJ=I_LA*rfy?XR0wz^)g9<-Vr5!@MD4GEC?W~qF1BYhRT^!rpEAw*R7o6A8#CotSTKTOAW0hdWKH#PYHJ1VXIW^V{5Q^EyM zN!f!r=K@?M&#Q62ep`2q=ZQo1X5LI5cJf6JraoYGBH+vyUB;L)TTPdFMC=|Iv%$1S z=sfnCT+L40#{22@@VJYrgD?wJM&dl?oudm?d><{AhYE1jMKcxc%Y^Xa0Pt-;?VL|b zsM+jqTlegV0i7G&vy^7rXKDTIK56NQ^h6%|G|_2R_9N74T)mp>Gr;-*X$Kt}6gjBZ zT_aP|rjmj_So_bVZ93R}-j_6vHcrF_L%(q~-71XkF0LufQGB0H8;^SU^9yM+!N$yj z_hBf{-SpeND~FlE#Uwn0^C^PY?`8CqnI`kZK$e5HBRWo^Dt2459 z#?y;w_atYfGl!Y?5`Vf@&(j~L--{FPtGNBkdI{JC}~Q&)bAG!3c0iE4&d)PU4&hb0k06U7@JOWB2f<3 zh>RX!r{@~BK0Eq`6r*2i-c+Nenp^5e(E6e;jFGmO+i%aIS_o-lDD()WEWiXllWXAR z1JxYEarWy6y@b%yq37s#k~CbDWRr%daq{2Nor)x@SqPnE3064G_S{B2)~2)^VLtEW z4zk*w)m5=7Wdz%uLskI4GCU=8A}qP3D498EL+;RRuzbt5;vL{n*PcC1%KAc+puSt5 zOZ2M`X=q7y)@;X-t-V&=vftajPvxb;4bgY5D9t&DKo-19O?pQuorO%?(_2=9iQrYb z08?I1VRrOxO0oz$u3g`W1c!a1!)0$3r93p5(#a$ptbdKFEnyeLR#-kK@uwrzZ2usBVkC*d^*YiEuwZ&fTlVmuo74!@i%$jNxj`-kq! z+qS8aPL=3-AeoHQ+ip`>Oq>M)3Pgkvy444po)ffuqv(g(6+5c2tg7l>t2ew0#z-TP z*?gIms4z(1g~%9bQf9_$rg^kM*|e}zZRRgrh<3PX+T*R=Cj8oDd61K3-M@f`ME@^R zVL?H2jEp2|R}WODI0-qpRzLFlGr zEm9NX?S_$-Ur;F}Bc?nX#|;+cLc$kW%fO&kE}ke%`YTUzE=m~HXlF{OSiQe3r^yBw-EsuOaSzJdjnT2WJ}*la_jl{kMX0*XGdui8Bv{>1~-bI*9{Ydj34t~ z1GmcutG^kn+@#>-#NtmI`XT{+R*MNfkoey@Jpwmf?gBR| zJ2y?oqiv>*Q{Go99q5Yp zAOA4W!uw^ng24ID4p&)QyZi^I?y2#S$q}KQa2EHJLlnVK=uHND$&WwV-7hxuz%;T} zCGo05D-De-bYqbTBG*8pbWTHEJp&qOt_F#KO8-LSqY4i@bLQj!ua^NVVwTcqRG9V! 
zoA>O=e|{S{DMFSarKCv6PG~-_jO@edm|V)1G*n{qjd#;I(@+%{L{m%kd^{sN)o0{hMbfmF>AdDNkiOB`OwdC2&>DqB$-RB#>trh$F z;e6zUQ*UXgsio6F$kvpB)GKd!^zK^XVdAaTbmKpe5SEscWYvlxRb(#fbW7awfBZnB zY;su3c8jsn3(|Q67I9()qcYs&!0ou6R<2x`6z>6i(X~&XIsPKVFs;4xrlP`=b5X(s z9v;LtNHk#ll$#YASvkh6Qy``}%BP z*1Hgtnv|uUZ*#Jm)~{O^5dFC2UO9lp~cxw7iqn#=eXYrYm*+oS_^`7kRm(7rp`z-I9;BJdoQx!}}e& zcC7;s^5HQg1bE$yGYq&%BM$)Pp6SWD5xSIEZzqzDBHPAy?P6xO^w@WJp&Ewb(*0|H z9Sq_XX(9UZl>@OmvFoB+yehVX2gmO0>aRfjY@dV_w0JjmCVFUd({r4 z?^kCSOA!^a=3eyX08iiLYqhH>En5X z?$Ba4`TSSVo~;9E&3-nG)*?!^v|;rHv4>!M3A%LPfD-(VYNYK^gHU|Qi$$5Y-8Kz% z640)>ep`*E*#m^2r!H<)mam1*%z?MC?6$!C0M=I3gQ4R%`M&5fr1@%MHt!AJW{xT8 z#UTU)AVro5eSiO5(t8$81Bv02#3WNJL|iEvc3;je$D31V2w071;xeh0y>P|fdEQsz zSuA%zqk8P`&g{fu24Rop_ep5Pqwl(7dsiH z!|25V0Dvs|lyi{eO`InJX(W+*bTWD<e)A zI+_7Wtn+je`o^?NfIAtyE9SraO#!AgK) z@j_260C1Glbi6jI4?OkKR`wBz1kOwCNb!hJs1$#`JK#Y?s=aIigmV_{=eWnm9 zKpsHeT~iYikwq%9K5(90&I&@KKn)Lxj@Ec{0Z&YG+E?S1y1Os4Ka2+trP{n%yo6rEq?P}(FHeM}Ajj|~Z&1p|oFhIfvCYKVh^IXw*QTh^@qXTnEG4W&pjk715 zGI35LKYV35iOKdBY7x`TzcFQuq@6plrCrOeMwMzu9Rh;7n3}!sbR{^hlAv3fc<7Pz>c7RqDRfXW0O74kzS z{qLm8h#|{0kTXu=HqXPbjn;f9S)`JbxwaizPwjd-{by{T>XNK18>6c%`u5cZ`1f`N2QmlMG8-+u+Qf<8OLV5KX`=!1?MKfszZ9-t zh!EMh;GO&V^XFvh^uBK>%?mCqo~7b|s^|MVey@x{OEfc0p3+PZd*P7YHSpxzGYRZT zkR3oiLg7$m{=2Y~@e!nRq0|fLNi94wRaNvkU_^@gZKS=uL?V2{r@zA4c2>&D5Zc62 zu-;Ref}( z`j?UPURAX~9>BcNvsot&&;j$ukg$XzFd!fFJzfxr-Mc8zy=drk==6M5=6L^z7^R7pg+u+cj)Q zFk@qn{rHYNVrU;^-;c!tE*D?o&1xVo8#MK6F455tRIO&oi(L&@`r?DIPV`}lr!QpV z)#h=&RoXwBdUmd*`L75!7jBg1?byi3bUJ~^hjQ3LI(b5lX%^Y~_R^HDkLC78 zTDu_Dk+G}03KYi6ZW+WYEU?4n=j5gUj0O&nSb63>$dI>zIR^7G+}N_{1x+4k$oV{S z-n?#8*GYy0VniEOGj#-jJXG1pE9t;*HQ82a*_iskmj&HF&OkW;PTc?j*8f2Z{9l|O zZ-tN}1M5*Ev_qco`Uo$K9d4)njzllP`UR-4H-RC?t9~XI9JQ~v0d@g*?BUd49}i%< z{~}lZsbQ}@_2Z@%;av%bj%1s4LRdC*1QXA-OneiV;p9YWri9A{g=Ru@8`3n{qcP^q zG$0S&Fp0`6|B5u6;H=H7(D(3ZyH8;u^h$He`Ofkv&8ezK!Q&)|*JdOY9G=7bes;Ql zyed4r5rzd+paDCX1c-&29jvRLYCVfan?WYI<5HGnv)U7NIixa^lUnQjUISc_J$6i}^}K^hW(2KxPU>^ 
zs$X?Xr$LiEU2Jh4LSL$%f>{)87lY7*7KS_RYu+&Dv;266-t=X{^Hgm^$441doleSt zEN{vv{+9oOaj@XUi&(H%#rySPVFoCYYrn99<8?*F=zc$~@iSN1A@vNIu zmzKBV02CnRU!B6_I`_)+4&uH!<`&}-`clpC!4sj5{wEkgHzaI-}cTD+xh+= zP&`6yO3*lsnz$7Aq^Zt$I3l(*%}6{y-`6*P^&9qd@rsZB9n3n3wV?<=9#n{MioGu? zvq%-OX4!}DWr`dpbsIaD#I#^oNck6(N-FJ^3~D5Px~SpUnss>w&rjuq>4|5EG{lm# zO>0IvFQ|kDM6o&|cdYsNL|b78aTlMB}C_$3uXw@0aBj21rcV%>}RncG}Wbd3;X2zlBNuD- zADsE8MnfHx(%CxS6sH7aGE=l=@nc{nLZC8NKVj*xs9}e@IeX~infP@KdcyLNpagZ zB2uA6S&eELo*hR{lKJQ$M=9f{{T^Yg*Mc7)rthN7@f0C#z_^*}atMr?GTPo1>r4&s zXt=^{zJ}L18TWztmQEmSIO4EiM3M^MT>JZVa>DUbr*h5^^0no6&P;+MI3|!P6Qx_1 zF+J`SfBW)(D^EiYW1gFQx&<|RkG=ioOHo?;KCv>5wrYY#%?cwGmAFycT2OVvW0E5Qt{l>eP#jKINbi-udC4=h3y(jsV`4bGQ5Zm=iel!&MF&NGncVHJ=y8n#& zra3iZ<2RJ1eW|Fi+*5Jb)oFC%eb?g#<4@}pbU0|%syoY9;*7N&tP&=U9@YJXZJ6=| zdt&i-EVxO6(Ph`b9; zsdEUEmlLg*F;`=(zF+?)?EkqFr%ZWt2bFmIwQDicggVsF(2)OArWm3qx50QoijH}* zXF-Uz2o2VqV~#JHunQ%mtMaNv$0ya$9E9!%;1HJan$L`V8jz*+4WKm@ACIXInA^kx z(r0+eMjjXp8i5`sw;nh^ExzKn$cB~6mwS{5X#i%t$&Fv>?m)s;E|Vq{hZg0Y@X$!u zo!zc4c+^D}?bMj|?AZz~aY=7xg;H=bU47QXAuNZ9{tN~~smRa1V^U~$jnr!hVO7%a z?v{45d-%^Fl*z8fn4#1yGP*VQp`v9wt^|MW9zy#8KYqBA5{j9HK~~rrC(krUg9Gz? 
z`Gkizb=|a$caL{RhDSd9TA13YPhm)hAmu3g1#Fde+Y-wp&yK`xmZt6+bp?SjE(OIn_IADi;;Z_4T&tfeMiR+yrg`yi%IaNFNA6>hL| z-p4S=as1+yd@+fcDSpTZcqgat$%&#VUXT-*=scqsE^#^to(|B-kOfm@=B0(qRv64bs6PYmhtk9}(F&pl zEEOluHP?JT45LzUIi?j`$OxKKm2fvZ%W&6E>0&7RAKL9Fs)@1xw3(KZ90n{KukjMP z%JX0#R=S0&mt~F*7SY>h^k_A%RPmv6WC}QSFH6NafymWrA058fd?_yB|12C$c@TB@ zu(KcdiyvJBGA(6pmA58VSF2>Ib2?Y}vGjoNrVtReqad<4(Y|9xS$cM1+e$Y5;4bR( zj(_mCN;Q|VOv423ywtZhb-UsP4j$dQ2k@5%Pa@zop)1F^MD~lHavCMSB z8VD07miA=?Lg=bouXAwn9A}5uO3n1inSw)e(6y9g+$UU?y|GkMT5b)r z16 zEDmqh&N%Yj>(?zQa{VVmPGU{W7$+B)KdMG>IsZ+rdkGzw5_T!pyZx*EoL8U(1CmEg z+;)W}waAmb*WiGr58`520MZtKtk&|q`Da&!REFA>_-7Uch^>z-nTk%G0{yDxQg82I zrU`>?ef!tp#27Mw)qXlX{K6u>F&8VXput=G@i`+G)QuJ)z#Q2imS0^!VZ}l&;0@p_ z!l^x*h$k7mgRfsn`tVgVLFA*=7;20_7XSVfkP+?ig4wdvd-psMBjB<&X!RR=4;VH< z9EVRa=V#uylX;%XKsL(=$FpxBNq!wpz;6E#0{~B3(xqP8N*MrAZg=#p#}FJt9q5&UHx`9%*wfCpZ*PId1K?(J=xPJLuC1uA*Mo^bn`LifNHy=L=aWd(;>jm zVzRa28DOZ$ACyKwVMFllJXP$q#rbH>qN{MKKq<_Q=u<(8xyO@C$*gn&3gj>YMoU?$ zNLxA#`FG+2LD`S1B830V=b~NB{^|)CnNn3_L!is7!(CwsFtTy-EvT!nH$5s1FU%{S zG@v;TA0HLcleQAbH}G(hedp`c@QWNksS!WeEN}pBkzV8m1I!50l|V9Q*69rm;*}<& zqUSwWnw3J_1&aIX!ad_5dqyFY6hB?mSy5NjG&Hif!VMkiDaG#b%V(l%1FXwYVeFwD zzOJ#sviT)GJ0Gz22p6F9w-&NvL*6uti^G~Q7Tqy+{hr;srOp|obi4;0z)M^0ebW^K z*SskTZDPnd5*)|_FgYSUaG1-0GD1}+X4{Tl(C^)^`*t(D1zKVlF~CBK`3n~+Pv|W< zW$!ze+Ht(ja_p-_2p9^!5R6d9WNg9Z(MjFnu8#C`p{PV2n0#U{AS-d)pzi#PYue&s z7ud3H5{z&4B<`YAQ#(&{Vc# z3)90_X25l@hg3kS82LFKF+P{!WQNY9TXxSn;>bbh*fO8+&Aq+%*$jXmXSMl$_)ZE5 zeO5mI5L|jUCG=fR+zKcG3m=QqI#*w7uiD&Beq+(u5BPeTdxflIq$GkKJz%+q5;LPL z+iK+3n6v}98?}6fySr0)@1L7#&nS3y5?GV`Cs+gX%N!?;M3tV2_f~59ANwn*2*@ULkKDmAk`;%@Zd)?c2ZD z*X@DLL$M`Ai{h`8Qc&hxd(XoipTN8DMAl8syh%);7>uXfushWAugy?>7H>BB>m09$ zxYT$A{O@)_!7hb5DkI-J{&mP=jmONKK0TaXc}*UJ%fAhGL`FKmeqk_j`@^q*3fl4N z4(au5|5#Y((D{PGAdufz>vFa>{m?@LBm2m=&YC@YC$Fk*6AV8^fp}V%p~vQ4r%o{* z++|RYXE)aV!V6VY_!&t%GT~&8`j@oE-rui~iqolOC%rHnuxUGbG*9{m^AC*x@wH4VbW5LO`wDB5H4O8f+k^6|Z1`DrOKIDqn;X}1AMitq z&e#kW92r*5Uu>dGsx78!IMPeWVmSB+or%7wp04i7l6_aZ!rm5p#+XOg!SF{deothK 
z37S$O+mE4vKRUU8-@fCQF712$o`O$I>S~-2pwKHBSW|Kdvdf?~D(CO9EShPyjf8>W zXh=`Ui9%c&HW%>#x30PeV+rx29<6gJCOSGlFZ^OB{{kMt7@dskSFff+prrhKwO%*^ zj64H7ZlY|-&EyHrVStnpAMY6H8PzFl0MFx|{`sjFl{rfq7V?vZGropD5ea<5$+b#9 z3=T~plQRJ)dwn3vqJ`nQ+tL5 zX~ucHF{lLL40-En>EMxh2QSD0;E;Y?6GvbzsQ$u+jlZ9lH~;%_CY!e*5WyYvdTrl6 zvMl~Gg}-*O%EB4??Fz0qTMQFA6$G#Gn<12lq19(Zcq@Gq6%5cyXMsH4^95|lVB+8M zGzGXmvXAf`!b&Ig+)YI0j)7vL3m|L~jO+_XnTE4A=cO(C$b@K0@1U9pz9%%A#LWQL zSw?W0w0KDk%!1c1V4rvhpqXP(T+rQ`GYJfJo;(-4mg4)^Y4A(4@tKN%Me z;d4k2XVPW-%U(wp{PX5dVXi)TEK?3jL2l|-O@}VB4vH+SnkOzKB6NDrX^{s;7iHxu zB(kfQxSf^6D_Ih0$k@Fff!4&DUB-NZbw#vg1qOWD7AT)n zH!}AY(~EgM2xKCbpBe~9fTpIfH2UB{R!vQdd(-gcuW6n|=aO!Ceh`a+n3V<7Cwg@d zp&AptQO*q;FhzSP2s<5tK~&h~iJpd!&rA(KGWMvWOgUvGQ-sw12?s!(aU1!Ss$n+Vo^^bP@J4Z5aJu;7}K*l)mZfV-7arb0z zVmsN=($r8|k7SS640(N)EIDx#Q0=y`i7zui4Q{axoKrKKoplRbJN^wBS<^$+Y#`uUaZ$eFS&>%?IWM*Ei$w;{%_aQfBJdUQBwJ_C-za z!ZkvkG49eBFx0=LO!t~LJM}=0qgtVTu8m$Z8`7q1laIXs^&8)?sE8!WATjw_y=L|5 zY#LFgeth?tihD~*E*$;2Yu-Q=!r^j{!Y1qw(BJMitA(st`Mnp=H?BPITnmqnXvx4- z%k@olwe7TN2iKHf>H_U!@A+%juZvB2Gs%0hW{Cc~kDC?yVmJW1HA*O;XhhfP@s(Q( zM4MhV{W#ENMoCFKcw_Y~BoFdZ)KAc?+^u_$2e~6;4$6HeC6%~b((P;6pCM7a;u7Bi zitf1b!Su-BwuK$(+EnRHO-{p>8T0WjABh^m59U|%6;M~{zbzEH;n~ON0r!{x2ORKU zjG($n(e({Xy0iBgv@$5}QlM;#YJ`r*`7T(4=D$nQfFJHcQ;WXd4!Wl_8n-t9mLLSSd(763LyUv{IZcZ3ghB7;buXtEb<`3Ft^yHR+>Dw<^JBYUEco=yg+{V+8++P zIEtT@Lu zXd4Zs?hRs*4SO#i{{7-b0A=FBOh~wbO1$U~QV-T!7|m$U6Oxnl5Lpq#kNm#I1+4`0 zHA3mU4ez-X4IJ?SGG%E&Fiia58ge42y!6jM+rp!InepiH<6u6Y;R{fD8PqH%d+fj# zxnQcqI-vecAOw9cv4HCN2xL{J-K_X@OB)Q?mO;(<+kcjngi$RGs)BbA2bAG_0_Vm` z!lHFCuhqhcNnN5yNhK3sq3GfXWI0e4xs)fB((6M%G3Sh3y|8(m0H>)R<%FN5M>+kvUBD z{dw+|;YZ@0yV-tZCor7jm>$-2OYzM-hYipR&^3+2?7P7dd47@wu97Ok*t@B9aFy0O z#bzR!3?Skl2T3am~%MYqM&(7%v%l&gv}xaG2{Q@oWvccJo! 
zb*xlKFcTwD+w)1ME0NWG*CmvSyeFW9_f7qyRfXr3xDqz5Cr>ObyY=9UPR|ni!AD6_ z$N6zeZvWv!R1Ri%A5Y!c>||6x$Xc1cGH4J>l@Dc2vt7I2vR_z4i7jo)gEOFWT+?ok zMmsy()5-tX#j*f-#rX+zb)-gqu4R%;zGQE?u`G78V{gIks{>cPDyIb*GMl~;i>-v2 zjNSdfw$!7+j4W5bnOOP={*z?=&r29b2ok{ac5;PAqsNg{I$haXE&VcL0I@-t{x3=5 z>n&Po3Q#8-7`XZrp&t3h2_|uovxtYVCT*=M$rj-K>AVAd_doE)p|sEHPeP~mP0)q& zeIQ{Rd6Eu%YblNnha@bPKy*ETD%I&>+w2FJYDtM=?X=+VUQ`YzN+wD{qynVau(<}_9W)Qz2 zn70i(L4BH4Fv}88A*(35F?Pnr&FK!?gS5soEY@7l3zB-H?@UksNS|jy3*tDp`vaDW z9FL;ch`BP+f!}$2|GpZ{;EZx&R0hk)8MV&)TT@3zi5@$yfeM(Ge80^+SW~t2^xD-T*J{IJ&31m#G=9Ub|LjF7_eNwvH=*C0$dei~yOBiJ%cMva;IU zLRh=Dkk7;Y>@THy{F{sU3zbKc`2>;4`2Jt(j$y3Cv7UT>)&uqHz_s`e7{fsEhBnj| z^m~u~r?o#D*Utj>R2M16uHU(Qa3M^)>>{79>>VE;zv4|%w{vfqVThyJ9o9=}P@4Mg zMG7QlwudysjR)Xb{3Zl)$^B4ogiiK$cK-G$tk;^nP>;V(V8Kmo~Q=E|qw%>lC@@0=|3aQmezjb9 zx){Le!7&lgn3&2W_n*($pYWWUd7p+!7AGx*m1)Ep;bu$lS(lxzz_k`-ZOV-fYK5;2OF3FDLI$@CGOS`dAI>AbpL{u8wvd_9kh89ln3 z4UwFi&|dw3BsCycyvZ`&f$_c_Ij!Wcrl$u8aH4dcUnP<~lB2sb1Kj}DbSYLtZ;GXl zEII~g^MaxqGiSZ#)hxw`x764Gx=qDK% z{0|qPllHXxbbnSMe-#yF0hf|`?_c7ucR{K+N?Emi536oe3&KgNK9HR?Q0s3PD3$!G zs1V4<&EKHynbcElzp{23j(}ZRBk+w_=5Y!yw}Y(ry_I zquHl+uryk+e7Oyz=(=v_WAzBmLYGO6Y=yCbU z$l-)cz+E`R*vwr#EH1#nko}RJy97Zk+S3+uIvYSIv&o#r?Gywi{p8XF2k&{Mch+#r z&RdiR5Bw+Tw)8Jze}X7B?1v>MiG?niCBa z zMS_Jde+JCBlS6O)LKs#~)_QplA9lUU7FI(!bdKw&>O~ULT*0z##Ek8fMIT6ZXVK!?{|Uzw_V9A^0t*Zrdg#=`{OZEsDx zJGNL35A6=5&~??>>B`A^B)wvcsGEL>o7&Q)F0o$lr&*yq2l0T$1mXGe{T`p}uB8nuW zeu_^7S_i$+4rs|NGpB(XP^1~Aw;v<5rnk4!Y_lyT$CaY7Bb}L-24JMAf)}HJu1=w^ zpD~&eiNp=}Nt1%;7{>VaySltWvHi}+j~|OC%aC{(n`4R8{5wezNx5s7N#bLba`CB` z;P?q=!_nnmZyedkCt7mDOi_C_p|n51`t4)k6K|ZFv6YG9;OQug_d2*`J{h*IRzbq- zr7biWT;51Hgo%ELBO~|{`Awog2dqdKx{1^Cw3A@2l!ctGIetbeZFg<6f)Pp27~`-W zzSx$ZjrZQ|i5eby6Cq?5AT)&Top)8@@!iGaJHln3DGWaco^n*$h%wTLlP*WrZ6EAg zr9iQ(TNY}4UB!s;Q-rh+-eJ9ZD_S6A_4ga=S4~AR`Q(nBIb{DDz-{9ZOS(XVVLAz9PJ&-rxk0g}o=+^NMZqkmCsa6cOQ-)D_AI zKYGU(-6I{90?hn%Z8dK~y625(q~6-{qq}Vf#J@6N$e=u45G*SRYHG%9FV4uY^_;w! 
zPoIBwdVl{3TsDEY)~sdAfDk>q&umydr#XXuJU%l0`Y?u;IjiO|Xe+D>P-J^dr(p&( z-hyj<{tH7QC|)nT!{#@68j?pG%MF-uQK=jrw)x=algQpPTZeCJ>;Do@8wZl*(IM;Y zhWAKxRkagk5fF#Qt85RY6?eMAsQ}(R_lGE7!CbZXhyt8bGU@>NNNmIOopmzLxhKc{ ze<=k;u=MgKTPN$f+{GFb*{%#q-41Y*bj*NP+S3U*N;~J$W{G#!Z8UhQA~|B=(9%1E zk#6R_yLZpzIGmpQoT{PUvQp={B18RcYHB{6G5#hax)-A~y*U3tTs(49JLq7?*3++w zy<6`ksMx7STw2~FCCJy`{+Wc1$NI}ehJWqo1NNCk;3Tx`swNTp4ls<$1VLA6%O-?_ z6f|elpc;?p(mc_PJx8qx3>{1P8^$DjV(1~-%#ME(_+KFV%jUPMPuc%6WtZA;8a0$C zj!!ayDMW;5c@fG9aapA!t#0h^zyC4Ohj2Y-{`@;>B+`HF-CJIP`2=`3--5ZBKfZ@f z)@^>)y=bZ45n4qc(XOr!xvip7(hep(Mki zJsLzqbQ?;zoQ~KlWE%7%r`O(Np#}xvqwRdf?MNXa=fO!Ic;zY000qWAeLIPW3&N>B zVUVIOdFXwVzMct|Y9PfLS|9~VkaLq(ufAm8{#UJcA;++y(4t}Yz2*7&icgxjGKegr z3;YTED;{-I8Tt{yh796hbh)a{Wh@KnN6d|(St<2{>qS58IXxDQw+F{d=?if1cHoG4 zeZ3cZQ(=$)~auh(d$Wg7#@BjpzGV5S}`K>I+lDx0tzZ z5;~#Lioi?7ir6T(<$PuGhE>mrvzQEQ_u(Oie|>OuF$Y+b6K2R?HS&fuRHsJWahV3twdyj9WrXCIoWLD?E?ifJAdnqr84#54{ z)@#wvQkJP|BEyW}pt<$<_T&57#&7?q1EhOBjU_mX$&lZ<Wq(B_Ut*TbVk_AKP9W-I1P>P0Luge!A2n`y6U!q&7eIy7c|_;Ml3ctJA#Mi9PuvA9dF z6}~CmkAd|A!W)_O?!mr`8v5(O0A*D2Z9~D&@j53|pQq)2RPT1MHik$r0v1@`9O;-D zdu-H6Lt+9zpVAiET^5i0>vxI`!8X{F6Ea1H?HGe@1_ zf9&R1+K#Y(q}OJ9R>e=6A=%9~mX_@)v*>Y=*(i!w`r%??MN}mB7U(}MDXD}D0w?j6 zI)7dr+_SNX-6qL{L@)u4jW6lRQd@u;hA&>3^vmKr`~u81j6+|S6dvO4BUQYNp8cOu zcRx#A?-sXyJ-0=&*Zc}96nvpxmO+pZ-|kD@0cbNhbE=Dln9sft-L{59H%!b-e_T}L zRCPA%<>Ez)UX%duQ4a67y#pRQz+^MkI<;dKAhnq(cIg@5A@|OZ`m}kg`#nZelp0ja zT5L|8k_$6n8Jc~WJhAF1Fx(EuBn`VS*SGxG;Id5eis0;jy{2-P27}RT-)wa8LS8oL zkddk0#hkfhyB-BGve8N;C6Az?g-!Z>vgN5KEP5bGc8xGaGM>AY1_nJY4t8r0K=G9!}wxS zNzXJEEiXyz_=yui2Pcd-@18EJigw``Y_k@V1G?0G4=1|1`ceCpB++%Uif83Aew3(f zLhpRWr>&@ZIJ>^Nkhkd}e7a^EDO#>R19&Su3J)+vp^fog>K4rb}z=I zf0w;uu>hCx^zVN`)(8LUZ2cRPsgxU(>0g?vZbC|Q`PDqJJ%uR50(B|=1tI~+UTKJ) z#}DR+zjCvF4vC4WsWa>Xx^I}R)|7&H3%}dC#^-5Ue+4-;ahi!65(vdM0J={aa0rrL zUj@`9-H5Z?QQLy3nZ@sM8qLVc3T!6pnSl5P#xVTw)^>X*o)e*QOitka()(6EX+Zif z%p7X4U6a6(tPt?VJVC25f9Fyt5<)u+P;O;>N+)3XmYTW9zU|Vxs#&&p+Y>aO6!B=X 
zXo%N@*=$0=C0B3M9>xK4_=Rap_-FzxD1T3DGXxu$Im)(01B598WM03BSz~e9*5S7A z!75i^Lh<1_z9Q1pgCE+&U6h(Lrj;nx`-uIY_{BrAdOHq2n5^epBg7to!94n5#V@N% z{*EUAto}`CczgE9T%ze%s0%E%Ax?3M&I20Ag<2M0;7)-K6n{IOUZalD$%6ED7x%( z#Wb;3$mOAFCw-XD-Ca9?uTHGLGg$}E3nBPUeaIo4oHpymn#lC?wA7WJVAS|3AKN+t zRDAQd`ihNsy31Z4U+cJ=p;wDQzu3rJk@hQOC4&>p#*S0k^6ZsXF_p;@*kKxAN?lcf zN$RGryMs)mlE=V^tG~9LQ&>!K+1Ylf5+w5J6 zbq<_2Yhz;v=y?w&1~4(C?7gcvtRT)#sV5j>0J|0Ay?6D~zGH!~8y>q?h)_$#>*gzY zwt{!=92q)!sAntAQm{BoF_DXS(?Y+kh6_&;P@(7e>aQ4c*2Mt-4;Xae`t^ev7u>s2 zK~PE58Uo*jQRV6S(Z<`s(cb=^B83#~al=^D6Vp;u`1Nar5VRkNCo~VE-kgvyZ0&~` zI7Z1@XzLtW#?-X9i~d}_Ru|wyABv%Tv;&SUpXmGp2Z)n1 zkaz8{`MLVg{G&RtIZ8HK+*9c&p5Ha4ooi*~umCdKlR%!a!Kf;^)FZ$hz89a<%3ZKa znbK~N-Ow})RaJs?6%|cXRzl$mA+GDy?wAj$=w=_LE>P0&yqX1zZow)XEY`cQ%TppN z072sHp62Gl5j+>J@7DdpLW)#2w-~sq-sth$31Vd=du&28+HrnnFbmWjvi_re+=;_x z?{wZhmDv{PRpNDM&mjypN(#URLj5{XrDJKKq(t@=>d=u7{D-Dt*LHLm=+9_nPp*va z+ydhY8+fedj%`QurmP<6F%c#`^}_s0^4xNUhb2xVecY7Cw_5`y4k|pRA&gmXNwthP z9zEJq9?wI9C$tUxI()*+88ax?>{~vE-397cpseV)WFUqn8qdDhR|kNQ*L|Wi zc=F06BJaoM!U7t)d{j_?63s-6%SBXFebocZqaO|YZm*)=}%-j)zJti#` zHz^&vkR;MI*oWJn72tQuT~fBTS$l(9&Bsj{&R|Mh{g*7IPT~4$wr~H3P7`lU_M?@E z{lPnwMH>flXaxa|(r$53vBzR=Fsa}CprP!2L$7V)Of!JHNUp9Z&k1q3vM>6-(uDt| zSE$ET7sI5XN6?kDr8iWWhN(~xUyTwsdF!8yGCKPPL;bS>QYjub58RAJlCKnLW9I-l zm{+N&s@gK=(OXCJi;(lA#Z738G-gUl#PP$;duSxZUU7Kyl=YlJQ$e=VsUxK6EBuVS z*^d*~YLko~Iu6NEZ$N_+dgpxHlthahY7Ym7e|5P^5)J~7%p|8!L^l2qT_udP&6!B?HQ8Rf{Pnwcxx;2x+R+!uZqjL{maWHz z6bR>s`GUm~2zD()cWp+jCZ;qlUIY}X?=DrE8*}Ks*xs287(Hp7=>4$MFhL>jU6oCK;nFr56zbM46`SCvCAt6T5{4@nQkKhH5JTS&F z#)17b72Nze6kpEW1azf|((r@c>&p7Xo7bov37}5~0I$9zb!L#urnNdSed!ic5r^nN2}m;>V*VK;mW#!jR62NsP|*rc^s z_5BMHoVde}UP~D!ThniTrG^s)X<}2*;?a#y0lzARJMfkIKqQ2MpKRD#@YB&JWcJMs zA~(L@fud8K)Jq?5T1_l0qDt-(v0xAv4ucAQoUrdahM;_45e2s>_nqp)+eTKlu*DCq z|7FL<&5dU%Q@&4gIcP2TfcN#;DP6R!dQh#1NvHP=KnKiHUX;o_MaTy|*9-rw?AF-K zN-m$Uyj}l<9Ee|ECFQO5^`f$ej{#7`871i_ravObUEK~CKzzGK>ACi>~Co~r(&)cCqxy?HUcISee#5@$&` zCdd%K#H;?g5RyWr=G@g4Rt=NKHwMCIgRW%dMA0pqbxhhqmq4hj!_bfYQ*pXl 
zbCz&F(Vq@+;>z)WO;toT{QJu6DIjXwKGK5M;pOS*j{I5n+cfjUfRtv*ClnGX8GFzGNoI50tGDB+>=L;7F$t+u;4lOzbA*mf+67!$^Y zUt4n~X;ZsAJqDeYhKmt~|M-LS6mVwLNu6Te0w{xsDY4@!-ps#DB>Hm^TyfJe8@G^2x`Jd+hyrL#qL-3IHlKja04cn}Z9qyf<^+vKCah&vrS1(fUn$pz~ZhKIj%%a$!gnX=>5_3P~@t7f#M z5~O%3Sf@65l>Tr?inQ^;d22z~g|?G+jLQW9`bj+Ub3IyU^6dQN0P=$DbKfXHwC)*v zoRx&XeTxF3CD0v&^4;^a1FQmfh@?$$%dax(5wJ(oJZxy7@n7edIW z)nTmDWo~`yW$AwwX62};ne^JeOrT3((3gF}<7_e(lAhuy)i>Ni4&aH=n1F&AvfHBH zk`nvpf}sz*wusNp(wz8VX?SyR2Jdnp-1GYgN1a8BUlN#EqU_aJiIzP{+!yyNMu&Nu zGld{yinFDKX_$VCa#@zg$8mQzZg)ui>~w%TkRlh?9$}64Q9Tm-tIig4y+NfUBSBz` za#f!Wg=h%FyYK@a(vxY`yT#=d6&W4#!A$%yHz$Qv4Mrv&NMpLk0J`C?gVuh??Q?^V zKxc0m)tk#e)K)b^G|Ag)9XiTIF{`m-@n`aOZWT~8y}{k|>D(-cYWvOF8BkgExK5yv z_xdvx_OV#Sl!lqtEOY)JE-jp8FZ;RabXM!P46I%{#rM1TCKeC{wfj&C>f<1G;~8Jf zgF%+YLip~|bKW?M;SM^vZ+}3ZrU0;1;2qneQm!fBEQr5eF=R{xi72iG+k+o&V{G^+%5gU(2Ek_)bwz4|t^ZBv=Y z?8Yz9h1%tS8`4GZcF=5d*#6t^L<#cr{B7f?!O#ik_UW5x{k+7rM39?o5`I!yzPp`4KoG_n0ze{<*z z@yWf=cRwsCgaChdhSJ^Of^!=%_IX<A1sZnRelR*b?!#ZXKQ4zhGNAQ_sDf zDS_s~{~Y2b=Vtu)Ig5rePdRsmXZic~Yy(o&U6u-5-C_L5)YKaOdQ*HGSpcAVhWN7+ z@Iq*()?s#&o7ugG&4%RN&yMbt3F0P44z!dLKgSYhxehO5rntJ=kXB+Gs*96N2KiRm z^dmT}#&_Y>^>E`9@YgAd)>v|`GL#J*RCR1Kfey}qVMZ`lPWHNN!!YL&?Njdewe*;R zjA!@ZsXBtL{lKBLTSiRMZIq4-$mZRjtW=m;!{@&Z9AAuhq#bOI*ViS%QiSKahZa0A zHd@b?SJ9$5k#G*TSS2ix5sm0l_dI9cVHYuQD`N&74qU2r7_yD0>}oIMRsy?xXEmd!eJFw%FYPyhhx9HZO$bIc@s%7{dlQ zVZ4}Lw$Feag1@{0#TFOVuW(7bLVox*{NQWc)^#7Rw&CkO9;TawvI?StuXv|bRh zH*Kfoa?=o;1~G{t>1^&dS>?rFD+OL>+@XH`P?k0>_9HLm1<6*aEhkF_6K~r)^f$O9 zINaP9eY)4tIgDb6d|jfpp!^1$P|2eY6Raz=;{^Z<>Hn2Ss_e=hlzVt!8vp^NTbb)r zrwwNfFiFYTgxp(B@v5{5I)(5qnu+;^IO)tRA=OK7jV>Z-*>t*HOSZ{(ga)_P6Np5q zhUh3{_`Zn(d2ljpPXehT5~#ArYGEh&W_b;7PMz}l@^9|9HVq`vDZ_7Wcboa6&CHoc z^n5s1hd2JaX~b}_F4yv8<6K%i6*aZWL9}t6@RY45l0_u~JRa{;rsT(^xx+Q=Y*Wp{ z6C&-0eE_?(N$ZwT*2|`k`i<0C(jg>0DlCNjdf)rwmoEoYHi(=M#Q)q4c&(tMPu6XK z*C|G|zm2gz3Ub}k%?xY_D$R=@dhzodY_P+U8S+9`@F6vNw03FKOrD@ld&jy@oyw^4 zNZxv~ix58NFXkLo$=zQ2!Fgi=LmMEH#D_ql!rw6Jg4hZ(57SusyeNyK%mEn##NS@K 
za-|*O;*6{L<)Jzk&GOnT=zP30h0Ht;IMf3}w`me>ZP6BQJI6Yg=?UBuJLaSC542B@ zBR3{CDmiI)8xH-C35;dm{j?5@(cZD)_;Y76H|mr5f1G4+DF(R@jiwca_L+*R0C=v= z-mnY<)h^Ag}VEyV!8m28mgoVkc|siE-&jm=%bo zN>4p={G9HJXpe-J@`v4?F7N&|PvA?sK23ulC7KkFjlH7@vVwMIuj0%I{m{S_(2Ky; zv3?3|kv`>Ts9the=6p&2Vs6}gGJ|R{7Mc*jpq)zC>g-g=-H@|HDA}!qxM_<*r+g72 z963=nYbZ@kePzG5tjFrS0-2Z!-GK;2VSgd5;a$@7XKmx#F|6Z~uyH7Uw?w zco_uU?y!w=ZN8ICne$ez=ZEn(wlVchCU&GYXD^0WPtw}}4IZD~LBN>YV>MqV@B)AS4&v`|;GXEMB!41c4Ho>n@LtbCv!p!V$a ztz04YnCh@xDo4RJYOVjfsaKX7ss}SZVIUGd=l1rxREjpSt6V(%D}^&_enM=%GcOmL z_eTwI-sfn-XP)jqq1gnMVU60PveG7HOPkjxY3}$Eo0f6bWSPafF)-PhM=K?-WpBVR znd_*glf6GWs|~rx4o>R+_B^SEo#;PW{6N1W?gKrNXb*+d%(3>LfEf?G?1F(i^ez1# zEff(x0N%Rw>vzo@%phG_yP~aRHyL#+Hd49b=C}RMq}(35TBTIn7J$QG>f;0YK(<2% zAJuZC{c8Vv*w4wPM3Zn#ujb(1Geg*-V$D1)k;Wj7Qb6kmRcamD?WG?f4oP>W)qv}8 zanj5V3DA08(0VDANgd9cIvqKkNFb}gXMSbmTmg=t`5ih9jsvv@vYU`tW|as1%P9Uk zm?dCwC-zM6x13Dw0r`T^xF8mWK3m1p)vYk=6H}1ZQecfcTwE(yQtWHbE&+EH8ATlW7G>-d~6#sv}?WT2Qno zcH*~0apJ*#(N@ITGq<*Wdew@dPo~{9{zL`$Sm}f`fKY;9kEK79Z4H>h`uy$i=b7R^ zutEW&-gO!cPZnzq+?StX07RvKG#Cg^@~hV{X;?8XE^rSkJ(jsbb@N*`y5!{OV>V7S z9rB22D`NV64TsX+4wP+8*brWt%^njX-KjA%u>XY>R#i*J7ps{;$5GhWVR8vxQPj2W?HTp+y_9S@xqlvxg`vjcVS@iy|t?d%kX*Fb4uk@oL_i9=ynr*|X& zVQ7IGsl~$|C_7wnwMb{-b}RU1soo?Iim827o~T{26eB3(D&WR74gtOr<3~j!0Ppcr zrsE67a4tGJ+I6;`mew|KU*)z;5{2ioWy{x(4*pA+j1Lz>Yz1T#Z7KVQd}<25*}v({ zz^zSpXYV&ywBIi5^H7JqpA%9#IRu5N?fYEln9_ZG;knIw!hA+tm}fX$|NE`%2!q^p z1rZtL^Im=aQL*vEl^5%WHKl)VxoXF%S+m}J-8AIGAFH3Ad%F6@xiPb6{|eZx-S#^4 z53g6RsIqb(VJqAO@99wx6#3@eyFd`I_y4s{TGvq&X42NQuqcw>F8>Wxs#y8iRy zAHCQ8_|~^3SJrTG(v<@^a#uHfy^v-8ohBL*mgOlS-N7q7y^ntcaqXurI=)`dn?aH` z*S|;s_eD|stjeK4rU7(%S_Vi>=GXLt7XWUf2y{8Wq7Xlx+8SV5`ERo%|>*^KH^A2@;*4+iIOJy}cMyZ$=VW}T8 zpj>)#%4Zy1N8K<>T&7lD!nzwqoRx#9Vx0N=OEy#bVwBV-W&XT*E3S?LQI(arB-v!m z-w*}J(aBFmTIbT#xD)KgmZifc0NUf~uY)PU%@VfHjz6HFQ{Q`?vwvChI5Ht^RxQz0 zCInu_x0dETSWtW*@H4PbQ^)Gw7=a{5Wd zb=rhatG1?4w5e-@h59k0a5hcRJ+9NXU6k6aPT|*+x_RG^m%YcSubj3fo?sJWOvpf7-WaPjhmt 
z?r|^FnG7=AStb0at}-ILla{}#)|pG3Arb4oUg+)J5>fWiz7Vb%802So&z~>r8X_hN z^Yh%fUXGXL0&w=s!q-8<2U&(7>fm~v@f@RPtImh9b>WB3T8Yq564Q<|xGk_XU1=CB z7Nq_sxAnsWlJccEdJD80F@ArdCz8T%->e4Lvhe`&z7DOh(mDRY#A%<|j713kz3i1n zS8jqSmC4e&ho*`O83|9HJh5fLg6GVDfPks0y%lJFZ>+yUeTBJ2`j=^pUs`t{j@^@ZV9xTZZpD za3GjmTq0+`0%JiXRc2>h8Cr}=gv_Q#q4unOrxRMa>*0Igt5BW(<=qn*t0d~kc0EGG zXy3zPqfKgFQp!Oxbxhqdv+5J!Y;Iw~C>pOh^DYedZk*j-S9d$-aK0x^un?gV+cIPg zgTd`OO`NfnRVy`+1Z3R5AC_&+@zs7lfFvIbnrHRGw4|-7?lV+-#9yUC-j?{lG#6Hw z(EpoC|N2XrTJcsqp9=Iqt9|=1H-@1`NqmCWZMyR|mDKF$F?DMTM%TTy>CPH5?_%s1 zvQ2rbv_3MX-qwKUpv-$mZzEuj8W1vhDy(YBioqr3__#2p^P09E(Tcc_#N@Gi6PtpJ zAHpEV0!ulA=z9kTKc~r%2_vj7!kA8aUI+zZoiZHmWMnC&^MHnApVQ1G%a)<6ZL~e+ z4L(qF>-&y``}~JVOP>!a>1z8bq)AxpAfp#*Kl1GtNB6$-JZ!R)o!v(UuNCwyL&m|7 zVg~YR)|Mlu4LK%!hL)T%|K8lp(ALd`el1Mpzh#Tm*qMw(pq+`5n1=o13^ceeK+RLb z6~Cx>o+HX}Pr8(2*&%b&d(FX)nNw_7+V2`Ulm&c# zuv>Nq4R5S%E*0ys%e+6#S~cD95WUTvxMf5&1_c1Y~` zJMOaCJU z3w>GG4q|zv47wu%$*i$&ae9pUhGE{%#!urypJWhnSd=)}un`bw1@QdBNmMrMUyOj< z*zaWC{rkr68=3K{7F<(^qNMW87f~nG__hMl2rKYhR-|Iy_0M7lVcmURXR$Dw^i@o&%6JSB7^I( zUqM#Xr~Ykc(R0FaGBexQ;xm3xQSl>T;Zke1>t{*Y*0jKYEmX8eo@1F%tOiTC6?Q8P z*VmqlGE<4&j5)WkWKMqsg~VF-lYwmc>S}6zVz!>9I3e>d@rJW_WM|jBOIEe`vmmSu zx4{#QEuzk2chWZ!xVjCje*ZrEeA>_Z>XJBhwIx(CMiA)%MeYc1O2iN}BDFW|O?;C- zb1Ycbl6a>9Fj!b8CypVYpl*wPqy7F;*3cOQ$h(i86c@t=Gh483FLm*3yNVePmg6b7;Lh~utSGv#kSw(AeoS@k^5!n zMv4DX6Q_7$>Ty6-%9To85IavJ=s=%raL@i72 zybedZ9)GL7&5aHe4s=yjpuLS>w5+rT_2Aa0AR<|oY@d6 zlsh$=*~dM5tE;IAtHo#*1`BlAgYNVC!5uo#!Y=-*!_rka{Myd&oQ>g7r#s{<2>kmL z?p%;7_xhtPk}X^catJXG&}c0tB#h}kO&mYI8J*VsJru+eczA$)Zgwx7qHqYRJBZQD z%B9RPj=h}^_nLtAsTcexJk0i#KgabAxKrcV>)eTT8Z`P zFztEKz~(J#{^a1n8XYtId1+~$aT&%K&`9Rse$==F2OZJS8-05Fd4nKObQA~^#|(5@ zwyZZoy|LOSuwaDasNHnTth7zwWYPg#vThG)vw{H*!;A4gk;2?48(|TjcJcJickZjT z@3tX9y?r%%v?Lb^D3!8p^87_2BCC7;q_uAL?{%|(E8Q}<< z%sECKJ9Nn1@*BUx8%R6@mXreta7u#+(-)(9()m#v*WYH4PM+!-1>>&0 z8!?XhgYFms*Jl`-~?sZHZJAHVzt1&s%3cCI&W;Us&AkLyCIA!UKN z#XN8Ckv$_R(#7rpI$PW~JRN4R4&mT=iabLq?>>=%A?{hn$d%>LL=M+-2m-`jj<&vK 
z28DhaE%jL0c_JGpmp9P5u;$3bVj7ngh}Cg9E+qFyO=1=#<3efjPd~%vLdIu(4XB`H zX-11jkShfT1p>=1>jv0BICIxe0&PQ8ny*P0Ev_rp_89F)Ddd5!xr1aho6Wa_F_zN> z0IoG~O&2q9`J=*7<=elB7YXQZS|U5)dpTa@TnSJ5pCht$KscgChMJmwf*~2ObU@bo zq(`BiB@MNEDB4#T=LIf|sia_g2FH$;If$xAyFA=y@{FsDZW9s{3tibZV8oWze^UxI z+ygWGCiUQR_~wa0i8$bW!3`u9t|0w^myZ;?%kec(B%sRzYHwtz$pUfN%J2;$SoJ1{ zeHZiX#Ipvf^ubr88Qf#GZ!j3Y$;AC6wRI+LU(EYEt2{?;4AyJ#4kEqmZ$B^uC>Dls zLbA*hnULiSV(aG|CeMxQIg7;mLW>805gHz z6C4vn1t;fHwoTYzrTVXKsulI}=?vBc2UC>rGW4GX1qY)-GVrxGJZrq3mN9$zsZj=sldC zpJZDPRRD{8M$LEA@eT8rVf5bp`>oXbF~<*89pY)VPhlCPs&&lTrslyWgU2O2@HPD{ zEHoL#6^(j`uQ^JQ`hWam^wv&2b1Z2%n&&Liuj#i9;TfIdT2^mu`aWB`7ZV6On)KD* zc~i2M5h`W%*1-J}bQ)=~TQMg*Go0Nw!Azi*MCvc2|;-a{pfA>uWS{U`+N+=`=uaACF>*ho?=X zex6aD*p9P)CP%>(l{>HC-O%nSO>uwOF>m3*;ELOGg$MR2SR{qEWi~8+y++??pCxLk zYtty~AZF}d%zG2M-Db>dl)(`-!Ofa{*Q_Zt1Co%PWzK(MkO|>US6o9XAJP{J#O-qyD2bR z90XG^5_89bD)A6uEH|T`;bA54j*jwy0|(}U9s4RG1EqT~30U>dn@O7*8+JX4A&@Sw zEw>$1Ps}IwtO)cXA)<8;V+ytBnh>F1UgL{k%Bj_^-F=@^NJoik+6T5!_IWMlSyTWF zY%L=YN%GXKr>u4dphw03V9chvN(UJ}T0jbKNd=LRD|@dvJ-6;M3#$WwUS$aT0DQ_} z#QtN)e%ZQptK&OPBT7e)`kp1rLsMQb!C^(bBJBNkX)roEKtJ_+Ps8MZnUQ^qW!~Oh zo6Vw*lA)Z2zyyqCt&THcWbtO=T(&IVg`E+-g?iK$${|75N2Gw=3nt7%%*bYugk;(8 z#;{<)00>6MzHbiP{4FIbT@{fAyb>u3&+N45o@vm< z)KnJV%=m*wd&tJm5$0nne{1Y@m+U5(cRKH@+5v7fp3T2b!zJUl1nat-OABTYevO;aQU`Z#+ zqfod-9P=2-xe%-Oc2%I-t9I{oU&}eY^##2{Y+~Xag^P^Do*ZD+BXw;jP^Qy!cO~lB zAW7df_NKfUI7>_*Y+2j(UCel(dv0IrW?icr`-v(WG%7J7^ePN zoV&fOq^^V9JAQ&`^l#wkaXg*o?FAP0@Fi)pPDD)Sx4FP7-LP@v`iHVVp;O1W@^YXK zjg;)He&cPpkILY8ioa67;plQ34WG97C6oj4di{p}GNf$F_ekeI`B1TE&g7ocqgJY_ zsxn&Mt!K|51o;o|0)Nk;McioCJvZ&BN*5E8+i#YVVkh_5YFM=CNGYLlD8l9SdQ<~} z0F;_3B3>X(@BK#eA}h4>)&XiP?$Mtm)4Cf~T|E~pxOaMRWiGHm{MM$QrlztC3v;Xa z)A(ZMmcF78Je>XtUwrNvko`{Ei$Kqgk?kB@7org-J(ezEz`73a>r$Zy^LNn7R1Tu= zDJRmH51J0061UiEE)?ZSDJcoh841{-eC*r}>~+6>AjAh%urR9y_;F(+q<*nE_urQF z$$X8@iAtTjb{+FGzu z_B4vO(>GzXaoCsl#^}c>&iPpz0E%IcN$+fOj)2;C!9o@jka#sVIJ|QA`epd02BS_> zuOs5dZ_(!aD#*Yz<{yuYb#}Hu^^V|WbMq5rHr4%IzqQ)C<$wR*re?0)>pSrhBrs!Y zaq1y6(J-N*qla*!M0X 
znlkSN3sgzzA{Abuk9izWSVsL1O0%Rzk9&12#k$vn(UA6%Vd6q}dm3dUEy&MCq8`o~ zCe2oE{r_^|x4Qg)u-fO{vQOB=PuoxQ~=RbRvTecN^onR?q=X*Bv;g^>_zd!Lc zsQvgUQ|{hgkCQ>D@9$OT%``p($%oQ(jX$`HunCDP&kiegLsW zw$r9g6@4~@RObVb6lI)IJ%#QZSSl%L0SJ)n1%Frt`$BAD?hnNjq8y!KtDOjA0EyQ- zqVI9noQJ~=piuqi&sG)}uUwI8f;#MUU7Mj!**|3$CW*&+UN_XjW5$G+FW?vmb<@oP zh6y~yM9ag##o|rl&2FXKjxq31g!)7EwVgel$+IODi2^M3EP5wu(3F;hXULfzgAc>n z8RgR(`zsQT{gd#kUE`wJ+^q^z_}TBMpU6OMe9lk1q(+8xPup_S{?`o7$)j>2;%J|A zGDL61w>O9 z&6OOBrNJuf_C%drGwDB8=!pZjHdM4k;l5|hBkWM{)=KTqpS#PmtU6ETtbhud?v2gM zu)A>hY`bpVRr}lZ=hcreq!DVrzn#wYkd%Mu6a^gAsRbpJg{;~`?GBn0 zo(E}0rdl4O`(=F9Cup(xXz1cUH@K~l1FX7+68id+6 z9%k&N4ua8B7AFVz=rmpGeVUSMo1pvnOqC91D2?<qau9BR!t{v4Wi;jLnxYB{Xh1fSi|}k+Cg6f!4x(FK*1Y`&2XOW}KsZ8Z{X{ zttW131q=+_e3~V`P#@oargsoxbLk(V_YMlG`=I``*N1tCv}nM5PO|wUl;Wfly+Y3x zFc`FSbnFL4o9%C#Q38p81DyWXVhS0tbo!n~nb{6;fG4Xh5>z%EI&?_ffo!CMnQV|K0q zA*1m-da$X^MsXA8oC0w>b$kV6u{pHow<+aL?bKdX9n@*zf(5d})^H^fG}ynVOsZK9 zO-P#>NKXawy#Lf}oeuwpt@jS-d5`yPVl9i#8}^LdZg>$zT0w~4%tnTuWVkAYAqssDbqMyB_uv|?$w8cYHOKV5bTF7xpXHGm%d~IhUt*5j0J=RH%H3b;@;j z^cw~+ghS3vH&SWg_j%hK#kQFHuk0aaQ<~f|sHT0uOhwT~R8F#sSMxC%zGIPlp=LcbKY7ZLC2M31Hj{32vwE`&g3baUXRuyoRI#Kv zS3_Z^z8z-&rBmxXwtB_6CCRbHj$?K<9(<15T~$r(vk{d+(Q^ey5?8xK`93C8cMLj? 
zEyo@d7G_d;LM*tA{a-^|iYlwf8%I@Wtf+}>OcwL`Q<}+^S+_Jz8VXT;f@Ps=A48@{Ys`^a2UfUx72zbG@Vzye z3Ce7T>APjiFqcK`7%5mX-Qm!LQ4#W0w-oiou)BQERnb+jX(_eN&m(^%6RF#Urit{ ze|ZZsGhh-eCulR=S4MnN@Y#6kj4KN_i}qJE zkz*8`PWSW`_@}*nQB)%TFWY(u!bJm(g1^})kbJ&W%WXVkuIRglWa=bexq2 zN6wxafQc_YxSd7TQUwH(z8mh_)+L@+| z47hXYUo=0zI-~hui;|$RC2_2#jkd9Zv(2aGFpWGj=G4m=c*49WQOsJuGq9UjWX3nc zhF(1YY!Reu=s|GUAwKsaQ9wfz)#x4VJrOsmn1H>8enEL170?%;IBUCGR zJqizQXwz@|>S-w5UMA?DQUepi^<1@{?cSEc@$^UWj8s>@jAxc+2ALh|eWt_ez_^=r}>6U+JeQEeEO8vOc#hbl)AgYdr?Q?zj$_ua1oC^uNj-k0Vl!GC0Fqg zbC7SfM*#^Qq6PC|Hu@s}tnAm1zht#gqsR7resXF9VBz6v0cbGDbn(&2;R&83X>UtI zni$De-+)Vw!nIFv4rnu=h3Z&py%07#S_zE^7{Ql(sAOYh>xSw0C_?yP8J7@5g9;h` zUKi&4vI%H|CX4$xxp~%*zOECjsaxb&PL$~f_mn+050X^+E?qtkpxT%I?GJ;a9YMUC z{KI3~NLQxf0@s5}Fx3Alm;ygD_nE}c@gAOCqLydPbMm$Na2c7rfn-8{SCECrn%B&e zIfBh=KAr^JawCFBAia9z4293C6)lTv{Xq^3xdy{jkrfK?KL{H*^ zP~G(cS%jzfVfk4DP4FLr_F_Nl)@SOVrf2n;O&><3oRn6 zN^Y(R6Q2Gr8ixN-HVjf43ihF~-zj5lzzeQ}|5))my9WEdWl7=SnKf@qY%Ca}%RVd! zG7+%lHSFQyf^EWVzqN3?o697%4lt-9Ld-^hqLV#&=Lb7DUvxO#THBxtCQ(q$=3Xd5 zWgh1B7w9ZR8oK5+P!d5&eVvQBtaVpw3fVDhRu=;c4EJdj7(2ocp`r8h#eWgpf$bxd zQ^OXb@uH%L%xn?0!@@eiiqEE7!C*r#;Xq6my*&5 z&$Mrs(fh|{*&Jlf_%peIAv;JMaO--NAusUBFKsv+b}pNeWHcU)FHBn8i}9yt=uiN^ zjRbm*W)MfqONfJ-Q5q$_B&AG2_Znw=Z9%CT{c7#6>mC~32a=MM0U8IU8FubR*ww*P zU|u1xjV~q@g*Ih3f2_SR?0vel{RE?bDEAk_3G27jg&N&-8l0$ptRbK2|T zEG!hJ|KM-Hz%YcM!kcCFVA9k58|%D2q7Jyci=NGsp~Tm0%*<4wG!jes9uB;?(R*c) z(Q2DN=^HihdVSrm9`Zft{?fkE7q(;rOL}M+cEi9DI(QN+^AWylB*CzTYa9F6z9J~?>m&?(;q&Yz_ zahI_s^@Z3dxD0P)a)HG_QI8}@DO$jA=hP{`Iw&`1i{%zP6Q3u+plAL#IGC#Fa~6n~ zpu<#fN?x+e2wpb7p#S9HgNCENdpn57*TArQvSjfQX)Ghg zvE$-dKkTR)%<{U27x%c@%EiNSqL4g$!>WU>a26zuoc>6EsE2FzbL*W{%4Pj z0GHt|e^z0==Jxd{Wb3S_eH+>fERWt}HJHus8_b}{BQvB%sKp6Ke_a5hAnNzx9P z)4}=o*GxPeSUXEqC=gCrEu^I+8ccYchAo`C_!Kglj}V2-@Hfn*KwsGVF7pU|ww+Z9 zhTIZbVMYyuG*Rz$`u&YSU9h*%JZjhVID4#o=Fgp*1rOr&;2%6_a&l#pP&CS+oyqit zs|M_aDXxxVtV?kzmPTE@u`+#!LSuAkIocs0)YuD8;O04d24{n}s_+ubPikms-Js!K zI+ZC=n^-d9P24ex*U5*!{~oU+XU|%-M|7sZ8T+T7Ja(+9xQTWxTUGblh=1H)(TxiN 
ztzg@uG>Ii5QZ~U8FU?`gN6=;%c^V@wygw%@a@Y)inYl*GJGOh1My8sN1l1*KKHw`a zMbs(IGdRLUr_t*GHxRIgpWj71&ClquIJ(Qm!*^-N$Z!!c=Acq+lHl}6J;Ov!aD04C8Rb3!86X4v$Bk3BYf_|5% z#xtrROk}{Ku5ATu^PUu{Y=FCymj41&A{2>@8@-1t!!{n&Q_ylK)N)I@649eKckHq8 z4cW8A%#J=+#0Uo=EtC~-nAO-1H3n3%-OUu$lqkQY;9(sf{>qJH7TbA!EuG&;CS8ur zQ7DF$4}JT8wE(JAw81A}N-`0+s7+|TRlO{k1?RU`5oD6ozAaTrLiG7_D6gQ~lhgFQ z=hNb78q06XOw)nwS8-@oz0uf`l}8_K#!%UQ&&1X>M!n^E$SDh;!o&)q+;hEz&xcV0u2@`_Bu z(=$c;(NyHz9j8UDnm=pSb>JORq#Vaq&`h33A7@ld!4GqC&mNjb;EAhtK|$J~kGbk( zWWb9fd`wu+YWGOf37gGxFuibqFJPT7wieR?jb-ROJbQ$aU01PW!ATL9y973<)oM(( zyO#x|x>Mf9+`ano`6UAydfiMzd^bOqf>%in1!u*QgGlYQ=#s+1;7jD@&s9VC5IbB3 zmU7BSy8V{JUu1h;xsov8XnRvrGh0@Ur#Bdg zYKF>a+SID`y^##2rNurEL1F%-K4g?q?hFXTw7B8TPaA8!Jeo<#Z%6({YX z$;YK(Ej;LCDg6VNW8g@8SVUeCAxNN;aBqc`WB>k}w7t;pBmJtcero0mwYmH>K6RPy z9y}3Al$;jAwhkhy&8(_|DmaGU%3o+aq&TG610cEPYC(6ML`H? z*pC;CWgG%jevdfq9!&xs5b4iLv~KUv()h(v5PAHX)2%klxZck^USQ z<^IwCj(qt+(Xd6u1KK1Y#?;VnoTrliq(2yA7aMo|<1KM*E4cC6BG8O>(z4!?PTvqwbKMV&PP{}e#<&h6`M7d4*ig0)+o6UwBx;DNo zlC;sI(VsVJ*bA>d~5itlYIKl7O-%;TXAPYdM5_g#OSXb4lL*;V&al1vv# z(Z$i^{M4|C&G6vni*2ib~FJ$dD+vJMalXe&d-i#?!($nY7_Y0H}JNZtL)`y(02r5 zlMC{+mgzc^f2*C9H=Xwfk(Ty0?%UM`rE63>wP?^ZVjl-orr4AUM+~xRzX-$fZ(I+T z8*#mEeXB%mqyj0jMP|`U+P+2<65y9W2{qnQ!r_Uyd6_&EQpum2@2mBva zeX}1wHljsTJx1*eHDda-U5geCVWHKQi^~Tkf#D2#uBm`I!7{TNHFC8P8d*#7yN7n8 zW%S(E-EJn+W4Q2~1MVX}rYQg72Jz&G0$~tQx5e!-ME71u6=KN9Rr~i;A)Kb}W32&z zfHnQ@I663(x%9uqBooxuEX1094%TqHY@8X^6B`+eLlVCb7-au32|{Z zjmH1b9EVU>IR+=7H|2b!G)MScNC@96uSk)%l;F6)Af~w!jF4sI$W@515vo zCbV0UDW{VhYd2&_)Z(R(ZMqGQ61VK#%9hfNJ}q@xvT-+64bzhmc!`kbE}hIw)=fC& zJ`do47r6OU952oZVUBrcm@*Mm{is~^ySkB>$nG6Gcg}#z+joICe?aD!Qj=LJ!B5(V z={T^5i(Qk$k7z<=e$AHFV#DACcP{s2?0^u$z486EpmR2B#3*-}sONJB3@ZaN2CTGp z1V^!vlGQa&%L6nQ5vbclh2#yNg-|U&@)lCc2Y$&PhcJXfYZMP_aO}bT`@Jcok6xtQ z8VJE4veXUUl%R}dxgO5Q#tmG_?NNIc4CgQLuN_(|!TL~P?Z4s6$71%S1k%D3nmqBP z$kRY-n#Rs(VzP%$E2c1-q{MKB({DUY6sIIWQeR6cytp;W+Qj~eSQuE^=V26%++?YE 
z`}jDN&h+zB1w3>~BnMOu;;v=|e~ob(%@SrHqIQBz`(1$1z#6yZWjmT1&T|bv9<?<%3%LK^Y^FgRZNwa`L)>_0qNym;JCCbBe z^Bxoj*ID9mZFMeMRQIDlBNHV_M<{;W@Gb$}%e>ZId6DM7DI&9;H7K%Ul_Mx$Sxp{= zEC~E13K4+<*0-XjgnT+TP{ok!Uk)&j4*kL@hL0@U1D z@UMogc>wat)|X15#UK&)Gu1UUP6YGNl^kxM`_pst(00~-QWmQRIS(Da|ItbmBK?DF zWAX(x;jZ(T&=v14s)s!@A&@0L8q}s^X;;IEI`)4d=VH^(#;rEI=}A6Mzs~B~M=+Ft zGFdC>)c<<#wvn^%jVpEHnzd^iy>XV#9PC~xL}t{jkd^F}#62Z|>kuq0GPy9f4>Dg< zP?is?_?Ym0W?f!_a^-Yk;-SQZ1Rduc1#MTk`}oNdA1-L!V>O5b<6DT<4(zdkYrzl4 zps_mX!=9m9r|}O69sR!UM|nFX?~w6h#wa@X5wRyc0xUGI!hW#Cl5=1bgi7>3RgdO+ z#p^E0O2}~^|IJuW*v<&Wd4Kd!VgJw~V$iJ)2Bj*c0G7hCc(m&@)$qUY)k3rtzC?#^ z#MQ02|F5}EB4`-Trv3!LlBVlq_sE6?Wyi^BXQ!&3fs{_#KbKlfkRfg^{8ug|Qhjog zJo!cuFY$-!G<~X-Yxqwh4}cE&N7t+6)D!YdY?~ng$M&D#a0W2_@*lOpTYzP^e};+%pQo?et(o(+(FJb~oH?`Y!*igt005mm1riBGj=nG%U>|hGD;^b zu_yO6;+5QJ_`LLI&ssy7rsNKB^U>RjB5g=9TFiC8q`vfjH63Y}KXj2yA6m@l_)QQR zCA<;IFRuIv$#RmVu4^x3EKu5#Mw73t+o5lRA+e*Jb81r#JZGdbw>i@(Q? zHDzqT%03J{XKNo&%;Xr9O0pk(zdb&Z$2aBY(lMwW=6-Z!xGO!;|Ddq{i`A@NsnZ#( zAqzO#G-Y~jXiC0BNz8eB@<(`*rzR{q`l#?QoRR0 zD1*T^gd93(V0)A9m!3A}7F<9!>*PnSRKiRH1`CSsLTPb6xF(nhtFQbO)pC3gg^>u>S77TX?dR$5uipt? zqMlX5t>>A%>J=q1Z!@Q|dD`vOqxr5P{7Aflp&$nL&hfa{Vyx| zJiJg;$#8FJt@3ODFK8)_s)z{z3BdX9$>S$9fA|rOQ1H+c>YawOCwX?pgu=g-@aDnJkGJL?^$ zo}=ENA#0JQgXNuX#=4*WWgc!6X`uMZDVg*hjMr>fnZR2s^xz+b917*j6!rkj5DL$@ z!_IW3^;+6d2PzO2ESXLO?MX!a$FI<5Vm`>M6g<2dAD7B5&``YZlgMq)ws6=QJ zYVop*k{X3~4n?PFrvyY0gw@5bY_U4#`buQ*rUO@wqv2Td7)a&5nzLrC>AHc*rqAn1b``@?-YEXC zphcGYA%8z37*AieT;8Q4N1EW25XH?rHqdtWVM>}h{iBp-A-+=W4(X4d@IYQ2u({-+ zV6Ius)_Tc%mK(nKD-TWnd%{q<7!GN>csDLoOycylHL+*5FZSVbtok-f48FW? 
z7_SXT%pz8KF9vwNMgLf+31S#_xGE#EO;CPErW8)JH85m0u1z&HBWMpUX<_Vd&XV7n z2O{FbA!p{y^WmSmb7eMTu8@4j{an9pog>ff(P7TtE&Cx(XpKfT=sruv`A8LX2~tp< zl25;dMCd{omsX-wRcDb7=yi~jJQO)dgZ3s}bq?R9z#*tkcVzUJ_DU&n#%7;F7-t6M zbNCH56)TSMK9Akq&e6y18>HKC_LSL0Lf~YR8-PADGqEqBCsaem%BFr+*Y}7ug{wmb z@%-vne0iyG;HPh+$r;QW`SS>)ZB=o7-HuVm@P6QH+R?@d7hI2|z>n|WOWt4pFv@i_ zqUS$d&(k>h@bTj)d3XJnZpqmF*1p@ldqjHZ{{0H?QZ_fD=Z1Pb#9~qS9zZ4&=3k& zu5`^gXwlJwZeobyNZr41U}J*WQHjJvlfy-2KA0hPb*WoxHyO4e26hzNz=zbuqZ8YZ zry^Wx~t) z7{g7|K>BXla4!*#fBz|k$5c+}`$hanV=5Gbg$U~{!gNZV=~mQDMFfu$GG-eMZZ<%m zMjr}VIo2H07-+@27vEaG8D%A>tx73nxoh*rumEIsgryfDkEXXU4(JQ-fE&hn@B^YA zK&EX=G1z#c5Dyx~g#qF#T|iEkD2)-OJ9T#2ELg&rQ7+W%!(v3LNEVM#N_l)h3LKDD;uqlydqGi zL_bO-LnbC99Ahwsxu%dBu6crqG}ukS73q|O&|lU4fr0SPHOrHZHvIV$!0X*(C4nFBU{qOk~%zd z5C<^oaJRGHuv^U07Alp_f=lt#&nGxlscoklPxJN7?1y$~4IP-w^{uqza^b>{dAXDr zPZ{B;M%>Z&CBMFP+r!8|`uJC2oHJqFJSH0FNg^{wICGcsY*K^Uf{+YpzLf=+1REJ| zl+ipYu?WKb&ZDd+Q2DN7UPpghs&a_0;Rs|?+1W+2Nyi0(6MVl`Z+^Fq7E|t!uzQ^D zuQ)v>ey0ML=DA?fg@p~VyPCyt?se#6;C>CWK0oF3U@9`*)jJUJPz{vYaF~l|ws#fF z?0jH(v-taP4iY-v`)`gNq%-oo9>IwS0i)|X9Urs$2M$V)@#BYR913(6YW8MO3c4Wp>AbhYNU5G91hoFD8nDh{GMfKD9*9yv>pI^{`8uRoY$)qFO&$xMRIb5uHV~1-GXRoZSQsz7? 
z8^COG9f5B@InSA6L5p3Rfam%@eWmeRz!@q0=RN`3k!N-wd00Vd3g;s1yHAJ~<&I(Eb)xm0V3HFY1F5B&c_&()Zc~Y1X2D7Ibp*(Us{R0Q)cHS-o&3tgE}!B z7L{{lLBy%IxjbV>hg+c7K3(E@W@yxFznJQo237NOWa^0C3FumIHl2KOW<|`TsgOnt zc>sX(@A_^@5#a_O)tw3?l%EZ<9#%(V|9qrp@0_X4WU7S91gb#FB6SCtL%zWc*FZ!m zwHKRiN}tTr z0RPH__PZK?Ipcca-+vp!FcV1Z+juZPNaCvsC8Jpm-+lWW*IG=!A6=|-Mc28@%l9v$ z5UPpT)^PB4;`AZDEU&b~O@g=Nl;um5Tr!(GVI=4qJ$lYV4hOL6Ch{NmVZt~r(yKLP zyn0n{Fkxr-XwhnJ?Z#LdQC;R@!bAV19zG-B`@BJJ?UTgqlZYd6`Gwt*iSr|I`>t z1xCNEWfwG1DcEfP;gY)R)$VW6(r)Xt2R_53Rq5;3*ijoYxo+nnTh$_xHH>UFP3x3|=VnN(_U57c4#v7C^xU&1YBJl zk=@`E)ws$5#NdvbTP86c0K>>owUN1jEfRyML;Wr>L_)4VG@XG{SQ?o>is|HRmaN5U#m_5eB@R6 zR#Bx|vcqy*zrQUA57b=s;t&z$>1KYsO27q^Cdd9y!dBPGr- z@7DvdHK(QbZW#((LfI!9UX@Gr>ICJx5i3MsC4lLdlZ2fN-Z>m}1Z2VY&0r-R4iA2J zyZ?ywmd#(sP^m-L84UyDVa~3@Ua*~LJiFQ>S3)#HJD3EtB`gG#%FvN&WfOE`32*85 zDyPzz+V2p^@>QBn=I5SY{IO+z0Y|fpJ=N%4Pm|;J4W+_*nyih#PMtlw1}oW$aO4B_ zXZTE{9C?fyr9xRScDbT=SF6DjlqOf3yNuQ-CfZ~;AoylC;+n!#4Gr2~RrNI3cPHZV ze!wEphA|_rt4Uv!oUMjE7G)oYoz>m>*Z!LJk(w^FEH>iYXC3PGfUlA82t6Oa$NTdOq=@*mL-4{;SiCUNkJ1U@k9yrMnzSV>7q z`YaDlw$S--dAkC$zB8yuaup~&zc<-hr*hz$T}6+PKmWhKtZ$ImlN)^NzX7jRYJY!w zI=>dYUO*M!7HZw>UQ?@@FCHxyF-1U-6Fm5wCAaeP*J3LK?$5Fw14)de!u3x?;<<+E z1z@ExBWr)4hT~SBQ{w&}wAXR`_?GB`R87P)KzG;o#%xdx+J)GnUq_hCdG8UOzmB0{ zC##zi=ZuU;r~rv18UHO;fhMnh@hXNPVa+-AUN2g{Y}tS5Ag{TYML(PHSTua3MYcW9 znkeB2w%8k0VjeaU3Pz6aE0bBA0Pcd5_g*3uuRPSeLW#>2p zr76$KIk4eEG8%L|dopr5%wlUm;89QgU~z$qUQvY3S;^cU1$OthGZf#t=!iLADFe?zI|{%pF`sQbv5w%b`S}pQ4+1G?6XzGhIH4dV$}BCH2)DAcizI#RAf30 zneaOp7M<(3)Iz~?kxLbj&jf87N=G`b|yT|A@?%LvNlRg&Kg9 zFTN*+bIIpFwT=qaky%X|4m<|9Ij@#_1BZ_vzf&Vby)lkwUQ`4iDQ_iC8x#A2lNUKW z2d{}z2vT0(urc7hW79xTxsB}xxgXA_XGJ7GNC7!Z#1qNWu-Hb9^;D2V zixW6YwMl(SQ7pL%_dRl_*eAt}laWfb0_C6V*SUN5W|$9E&j{Pc<<{#nwIbI}tfyNH zO%QcM+G{#t88@n3!nSLNu6AqA1|u6ayaH~%v(D35Ush<6e>%fV&svGlZ3Bd+JZ@c8$_KqO^`wGqN%toD zDUP87a|a^h?Tc@oLpsS!<|~6-Cun*&R`!>{d;Jm_@lWmE2v0KvYoK8DA>q-YwI|k4T&~;Y)Ct@d*F(eT=^-Mzfls2M0n8XIR|we zKBG}WVNq=(gSpV}w)_VON$`O?j9r{9@!1rkK;bh*WK 
z>RskT-dzbOZnowG350sk_R~R7Mv6+lSY~aRtNwBIIJHwHGR2fJI9Ot` zo7FJ|RwB#4Z)S2l?bW=&x|JSpn=>j&8X>h*;1Y@IdCAGrma0G4fCYG+I`narEtHW2 za(|oU>)QYwyG(po)``^jtQ!9@2XpFf(G1>6UH5 zC(183!w3P>@4~ebm%4oEQfdYzk@$v-uSTL_5(UTwunI7ZboFa^8#{Sc_htM*a67`L zH^sBygm7J~CAD!EO$H z{Pd|KlJK2hdDWsDaI;*UJ;p4{!MBrAnY1Sd4y=X>-?AX}^l1frLwo=VzJL{)HTR)_h?ph&Yve%k*-Q zu!s-kOEW!PU4}h54w7C3P+s>u(ja}xkfX_rQB#$3Z5?MgewNpjA^@WJtnr6D5Z(gg z$<2xQ>SdFjo=pWPZ=l6tDJ{9tb|094ESJh`ad*aL8e(356DWz4oR-DUfmJv*?DYe& zWfrCRYwc>yg@sBL&_xS#QE+>zFX@ptZ3ix?Qo@ICGV&p9-(7#Ar*-)r+wShK zqf>PIhK@{pDP}WhR-hjbUXzjWm_FP}0xRyBb8e_1rT@!6;q19{rQFa*4I>{Me6>4r zcHPpn`yhy!ilgjzkae>f;$T%#`#xF64k(TCd}adUOrQ1T0}Yf9_Eg>dGX1II3qoS? zOCKDID}<-*>@#84jCmyE-eBWN%)r1DamIiFxqZU|E0H+0=Qz8decZ2~BhXH85}P|I zD8dD>h!A!T<{|15&xvY$SlPs&w@oaNH*Dgl7@bMitck|0Lm8!>gnQDbRi6MAvsOAf z=NWTeq9-=^3W3IF&1`ABNipES0kI@*}gxUR>T_nVaCL(zkl^o8dOjt>5#;OT? zZE=C6^|H1OyL73aO(BvQSQ$w%wwM&1Y2JD>6^o+GFm;=K$US^v|Jm&xlL$x+kUQ_< z;*X7CfmNn$J{FaWrd=PoZLkUcQBgHerRJ=})$Hi*zA+(-nWAAF>|O^!a0KJGb2(bv zTyyxONo@$)xM@qtCcdFHR6=XTB>CqGfRm7MXlSf&QUTmRv+;Phu^R1EJ43@r()faZ z*Xm$pbD}gPt}JKNOqh@BnVFqqkR5iW%vib!YX;($sUt5Et|hAM7eEgvH3U_fT+Y9@ zbkhGTYI(GfbzwlUjnpBznt!x4e>M0Ucegkw;}*;%n_b$!!$;4a)kl)#U3Q5pj?7Z> zJGN_n#!nDHUDFCoHg3hnk$cDn%G+E<%J4@?wZru8U03P#LT>*ot+W*@mOl-0TQW$ z@TxeV+KPG?V%Vkot=B%s-sJzf3w|g80A4Hwcs)EH}N!e5e^ zu?NY`fGt*;LyLoT?Ld)OAu-m?OvR9UJ-rvo4U7%;pod3Kvn3@ts8tSzb zvI^)ttK3XiEhD0v=Sd}c4?nNZ^wTnOs(v!<*rWac6O48IolfVaF^XetL3v&q)B}ZT zoK$9e-q5y6RiT!Txm*BEB^gR4MycXe^kOJ|98XC^HA5+UO}qE5j8xpFWQJzSMpdj~ zI{9jF6s?j+7n7Y8PDiy%pDKDqSN9q8m$|-yL2T!4ds_ft$ti9um7A&^b!H&JLYfngYy)x-+0r z;#$TU3|?=TG2D*H*`Zv`A6<(6Kzd95efB=OuS_z0KRo5#8WZAEdjRb_HG;}5NoD18 zuRjb3{AzlJ0P7quMd-ASG=L3&iVb7?KVG8zPSqrp)ZHliFF=SBn@Tz{C8N*+y}LdP zTvxOuCPs-aVdz_+2K(+&hdZd`7ISE@aTMS`o_|Y(Afavcr(DgBsHiuw>(b>fl|#`d zu%k04WUJL^kilzzzO}MXg@NN*`0*@jNlN|lFS`#7#aK}(sf`r5zRB>p1TA^Bk;p*` zkGzP)t5@4{yZbjxM#04HY`>4QaXqhNRLiqye0fVYp^^tiG7BN#`R}1{RugabXI)jH zicM6vse}H~Mj}4yq4CZGPMT@`xo|M$yw zN(U$+%ayt7!vSF$JOw-A`G#|J6i%fY77{5nH#Xkt{560 
z3WQAWXAIMn3%;15wl!z(*3FyRyQ9L}hb#hv7Vfm=7pt16tIJSkqZu^RlGM0zI@CQca!GHPNRkS2L)x}#wT}8c`gzt9dyk(_ z&dzNmV+z-3amWiSVSf|h3y6byRIkj|02qrQ&H?)dZfYv{=Amlb_*7S!cl{gVW_%ee1`Uu$l>+~5#6U?W|6YK zVuIXvix<~4r3TClQEqVP0C)0Vl7m}v8RYXr@wL4}z5;||)Vg7Fzn~zLEiUm}s)ZDg zOX}+e)bGX9a4OzTGk+DwGXbU$Ut1@!41JeVr4CWCtAGZJAd`=EjEwu64%tdiWH3q>OhdS8ENPULeQ(6 zNF~>Zwn8|oMgIciWpeEvn%%5%<1n%?cdew{SU@P>(A_YQQq=Z+41QVi?%gf#&SrK# ziml8uYm0tCOd3C_fJT(^lkqK!mt@`$C34ntY3j3SOU%36=-Fw~+3)X#P z{9-ud+b=^Px4s?6gpASg&oX*UPjmZza#AH+D$dY-is*{HJ6<8UFzwZATmE9A0a;G3 z1A_q#B~Vf^S@VFw_fCGn4*7jidQ0EDC5&S6_`?=ItH3*6Qs|3hOv0N!NK)(RCWQX$nP@%hcuSD zU6objY&%Lzb`%}hD>`4*B!lh~Wg?}S%M{XOM&@;Tc%_N=72uU99X~lDkl>1NG(osa z(n=N4o>bJ?9l0*vg)kr|1apBYC*A<<*$oQj54**tfmU>B@4dL z%4PB0-cIDgU60nr+Q}#7vxI=IC80SK31_=FL-($yXVF>JA6PYW{`{1a<2YwvkX4JZ z6KBCy9!f#_WDmEebsJO~m-gV2STed;IE!ljp8TQf-OzEX5Tv>DHz_u`=yZ`8lp&hF%H=fqi<(k2Fej-)0r;(LS0MC z{~#LA|9GvN$S`G{nsB@JhsKZhCQIriXPD|nSROJVQqo$KjOOS2?Ai|GzWN1U-I?w2 zpDN8JO~TohXXxQWcc;aH-La1a?H_+09|psv@6A-tt^m(d zt&w?2wDWy-uzilnpabuC$T_GR&n#j5xp2N-qXLw9dP*YPrPRv&aRLA_f*7^fmPvn_ zhgu*o;c&+&Gc9~C|CJgxXO7U6Xd(TEakHVva0!{=?=SXwYbV5n_{OhB_&&Y#PY*8% zHo@T*zHAW88wBTqD8W?G|I37lk_()yU?w9k-Dj?C&4s_9sD<-;Eo%!glT`9t_2sd4Qhg-TVf%^zTK9I%FXuAh+(r zXOq0AUG1q)%?oxv048Uq^;QcfDc|5qWt`H{vphtnsqqHQ!_wi5vX>!}_7&V0Ihs)?*sH$WDg7(ClGU3uY5%lKho$P`2i68U zaC@QHJx1BuwqHN7C*Bny$Goc1R7Bjc0wG&UDhdax>MtI$_zg&M$@AL5ms(9?j?pDH z^Gg7%%kxvChKFpyK+EXalZ|zoH&;br*uh;Tq%K#9?r7t+9&4_VKeDam05YFC_)xQC zQ#|wubv}8C4_+R~Nv1#|uI>QpqXGKyWYgP21{>mis;+tuo0tohUQlzNR|_}Z`|{Cd5mACRc9Kkr2@OVtV+ zsyzY;;LA;e>bwgB=?ZC{ihg(Pt*l|}iw=k*-tqq~%6bYq3{#hXwbHAdlH$T(8Vp-PK|k zXtA8ysy8TUMSxg-718SMJ`2J$)^INQpQ|GidUbN8KtnW}nt9k+V0SYx4Hn%D!haz4 z)iv!e>Se7JB?El9(O8O4s_CL$vzYr%4`AKr-$RGeb8@0AwgN_S-{iTy#y}%I1d5Ef zKj?`W<321bZ}Fl<<|8#w=+e7u46R4EP6KpuX*OYHEUamLhxhc2yw*eIN9fjA@b{rrB*1OX1tOyMU%;b+G13rB=fUR_ehj`U-K#6cND(^PD) zq#I0&IuB7z$wwpaCDlG^HxPN* z!@Stpa_I1bRi{We?7jtnr3NFh7JH^I!z(8NpM6ND((qbIQi~HIkZ{6Yizm~|0N{8{9suKRZDXTx6-rJB 
zzwI)Y-Yh#1-o?b)dX!JR5E_+->8F{o+&@6(bnsw|D&&3_YLZj^X3_5-<$GmxV5|7L z+EOizhj(u*R19WxE(>~Q=ffiL2dZ6iQQ=p+(a%uM^&t;>z z?wt4BcMznR|8%@~-g(+Ij&f`vF;0(c zi&7hK4IN!5x;9z{zrX5$X2V(N!!)btUamN4)bnPJhA|FG8*+bZ9}TZvuFaPHpNWNX zx#sMu)vX-O=M`bb!YNemqDP6T|HtM6!q{`wSvPL%!vqf&l(@p##RUW*D*r3(A2AO} zSGYQw!Gr^anyi#80aTiVir&-+o`ub#FBopEoIs4Eva<%E5_J(q7+ z1bA@UUn*$M`du3pmFk(25R$#I&FxW{K=O#an?)`1A2i#Rb`^t}3TL&(q*nk7u5Egt zG!X&#rn+#_37j3^V=PD7H6Hu z;D;TW zp6bMomXHZ?Lo#G}0eo{k7t($p>6YYl){i!{FEPO7P0n<&qW@!;!!@vp8t6(~I zWLYLmfx(oibOE4eYrw9z`!e%%0q7%@UzN5b5f>H3WdyQbikMQcXr{wZlb(MGO~#@9 zxa4ofmC(KK38ks`#)Wb}9v${X)0rZ`;JUhT&k2*35SJVxX*EBHA%01-)!M21t)a=m zw)m#=&d1^h=2^jJs7H_knNq}sWIqd1TH6?t>4}~koY-^eBh1IpNz9FF}F7I+Xa?`TJP$k93zCU zJN=TiA~yX%YWy{v{2oVSf^tyhi-ck=LrXiKb64p0#QlB;sKgMf=ZICrcx_WIm8CYefzu;i;ArsPV7773vF7f>-ep!T*lS`@bQ!$oqze;1n@3>Bp%1h?q4WDsk z*;k+NXx7DY~Ke+fwIa1T`I1{iO= zVjx922dnBY#F3k0>Lg30C^h+)EFQtnWT1FY&BT&QH+DJ&ZEZ4V3DB(WolA&jPOoKyh^}#_&W4?@ zT25h*2cM7YnfprsI?U8Mo-%5pY+~?=Sg$J$P z!O9opC*vciib<88oo7$qD8+mTdfg)zDXYw1I1+e?YEEn)arj&#kW5%6)m}()%~knB z9dJM&Y{D5&YOG*`a?TH&gV+u|{PgQatR@z8_Dsrj7-tW)R0^2gs6jZr%lZnu4AT(h z&qgL9^m^R4ukXiguUNHbTo>0g7`lCQBBe^DuvtVO1U?0Y#3%S=QVL#MT1wp&EgY`( z$n)T);6yS!C+$!`n^vu4v^R66;`Xve)aAUC(>`Utj3IFyK788It}u}t*<1;ozNDQw zGwoW4>nwiKW->|!^<~-6D%x}gL6!7-y)S4PglSF{_6nRy;V7Ur)@h}G0s`#l?mpf? 
z#lZ`LCRJQRtA49daOH~uGSB$c8!iJFV`iG{vZ>{yHB+P zR(GIAX)}yVk^{0rTqWkcPZ9@`LB-tT2S!4+z{_HTR{JGf!r1g%*L9Vo(t$Nvq|TfH zR}_4h+d=BGL+5mwj&WFLim(GIhjkt$Z4t-h z)GRV|KtlorD0*fXQaTWcJs>=qYJKJ8 zQ6J^5Hf;OH<~`7Qt`ZiUCIH^km*PA~r#M@pMG39D^VFjE4^4pU)?Nkar7o1fHZ9^> z)TH6f*!c0qm0cFMcwSOv+CAXV;2xWPQ0fP+iu@3^n#_VxTKW2>4yjaIo>NVTNUdmV z5Mw>(<>(ko!hl?dHXl~i|0+IDY`x7lkfVib<^yfOgN8yO+jh_R5S?H3?OR#*A5@?+ zELciR`*&zw3z&m#TgCOlY|2v?6oBef7rGkPDG*Q66RCA4m5~$|UG2-5WjSLzwCYFi zO>q(PftkH+60Ufv8gFkSQ8U0)830EmuU{*U4kED=095llSgnM%#x`S9Mjh7YrET*L z0bK+R&wU(0{~+>Bz2>Zb5!d7uU<9HYD6tW~p0P>mz-UsfPEj69L3*S$$sImvH(>k% zJ``DrSFPo19uc4K^RvkPUS=0q0@%Y+>TVSk8AAD(7`7PQPVAT?JA3LYx0HNCep1x* z{`h*s?t#khqgL_jPSI+1cB4j#xys*k4vag_EsNw{m!dYYhUT~=c;d_3Up{}9NQi+Q zdCLH?_eLOYKpMMFxlV+|GiSccr!A4gJ?g?1Y;850dH-@vn3(_ONK*5by+KnrYL zMuAq!B37M}ylz7OVH{F&oqU(%h|8<$_wZB#%LT(2sK)|E<~VfBa~{FX$)O@Fp;h%> ziJw-KiG&EGM_Ef)_F0>kI|z%-&X+(K8KCRqf{b&50x%wl-x|nHs~^7*kV0|}_TEOB z19q|SMWxWWXxJ1Ah2=m|`bu`?dy7{!`BPA{z4F?$Ze2KRVXTZ>O*maAdzvbNq*?uMB~iaE{A zryE;xqiuwVc~J@pDDmopo)P~8-97dJ-M8l=y}zT80Ew08^d?bmv5e>Jr{O$sC$hr= z5ZVsYS&3l@32kyhi8S~4-u-kX$En-1h_cf(8}+(JPD^9Nt!{#8Osk6XjL#Sj58EUA z_U&QG?eVj0ThIC3o~_t`3%cLC5ysgo0HnNFW2f3fUp3Q4)yEQNuA<6sMcrydZld4VPdzppGlL%R=Vx} zi$;dzAY%g9d&s0f=~49tQ`k^f-0;2x(kD}Sdr z$rWrwXAW-8Y>n+4dma)G(4VN;xH<-+xG}t&zq@ZZK+tDP z2}Xc#t$YHT1~y)DN#e=q^B$+0H4_)S)?K)5l&+Z`;}Cvkaw_HwHNqHv_ijwn0by-g zY;Xr&1)DW#Wx!ZLxFWSAp72Zll3<8hXQ&|2Q7_$_Jh6S46U-^Cn1(jn7U|^hq*|6N zy=R6=G1E3g)})sPHz{?DF^bY=kceirx^DLYt#h(PMguIH=m<{T75M4;sBQe)vM>g2 zCdy~lJ;;bOi%zZYzzQdk%wYBAIt6m*9E2~1NVM8K0T?~8Bn^aqeA<^*oDCDi?KIyqQ7#(*$ z)Gg!-To1@-sId)`<7gkv(gnZ*(C?RiZqDN{Zo^xx6}FQy#-q@{xLN1jF^~mfwyp{l94s*$edh{F%0Y}hDzhIR7!g_Ao_2I)DdBqAhh#e7< z)gHve4DM6+M`BS~jOAmFy4O9PjA@oiarm{`FTE2fynI=I$ZktlWGfV+4O0~+%NZ7B z+L-Gj)@AQN>V%TcBl+n{LNVR!I6Bu*26=OZt>4Ufn!$c2HTUdU00kR$YZ@>O&W?WF7(?um*!nf3Q6}t5?XR#l`un!Y*j|wyz#h2;3)c!=HZVojI15P8&L?=g zu^ubD;L7rUvfE`uH?B9$w)t~emkp3o^PYJn!4E0+FqJg`4VTV0`wib80M}!&B;9~w zM_V6}F1iqn-K4NyEvE3TrWF=&VHAMp?s25^;C#4yqwMu-Z(0G#q}9vbyX!ark`+AH 
z&_C7pj;#T!VIZG~!dF{IB=;Y%LGinu22BwzX zA?2?rNc(u1UPg=3DU#)2GZseYOW z02uIBSC7D*pkDa$izh5v=lw-l(1rDng5j~%tg=5s8 zkQ&K%niY?)N)1C!hi-5yR7~ZUZP?8+PFA}OGD`kRsi>Du9{v|02`h{Iv^U>L!Ox(b zMeCcR;|??^+J>+3KGS#h(WYQkpo*wnL30MDW0;ubx_j^7R@|YRika(P3JtTn;2ixY zx0zloA6W*}>iQ~BwB9sY>R`=9+e+m+?L2itK`443ZNP?oD<%==$VjTK?!d}+bR_mS zxH`ap=By``U=7!4LLPwBdEM&rUZ?RnS#)jNZ?PulyW3Wl&t@i$4#x-9mHdU;i5QvG z-*Fk7A2DgY4uej%diKL^wOWQbFjvgN8F8Ht+TohNBm;tBIu0gE@A7jVqY3C-DbI0L zKBY11<~plDV^mhid}U+uzfn=T)W4?TH@Q1V%ddWE?sIe)637;IPr76h4SL5xg_19~ zU5VL1(t*!Xu-?L{JFPUpZNYGjR)liYL)V#?wZW_)@gwE)JSUi*rDeIREn1&_8{Hto zBr&&bAqY0LWXcF&{9bgRW;-L9=9O1HKqy%j{~Kj0FNmQSB_kea>p~f{O=h8t6f?76 zjfxPX!+fp+A^L`0m&2ZK5c>o{ws0cpGQS4Bd6Hr#C-=lDok>Fw z@9T!ETtU^*`OGqidjtJoJYP_Dk`vvzoavP^QFE18XQQq*r38wy8cewoYZ!DKERtf> zBeOrT0Qgm){~}k&SUcU>ky(||kI#$D3vqgsk;knTcg6~D4?q3(2C~+FYCb*RwX=-# z$(Lzril!XM1juHI9&hM&nc^z)%(FDk&mgcyT>;riI&)^5>UPxr0{+g~OjaY_+U^^ClPFD1I-O1ELHz!mnDY@9Cs0x_3es)M5jgxD1 zq~G|7U$}Sr><9!4mfg=IXvFO`+0oC>Or~Y$N-{iv@ZFABGJWyXZax17#|xp03zN7N z8PYmDY`26w|G7hgkPOYLH`_UH)t)oUpk7+{Tf9DMG_pr-%1#x7`j|#YI^H%_ z^ed=)T~{RB460`i#-NqVgwZ->#qYm;aKymN73qWM6*RGs_ZAXj`2!ABq_h1)ILt`b zMH}0!@-~Z?E_15B0r3!gsFwbS4q*D1zHMl%t-QRx?vLQ9y{3W**+U@T+{18*L@J=m z)02)pLk+Xx>C>kZ9JB|~z;Z5(UQ1eJB&U{j*Y(6V0O8FrWU*0aP3^c4e1<;IZEiF* zI$?n7TNS~cR{a_8+!*C9(?pDiT_Aaa*rK}^qLOl7RZ(mwr>1VsAHr_npPasUcT7zB z8x2$wOjez4If!~jcuat+p>9f)0O{P%hQE>YiPhPkXpMF7N0zl3&o|tSL5ye8p~IT^ z*FfenmZNPEVnei<6Mgz-f}!RF!ND{GY3jJo&)!9}a;4=w@57rUXmO1gSUPwE8f{A4J` z`!VEGJNYc03iyn#a_KkLows0OffzNZN~&!jyu$GeS^M%6B;WdRABtldz|YT7Is4G+ z)C|U50$(3a`K@OTbtz)9H1(|4s#CQD$7EX_I&%e4Xum29a|=E`=_MG}w%sfu9?h55 zW5c_=d4vCmgbolT%!=)dZEp0c9}=zt z@YA58{LZemTpD_Y8w{;fXwUq%VbX};MSkhqj_Es695EF|>s?`r797`&W*@Lb3b0_C z9TB05bXqr;W$O#}saooDo)}eXLZtO|MQ>~Cqk+wNB zC;^;9hO;M*p&X*D-rsIJjXyV)y@CV71YEel0B&f=FKFr3)C%g+NRh+%qBhf+B-ety zYc>ysnK*+uOs%TBGd+{FcH}RFrO(_MRTL>y1sRXBL&-Qkdf%^k`>5f zu;6?+Z=OmBHQn`k^W=qFh=){o)!G(lF@0h^uYcjd(|2#wU9PC$QuFiA`rOpx?*~Zv 
z(D%$e&y;xJe-%`5uc62>w+uk+CiViGXo`@Ni$9J(f9X=`6v~+X{Q(svkgUlv5WK$<)s=&x zN@O(7gC$TgKmg^Txr|8#rmmk3M-vtvu3@X_GRk8krH8!!wE3x~?uHA=sOANL)VP!~ z!Z`w~?k_e?qipYMt29+uu1a z_CX}XFT+w;0@P};@gjL+_t~5R<=V%M*kc-bIO5d`cjv~wiz;aH7tv35?K&*wC7uMx zR7v2=-NvV2*XO9q>;^jc{J9*@n&0q0c7zmKiwa6*FMB^~dh67Uqx<{Q;G&udD9G5_ zdGt+F2ABTVBcZ`TMb+jwWii-J?MjXuE5;O9YoYxry0&UigCzk0mIxo>?-TX(3Ssx0 zV2PQ4CvSt4=uy!jj+xaW&p|SzuBmC(04V-;*(QKL$#&zv?Ds`mTjJRcc^WUpm!Jtv zUGmO%IAs&?ENXkTe_50JXiFO^DzmXiBqEl?Q?4ue3;~04O>a^ z?8qkwg$~r==jW8QgNYRvelBo`Ht@!4x((6(yD6l$z|L|C6K$^$wNU9)!vsr-p_I8K<+%R0!E5yGhH)EURRnR@oyd3K3n*+By>`WBYSD3h<$lmp8_6a#@M`oH7hpoK8jR?sejqS<^9c?xZW&KR_!G_?SK0pR|TC4b@8Vo2~`Ai6oAlPgOVLuQ6(oSRdkjHJtC^cWFo95~t`>yD+?PJw-ZF(zc zwvhY3t`N^Wy}Hp@7@=l_FJC)J4_?giE=Maif$nH`MM9s29lU*f-rmA0n?kVK=%37# z8sI!V_FY>1+1Wcj)XS#9Xe0)zo)72;$rx?KZ8Y`kT+#~Y^XxfO%U6Q!1K{gUlo)0x z;cKZB1ZI#8%WE-518&~*9WwBuivB2cUw9KtB#R{-5m)DXp~=2U_FTd_?~>SPI@^=;?4)pAbZ9g)%Qk zTzNR(Y!eigbSAy1g$Eo)%EAMVh^DU~yx4w`p)_ybw^y(9d-paRiu?D~|Fi)6zR>4M zl-oXOl<1M}<>8!{Hcz=N5=PMF`FnFEi-ICQ$9FR`c;?#K_5O7{WR52mY>*U@O}Yn8 z!bB^_iYc2CxFCSG%MnMxYuSP;LjOFr1ZL3(>*D=-iAK||NxWExh!0&asy15I5!z;H z>@L*JBMH$O>VB;UdwSuYRd%LxN`>Eczv!LS2ox##h`#b=wFT6Hm%970JDfUuwyU4o z5Ye)LYQ9Yj@9~dHC8Yw2p|TC>dtOY%mI>o7EolLAlA1u7kPY@VOW_Q-TVL0cMN_=i z(6y)?QTAxT>)Ol(OcX1Bx)|7bGkF=09y}1i-m*pXwqiE8Vz8lo?6aff9LZ08!%-C_ z)C!i#&&Bcho4nb@Y~Q<%xWI2dB}%bPKuvn7s@~h;;=C}bTFy9*zZ8Ghr4-uofH$!EurZS!EO*|6_i(!BE}>hpFy%!Z7|cGozEz zp0xZZ#4g?A7W0I$W5x(NKD=PtHQPrY~egQ9%*q>t3wW~Jd{r(5+ ziV;RwWf4HZ?wUzRHc&DJWBVgR!lE}jf@AX+6@rCAJ5`>S+5@C(`M7S}6P^0+Nlq)s zH}uspysKl>BK*iZ*t)tF>j*WDM(8o9Jvn7wlP1~Qj&pZcVqL|Su&hfFuPQialBnyq z5eVX}%8xJj$>k(OX5Y9GB#oD@v-Rw;LE>L}K@C5(bhlQE$jxPkwOq@QmAH zqq3keJ`+kjgRu21;L#Tc35034A(+BK(LjyI*U!oJzmi=(Zv*7O4(vI9 zK3)+54j|w_ibU*^4r*)+Vp7RqX&T2baRwR^Njq0KLrfFi)peTVw+(r9a(O7m0CX!} z*TPu5pg?cZ_s1~>NWK4d<0FQr3LQQf_>SVk%-}(d{F{KI`fzh}SuZCt5jW2#-p+`J zb*PiCw)eX_Kt~F*ibU4_5J2!MmGLc!)hjkMy&rXAvFSi?tE;FpC*6W~lv(l>bk*LkVfi0+0AF;_Z}ix72@0 
zQ<@*&xrZ@UR9y~PPC6{|?gIy0b`Aq(5L;xUH(am1+kXa$|L>}hlkvnlhza;-4rnb- z1TFbH9dubcj!(hY_2V!fI35dYz?gFl4p3p%Z`g1J^sM0E0+d&s8=@Jz%7;W5P9Oh( zqT>4?E1(5}u0G|L>wYtHJ=4FM&kRJD*e8U*O=#XRvz&w zpTE3r$%J)_>ptezUCwR)rmE6KyW;iNWxM7do4@n@j*;VX96ORR#tp8hzX4Ht3`OK_ z0h?8zeB*Batvsi3?Jm9?q*B?2G9=Ml`W9ZgbV)BJX3ZK>kz=6ApNvNtA#37`Ka`f5 z-QC$2p*vv*vP|oUK5x8@bVq*G=zXox#T3fcu4ll|33Awl-VSD{)#elgUjDBgrAK!* ztVxIhPeIK}22FJ7NulUIEOA4`TeTy`gKqfL@ZqkSxoM|Qx4F#r}x5udt7 z#Ans{>(`JCR{NMDRf*|Pv=#*1r-S*;Md%PY{;OD&c-#*rh-ZVyox!6I5}!}grT>eF zw{WOz(@UZ};dm4@zekQJ-24WMOXM6=53V>)%<0iQJA;c~G7)97-4{<#eVkic@>{)r z`Er0AI)O$UlRI-PJ9Lmez#oQE7J%}S{_J-kgVgXA-Jfg+dqQp31yVow`E*K31}BeC zEW_&^Vt~!foFZOeP|X6!z4;H_Zt>=jt<^zuYif%a#4NxBf-yJfSPsJZzvqxRNTr zdyff;Xn-9?0RGzBb*h8ve!lNAYZ_s_S2^+w)Ynv~1Py64jZfhy_*ddd#G373p!mzW z^;fjr5Bi)vbt(}D0`uHB6|32W{W_1N9o)6A4F(b1gxF%O&uj zB}@)O)nHA+FV#`(4V}$*;J{}aoB~WD9&=%t@AV)7P;9JKgZ!czC@sU52uqk5){Pi8 zkaVNPvX}$OR_E`M&L)L>QFL^470=KMA;))9|EIPf%MgXC5DQh{Hfxl|bZf(_u3S2i zZi~suxIUN#D_I9Vg%Jt|I#E}LaNq-4AJdA+#MU;%y`qO&M{a@_Jz4cuEJp3);OKbI zWyy=!ItCwzb}QE8$bTB5?bB5Lw$v!+N+44Hz*Sywx3+R1B`UUNr0sINeZx-zf z8IK%z%4Q_Bag0oTUsKh&?1JgdTDj4ul7L(mM3hxBNbii~786K9$?x+N5}xe_gDtyDP9;GZnL(Wqr{RqG_VE=O}~F4xm*OiNceW z%)-1*H6`BYj~tP1a#rAdtaTf?cT!G;DCUtK>au11`XDj$=)2)VrsK67E4B(od3qV& zu|SUO-E&&q3HX!r{@CDhxNVWPj32_F1um;G4%WXHwIkyx&y$1-ha9D4bUhLzTlAP3 zjW>h}#UmR7bWr-MwIY=R3&T_^l^`1AjTX-@Ej^q0=3iuGuA73$8_UjPgIp8opXqW7 z%)jyzPxMII(KN^?8g4Rzu3I#an_B)y2<5g-ca70Dga3;0H%?ZpF?>-foe`?0{ln-{ zGLfd`2l9meh3r6yf~;RgUl_)L-X6nHECL3biM(JZ8uN-`-h#-G9lZZDvF@$o&1bPX zBtH>Q*g$JAk30X~I3?hpXN|3gngR64PrNvK!i1(!@}{FvgUZ&t^P*bgNU1n8^(b=Q z!z$f<{(Oj?ENYM#uv%H?2*Sis_~}HSte%}tDlaQkU7tez9|(()U6``7VSGf`=<0Pa ze8&cKhKMA7L43tsXBVbspm_N4Tj)=`{Q1ov9vIeMc2*H+XQ(fD1rM=qN0rJJ<%b6j zZSboP?A(^|L~Ie-)c_FUiP)A*wta`L{yOo*1qKc=rr1qpo7pqvsk3IS<#HtbMl)5q zdC2f1Imm7&fukL{8L()PY&TSyowUUC-&igS&RuN&2$O-WO0J@SuA;ZT_tEj{zm+=j zD)CWzm~cg8@Cghi^2TB*zwFC?VQIU;pDo4lrLS=)YSVvqx{D z{tZ8PB=Wl$5(LLkS;)blVK?ZFWwsf=Z>SkQnK!FC@yy{~9yBT^D+zPQwPg+W71_UC 
zhmrWOD9U)I%gr(+&{M`;`>n+OHYb?$^{hquRmyqB+8!IH%pv9Enb%WTapu_Xdd zXdU%+0_@JvEkQ)XuftQIu9-yMFIQF9IY%d4YDN2+>cy2OQ4u?K?(A(orkCCDRlhMp zv%XLOhS$xk{}~d~4lCqUbfg1U7F}4_7+Y9P)1+AEURqp*Tnx#2M!kFM@uNh&8G4Pr zLc;8C{Z6Q>X-LZZhPcT;ht1qD=Y-)oFeGnA^ls@~c}AGpn=V5H4S$ySxq0X(8k(W? zneK%bXtso>!2@cd)a}B>i?T}0?iyr29>iBgMX-95-)UZ4lT2n_(;WP4o$NGkX*;?r z=G*X`+_LLR4H5!ztvQbzt^Kk!WB?>xy0r6kt@O004%B$K4B?AE=aDp2MeL9AZssCR zXI?LaV%Vu0fI&x2>Bz|nISP1;s%f=C8|(^P+A1GNhl^vPclUQlT+7a}YXEfb&g1Pi zlcO@LqoHgi^YOCI$z{y))tCA&t4@FQmd{+67r3Gf#jKe#k2>D>)&+-#FqThwMrc6H@>hY6lisD z37@pIYN5MzXM7vhX&i$0id4{%@BLhyoMht`cNtIM?#mMWX|hEv;+o^|s76hjI>X1E zKF61r_weE5EBd=G&N;^t8)*(b1Nt!a|9OER?V?|I9I~m|FG#6UU;Lnq;49YgGQ^6d z=&ErUQsk}+ldB|F2KJ!nW$`l#QcawLBk!g9bx^o+>dzLOs>(foi`H!No?tEkdcMU| zv*NnU?IBD+-7)iFzehGUd($cE*W;&8qd`lp)6Zh|;#}5zchKIpd9zc|Uo=&cP1#Q{ zD3F4XeNd&eWvdgvF!LyG^@scL_^C6)nwN4%bvM^93ncD$X`C%(F*qCe#pK#|c+X~- z5zx94&&T=MlXi!WA7Jc@nfK^Vw)I%F_}2#%e$9CslND{y7lN~lcwzYM1}S0+UzRGd zU}5``AtXLT>aCR_nJVk*h4EbC5mMWWiyPimPn&5)B#aZpJ)4fwXT&k64k>QEs6a{r zi__~YJc12xecsRLzvJrKbbSa}G)M_hIT%q!mA_bhfq;Id6M5x&L|bRZmMl-(1%c^vpRM3q*V4i4&cH80#u;ag3 zn9P2$YaA)Rp74mi8#khhz~R?VmR%>*f0s@- z;S*`e?E+>KSR>27>`8_;H;KOt+_tGd-M@7{w4 zuXd8)7mi#Y{vMw_!F*v;PZRu00O5T*G+6LfnVr9Jd1-MB689+X>(ScXh*^QM8&F{H z@QNp=2Vc2M>mqGJt~3I9$XxTQPhsNTH5r9nrqXMyhWd!8l>{_re0i|Rll7=r9?q?- z>tDZ`<1UYN)MG}u6iK>FWjI9=pl$x^*RKKWxjf#RL_#qo$2V)#=n6Gm?KhKYSua@wujfngsUovA3xm{}ZG_h*`nd=pQ-cmdj{;UMCD zBDlNX@-|IAcVBSHX3fO*w6T+H&)l-bW=^fPaa2M0m4=bFjP;Z5!LCJf70y#FLmn~* z@aJdi-9FkmjIUEATZJ1v@^KBDcI|yYK2TlXFi*g#e%tLuE`LI|SVD-0PFaNSsIHwC@cA`5yj4y3W?a`MP@d4FDM8hdVP{*Y`wc+o*A4&$fMdvpnz~ z4J2il5!Q3Az(ZV-2XFDL^hLg^c87ltsT0C%85`@#?q;!C_7%z<5G18b()sgxgu{DJ zzQ*!NT5n$yRzQ2rnl-fY47`afeG6d5gJ)`dVmLAdNmQSoC^?rDKi2da$Abd=7AD zuWA}1VUmBZ&`!qX!)Dh5D~XwmS}rD6Ks(rIR&CpR!JUoo)wl1)qisPQ5Q$~IDguaw z?eQ%FLX>hgdLBY)IH6Kc7^Pxv9?5a5azP$VbCzqGxbn%#y*c-`E%0fVK#dtki_ECs*GEi=%pq-3}lr8Q7!%o4Dspk zoV`L`u5Q0Vcy#odrg!7+pm z><0{x1l41m5X2ftn4lZ=)d0!Gn*nai%q=bV4p@O{6TCN5rG&C^>ZCloVPA8Fw 
z?To91V9`&OndjPN^dWA-Izv(a4GM#6xz+5Pl}mA%DwiVLBU~8pUHU^3tMbHoMPAF! zR_7t#7aLvgsq&^NY$W8n@y(I|AuK&1uq#Vg?J>yzne?TVLTSLz7hL92#apM!m0kX# z200Iz1w&f)WqKu|z;`!(wl(w%Hrso^ydWp?G`joB^k%Kd29unktH(j~s)LjKwgdy< zq(j$-eP0IDC7-+6W_FwKxn_sKF`evf!$7e{-!NYLeMI=m6hJ%fPjlOCc;+PlQJR5< z5PZt+)`gCIzyUt&(Im1+t9&_um_Xh}HvnDluNShx)N(Me zK|0$S>ixys#!zBxPW5ErRRgY6*-L&)0a1uwnzY%?a(VUCEQ?;f;?fsUVEv8% zh7@HyAewsGKh4_plQ5QnJ&=uy@*-t2K$FDCpjWu*dlXsi_^R30mv|6=1HZk9E& zWqts{2krM%_7YC6+XEDlG4#i=VFV&)fTXIO$$X$)&w$rP|GvVn z60c1WS!GwAKi_HH1<)Tr`pttKxMc_@v9|xS%J2E}_#AZo2B_H*SfexqxCQO1ak~0zoNA|fh*uBh~ zZqH~U!?{i+w=s1j+ElUpY`jEEbgw=8HneD_T_88Nf6jdOckpZsi8M^kR$WxHqAePt zfF``gD||j5qo@Yaqn5b<$E!g#o%kWiOn9M9oMjbVPR^_OI{W6u`hh;gSN!6|IRHoF z!Z4ntVqwrf)URE0^>ErZ;g-Nvb`9aa1z5PI($&^>qQfkS?4lX=4wq#BjEkGp{>VHn zME}h2wN7F%>mNoD=DgKY zDQsob1-|_VPG*nG5|Io8Ln zYc7RBo)g=RR7a|?7r3Z_^FO0T`TqCyo9Ft_0X!v9Nq*T zXwo$Mh!Q8S6W|?FENpBP$o(WedgAdDCq^hAVZ*045UN`$VFuCSu?&7olqpv#pkHQ1 z0ew%W!#q3u17x$VE)U_1K)6$$br^KO;eL0biortMoloM@een44{YctdF)S{)LGaI2 zy0^EM^_7+9z*7}2O6gO<7qLl~JO^3$H^wBTbga^50kXR(B2?A2#u15`c<|tjPdgyk z=nvJG0sl&z4|lNpDl}`ddgrJ1+=gKb9O$V~^4GY@`{j<*evej)Mp{wLzTf#rcF=+W z1{&|}$NU?{NMc_BKrXr|k+rWC1sI_OTMUNrHFF@Id;~iX7jGzC|hWAAhj);WYO^_V)ItAD<3xMMLTT5+qzZ`c|Mzb=^((S5hWP_aY{F6Wb=byzV!7N>`x}qPdOJJ?eQlBF2n;vf1C%c@1-#&dxvXzJrUbTH004WXc zo!yd&SsLjr{P|@XN>xc7xcHHqSyT_ZDkv0Q zzP^sx8C@>dgwPcJTt|Qam&7W?CZ1|Y0qfyQNd@Jux$2w9X{X=J+>C_7EcSbs5;q_z zO;|V7eAY%waoX@4htZ(lRpSarf$QpGLjn@x@JS0legaC-lKPF*)ic2U10&Fk?Gxe2IYOy_{#2Q{yDGc+AWDp$4NIx!>*9%1A7N}se&TJMSFPo zU=sR$5s4touy221EGETId=ZMXHwsbP?Zd{zGnU-j>0bLLvL#qdDnMr0* z&Gb}E#*&3<30kKOTG_4F+}~Huw`|cu_N_IUWSFeJ-;)0Cr)Aah9auw z05YkJXU}%j^Pq8TqRpFnw#agU5A~Zz%d{h;pyV5-xo|qfCZahOz~lk)|DGWIAvVbA zXtLJs7ozT|2s(1L4JdtcoUxR%8)A)pI- zZ}`{H;ri@wnz|WqNeKQ$n8s+pJI3iFU+_l2{2*~1t6w;(#@c7l5z@3+smbzNG;=$S zNCbco$NtBtsx2)&(n}?0fRR@29F0%p?8&0qieVsdW5$j(>2{N$aBErqczA&iA31X( z>x9Gj^?fd?*vPd-=<6!avoSqniO=r4OCh(xY!05)-qyEX7;zqh+Wr!^O4(f_$qX$3#Y6cg?>fh)#`hEa;`7oHy 
z6ZTPIXen$WTVzKL93v}1Ip7sl%(s#1&FMjLLAFj9%mLp%hSlaj3Jq87A`B4G;)8Cs z1h@+cMiH8}l)soXxdhTBuV72DWq_t+eM`H-a5|=blpK_##|-lrm+BRb19S20gle z-zXy=0DQxHOU7eS9@c!hKTO4jTLIPJhJ59d;D=jo@m51&-jdsx*#)afh^Z`YVLjI3 zN)(8iH*RHTi%6oGEJbc^a*-u&po(HPI9UX(#%PyUG7(6Ip=jU93K9*Fd@YVZroUdd z%D~y1UOLpX5Zkw8kViKs=ajZI+1Kpm0z*HpDG9pSTLwPI_85l(2c(*``PWbELG%O( z74I2w)CTfy*%pQjM#_Dm7?@V0d+2M~P(wEeaetndh?Y=;*|27{CDhrzSbo^xn2~zc z-YU|#4RRWgZF|c?q(}6PahuepntsfuT^3}s)n0@fDC*SvG;Y*reO#Qm!AMEc9sB`S zS>y;ht$Ctb;a}hFcM+#H=R?ve^)8`j&tQsMBdbLu$h!VIeh!Sl_0M=syIr5wiIOA# zib@iH43fxZws*=^0>;WJL9g!?77pDn>)H9Pudg>4a6>`MC3zhB!@E6Ia|R!}DmyR| zZgDTOaLwr=R7M84UP0T7#Q%Z6+?^KD$tk4X80?bc#*hE86D1g-V5=!=J^Y9m929e( zw!of=QQH9N0o>}nDzwzHq)R1+3+3RkV^ba)K~`{Qyc6d^28!O>-38&rJfK&PTr1G?lXJ0( zwx9u!#BERIddnR373o6<_J!-Jf>c$_rO)xW#zC2ZL7DoH}UEOX{h-+Zaop03r$mkV8!^WfdbocxraNGkJggB?uOERD65TMUfs&3_Ha`1(tRC_?cD;GJi`C&Psz9Jx z3WYgtJc@F|XTQLX$ZIO@Lv3`hwT0{?oT_UrfPyvsfD{ zEdt=hh*aUjqm04P!4~nz8YCtCCF~@QP3ry&<`Jd`u2g>>HYWHFlhIdrbnn>?AwD){ z1v{;w@$B;9 z4E#m}xef-Jz0W045vFR3U!Ns(cH1k&F_sV&%h`Ino6azk8 zMqHPz;FE?o)thH+FI$s?@(8=&f#NvyP`t zUQWWV|F;0)&p-WL*K80~>(Bwk;b)oa3jxrG4Q+sXXm3JR(m~eIgbulgqbEP%w*0S z|K4!q~7xgSt+>CXWG7|CEWNg3)Nq~=)RXS%t{S03jYc;c#;Itg4 z?mu!QN3l)mym!OTw6VPm-6)B?sW+EBX+r;$mkvtdN_`;a0b5IRnK`df>JD92Z_)(c zfW_d$Ouz`zOXj!iyAN}A*)#g=;k=^HU%q{F_VF=l8%=L0-pzrW9$D|II5|A)Mk50Q z0=`Y>L`6i%n$83tJ``E@{%2_m6JH30tExAXn`GuM3IHC^_=(DaQHq5zP3rkzTfmax zImO@4FC8*_nnYeg8@zIRf+cp&M}IyqHp@?%I@QHpD0qlLl?Dy8lBH=!+Z{ZG^*NF} z>#&<^!TR{${7jLuh2v9w2_pWbJy;3HASrAg%mWkGp3vj)sSXYyj$I#cGl@U(r?It{ zXRlhIu*gZJhLw@u2eJ(e?Mr^S*`Q2^zO?A!j9^b5LNQ2}lsL}QGg3SPq>#D32jVli z_7VYwY>H59q~#M2m+LJ)T`re~J@haR%LT+aV-Io${aOl;iShv$WLYUOk;-5l80kAv z<9SQh+jJZv!aDI13~CLeoXuU7tug~T?5yEwH3?XdODrS$`BRg$%rgsc=No}jtBY%< zk|s^Ih5Woo4U$MMJD2jaEzcy7RKY&HE$eyf5ptP$VecYEUvhOTS5^5_KjwZf5l4qu0M z6reDK<;lgRXd=V~&(#%Y&VNI=(c04g#@U)$z`uu4RIo{7w~7;g(C`r> zksb8y5fbet9AvKAds81ieryE$>w1K)n-MqeOFz7K^r4E+*Qxu5 zjzUj4QstNb%Su%LnKRYLgm-i47fSyyiHD9J?8OYR2BT8@VunZm&$z4V(A4Zry{%if 
z4)!~ZqQvyBTQ$xEes0``7ISUu&Zm8cV1MAb@^_7=_cGZ=D7@|t_)o7sD03w%ZYSBN z&YYf?>lmZLb{qt>!x6Q~lMSVfanX)Dc@*0tr_e@Sk`w+}(fH{`KDFcz%xhvi7*Kn1 zm~%Jt(pRtC?+7u8x5_5N3hs%cw6P5-1r*T{g?WH)Owq~bZ_z!&O$IRyXb-|m$ru)@ z`xbsxnz84SY$5>{A!&dJp!?cOv7$@@Nq2iCuojWgRT6*Hg5&q4C*$1fMg=P)yI5XW zEaB%voLQZfPDfzUY8YaUyywqv%i_Xy^nJ+|cHL%2+`8;vV8hE9lw|IN$p-=l z`E>O-02ZaATNzxZ`;AVHkA+R4B|qpYg@^5#ftCF0etz7SGE~yu4snnzI}9k@`%$Vj z+JWd36cUPjC?lS@9J1ANED@HsU{(_D`l~f{f3w$91}n8W+qZ8oD6kAE73hvHqg|Ay zW#b?{h2l!deJ0;$*DZI53mwS2!*1(~^Ik_~ZV}P*9G{2u>jTsXwd&3p1~>SK;K9t4}tefh|UX)Of=?n0gZ}yf5CcXt4LckkGf=RRoucGAyMVZ z4%P!k_}-^v2}h7SqTI9P81uOaDtWHMhE26<5%dw~k`vOXofmj-qAanz!$7k1F{K?W zI|^NNl_ku=xn4>1aEuM)_3YE89t&z;FARKo=&pXQp|e1n08qrN>+VMi4~^xMZCa!5m(-6+&Rf{e?4DaO|5uG31g!eXkhiq&$Ot& z8)8DwvaYsb<@}Ea5K-%lkBE}RwXADZyMtCJ8vWk8F=Pw?7Y_6iu^9jH&VT@@C$Ixl zV4Eva@RLi{r@nnzt>e`#WdSV~<#Wr@jpm%odN#hQjjYCkxR`zkIBAXO1gss*EOt~; z=*0@ouy}H0Z_JC}0g1KZlodFkxPnr^{jB#w#H}1a4S$Y|m5q&|-_d{nbs$hIG=i5Y z*=?sgpSlbjp<)W#e5xmfLa=bM(^b;Eo1F`6W}WM<@`^h8g3_&Wk3T4$H2S@HUpenU%nQp$j z&3;3;%)!VvdR^GDiUikVAah~`xS2y_eKN)()6@LMnsEf&X5djj7wkt3uKfO85~7ys za*yJ~bJ)3%`;U9}haS{cr@MOtNZ6$Y$Uf@hlTf|!75wIUT!S`Cy2-)wJHLm>$AYZH z==kJ}*SGMarpCtZzb;U*DUuTMsASjey?Sw{CH;tJ7`o*IafT?Rye&0-*OzSpbLuK0 zD&>9cS5()7Onjz~V!JCn>-s|#6f*i8&q4L<91Sb@b^w7;ALh+sX;Cv;kdUL40y_{X zy8x8nqFJBWw@;r|e7tW)MB)nC-nI%}OHwg@H$M1#{)9^Yjq#Ll@t=ziQ|Tw@J`s`a z=>2@!9HWtBPRKUYS{a@IE1U&okllyquf(%wx6Zx9z%DW4iP-fN2sw@fg5ws8n+-yD z;#i&ysp6h|(L2iZdl<7CsPa3-mfK{K;qo^*dJ8XzS?+LP%PQc7pO96V7fg%bM z$TVXtOi{>&4%+i#dfw`-C?~pH#!Us#WoKvcNIB}Z8PZdkUwM48E5X3!b5rGDI@>4H zU6Wv|N>|%I5w(--H1!UY0fC4;JqUV5KTeQ(c8@mhSA=YZG#@@}>sI{VLCz@+&@mR7@Ne8-7d>;F* z?CW5HRXo4X7v6VpGcF}Zf-UtD_-+F}vlVjw;4M}XpQ+KNmwQryq()9P+h&m28$Wwf z;r2GufXjf$2VT15BJ2Zyv)2C|qq!_xX~jac<-J?_vzk;$8-~>)KAt}T0|Q9AF;AJ4 zgR%#TK*Q$1(4AM{VS1lQ0{)f>1U6`}C*Cg90424Eo={Gz#(zP6E4nW)M!!k$A}7hZ zk^Kw_+j+rQ0`BDc`}&6Qpzj)hRM7r}TC$*=Af`Wd&|%0S4v8u(oFF&8(RE(GCihI3 zy_Hc&)mYR=+-4sA<4G729@`FIT{LV|qx&4UC>V|{ck#=W6c-0kGZ+odyMhdbh 
zH-D68B`Ig*M;r?%sR>J*P*aOFC*a2F@bKMu_PGV(*j69^nV&_NxV1fSd^)G&haQDl zsE!_9x~*Cnl6SXx2Wm>5Ei2FFL4&IggQHQRDl4dJ6GA*kqTQn20!ZtobGPBT?UZp0 z1o`eUS+((nvw6qJn^y2uVrt0glf)i0X#;Zh!fK;t^?S?*;N0ORT zU?oOakqc?$QMap@jY8JSEK1X=G6+qDtbf}03^Sk+>exN!;H{IFe4W1KGrwvr%(rZ8 z7&pRjukMTT6+hpaiEL%JG&+RCb?i<*77XIC@eLogBn8gXGFYmJtl`o-@99tVpc%EJ z$s$EAUX4*)14!IhHg)}Ill2}E&l=a`{BknC23*WPGjn_9r!@dUGJcjLxbwk^a_R%$ z@z1Roa0FQZdVKcN%*g2K_g|$4JHj9gwa{g2lgNUsrqB=oI*cBz($PdIoiN)u*lvEF zq+O?%O4)=&d71QWHB_Fr~a2=I9-a9M#BI5UoyZ}Vp+u|2e9OgVTZQ|)0LqaJQ zQ7-fD-+%dCe1Kru`PBi?nr;4CvMKu&m_#zp`6oxIjACpcfXMU`vb6G>q~AMo4boc= zd^KcSH}(0O&)UEKpB4bAZ};ry&)ec($Qu9-|`pJ*9Vc zLFosIuy`U4dZ9y(jrdI#Iwfb&`Xw|mqO4joSlGKJP4->xjXM^c*ENSROOx|$vh*n# zxiHYys~{LYE&NT9aeCK>c8A`;*MuyS6rMI&(bVh)P!3Rs;Vtsaa#!zz2w~Qy2;RSe zJl<;Z&E_GeC5M#$rB4#WjTf5_RhD`?@4m60-m-*;oLD}q*1KrvAm)C=i}^D?KHJT{ zm(pBonvPg-rS*Dk+Q=&O324t|F^}Gj=N2A#WLPxM{@OP-gZrQaK)K%+NJvW7#+{!lE4>mGO+O)H7dPL@ zjZ_J)_)n#L2$FMu=NRt{WkNSq=DHdr6VEKI;q}PSh@Fg5K=wzSoW@`z8*Yn+`!9}1 znY8eT!&2k?2k=p{@3G%tMin$Gp9(IXKW_j*TJJB^h@6M=6)lX$WX{(B`49qol6Sxp zG_8jgSy)=i!(g8dnTrn7!8Nk1u;tMH78dH1&B-UY->&VI@`?yknELvUd6?<*EUZ`U z$Nv0eKa3=awJPA%Ug7p`x|);U(NYXtkQX&;<=j$Y?Mq&~xN@>p-+&`3?i;plb%B)k z`$yJmaJ9}fshv2PfE^Lfw2d_{AtR!4K2c|LH~YbVpz>48M?@b1bc0Gk(+#VKgjN5E z4U(;x1$vxP=Jn5ADJVvyDxYDWMI?xgqI@jMU%>ijpY{R79PJ%)6klU^3YGeP(VEe} z6_zl7V_*F=b+>_bg0+>ECLp_Fqc*>^Y*E$>w33@&r%22*)Tn9PBbykMK%X6FBS>F?%nnm&oBAffzhh$vU2_U!s%*u9PNYPD=&fGM~M@@>0ZRU z2&I3Jxq^PYsDRXn#vFU15n+S9hbs39DA(mb#DRD1Bt^E_($=DUfnk85@uFU8q>~?h zj$cvgC%f?&vVHwI#zYJlqyzv_Y@P}=dppvKga6=cbcIMew}{l7EHT-ols%8-q4P%{ zk1H&B^JW!1(EaNcf6^x710=VDW3bvaWqnAbdN)4wV&|jWW6v(H6wcSo?jV5!G*#-F zgifGh%{|FENQV!Xt9P#{fpA=6%ZBpuL?IM>;ovg$Gmo(9@DF0H0vSs>(Zn=#@C>?H z`9?jON2wUnr84%ltBgdd<~{VPSQ!o7bc@^5S`frXfl)To7m#;=J4m_J-%Pr zU|*f@AlqftcD--VdG?+)JZK+vX!yEL-3l0>RStpKIe;uzY|Jm{{81v(=b`}FP#@SA z)PH{hux$^}$scY1>enw3i5uFpZ`;XXAC%MN5}nqHUmtq*A8atKd2u0tEy~VQw>j6S zDx;U&=8uxgaesE>l@`m$jOhQ*O_q^MP!7Yr^eG$HgGV9082BY~9J{29vw09DLhq+C zCTglt$_XH%5Fn~T5`KR*Atk^&LnyGkt}> 
z;^s9N{_g`m%iJVAr=X#XA+jNK?G0R%5?hp@X5d6Z zWO94=3+P4>nlD+pRJ}|ziEMm4(4~i&`q5|j3MB+%%|u{a$S1bV#jiCxNarFc#J4_!hF{-cGPtY~tJP|5hR>Gh85@Dim})@6Db#OLS|JRTZEbC* zpT>xghhb>Cfbu0DKMt+j6c-l(mbK!I_`NZ~TzHJIi_;XDW`~C#m0e=jouZLTSh{#I zxP$B)wg>dzIf(-&V#X6dTH*WlncVv7P`^va9@h_J<<9ylLO5ITPu<>(|>a&LKYz`mK6HI19>h*MyUEqHrnuOGz^iR zJ2I$?`W z4m0&Q&{r0X^j9brkvjEo&b)cv4wT;*4qD-5{>T5xDOc}D`(e3#V)N>gc6hHDIN?d( zc9^dy#G-V6{(bNk%SjGX30(k{2ATh~^!vAOlKZ`>4)dC%Gd&n_=ImL~^Qp}Eb6NEv z!Mfm}W%*4`IjvPv+6g{8;J#Ne?i2t@POOSn>%h#ZPlz($?8NysXxMPinKN599Ke4j zjkx*txVUu6G|C3W)lYT3C)?#5XnuP!x-J)1nK-_f1^KG^p38zFSk-^;{{4zkzh^(W z$BpokwszTC%But%Q2{#5o%s8F2A9k*IVh!(6;IDL6Cl=;zEuJdV_fi47pv%yfv`ZsTEMgy3NHz#)mSssMI>^HW+*2cYI zyq&-%6E#be0_yI4apA|NH4(Mp?Gzdhae&arcn5W zckcv4gGlMy@n+qXgw0#G`tU1^>t~LNVXUZ`fj?$}p=hLmyAC#HubB%Kh~Bq%k}Ilk zAdF(2UG#1;-sD+N$dG{fn#}qj_*rKx-#jew_y<1CF4bbpTtQm z2naZ__zf7T>|V@By^$BLTE?3bWn>fXm;ZU9%dFX<`sWK&)uQm}P!2G#~R zjne^Y@|!0yl`}%0I1Nl*@=NBC%eCKHiyWP46>cmEql7rPBO<)kBLoxRVNEY2Y?Qj) zQ@#SuOSiSLIYUTfizNs$GC>2YKO-uPpt5SG-e!_y6xchUh{MB)B3l5;&B-L3*iAo!j~8C z*swu^TAd*HVn@*reU+i$Tkdey)?*GjkqYLXUM7;H@lCh-(v-CQCEcMqa>ngGbiYjhT9)Q?{Kiv(m;lB#mB2)L2S&M6w}H3 ze$Wi;)s41$gIGs&g#|Ntk~-fKHD8oZy;GREC2PL2JJ8V3D&&g%gZk=;)OsHpa{Aq1 zZr6$d#3>NjSI}HC+c|1gU1gOc7@%=B5K%IGk*PkCI+=A`TL9RLYx0R(?D|Ms!E_n^ zrv1Y$C|Vgltvlyr*#)?y^{&(vwXMMHj5;o-`6XwnYOR2gthNaF206pj^B*>gHx1P523V#(@k6EsP*N!l~@zKBKU7UF005+$3P1$Ba>g{j9D|XU4z0omA0VL1@Up z%SxWu-L-s(U6WQM&yHSPTz7X9IRLTbvuwzr(9vmU>|e)Yh$ru)@iSU|8i=`Jqd0e> z7e8|b#Vk!7{gP2H(0>~Gx^XoI)#~=k8m1gnBT-OWy-F@Q=491ey8?Fmm9k0y()iH zr493(aqRq?F&)F3p2q}53vcKX2M;aDq`vJKPcd1QEMoW`KBo;@D#+0**RHK{wnMG} zOzqRk4#_Y1dZq(gJOo_3#tah;OjCTZBF*R)w~#~5_MYojFQv(r(-7L6p|4lpzW1vn z72)#b1l#CcF@`6I?y;d6%w8b7!5|A{f%(mUPEA&5B$PEUUvV5sI&`R3zsPQ<&RHcj z+PhF#ku3am# zA?VHx9%BJv3OCYJ#Lk2r4I)zlU2x7q99^r|tZ5QGS3EfE#GTV&tI`Zf?CSRtqD1UT ze{TbXp<3^~%CVdJcRAv@P;AS%Rkf=b9VAhw<~J}&F`vVpnfVtMc4VqdL8JU+BVV}0 zlPqY3EKM8nhJRH?-NYH5W+P!X1|3Tuk@f>wRu$k%oMzV#9 
zw3yquT$)$VvD&X;=Hc9|W4?Kk40xh^n-~y@^pZE;HXfxS-;R|vJ&&-Fb1@N*({hldKQ0zny|9d(Se}BZ$bl=5`t%V2+IC$xjAhrwg<;07MW&ymA zY_^OvbjIDVVg)eKv!2|elD9{Zn`|f9l6*P+Sg27x20#o%HTw~j{R&<-r{kc>2uAkq zBvNmoFXOXSe^Wj(FAJR>njx+{atD-seS~BR1$7v2%j)63vn|V%cnPhu0>nl%; zP0{phgkB9>yp`Qo*2(Z`B`gN{Y4ZUNO_l($M>M%-1r4JByfz0=23!TNf4dRQ8!Pc* zm>M^FiPVwD$HN_{^Y+|ZG}E%Pku3z~&wnz9pb#7Ql#6A)MBM9v?Kh<_R@F@k8mF=q zY(RFJlINJ??}giYZF}3Hb;v$*wT{4$i%s(`&1nNJ68a9!1Z*Y_42s{U18Ye9js8M* zvq!#|Pp{Q-Z{x_@pc*oRDOl?=RP+sPFM)u`zDUDNZfWt_6zpOcD*;-cJbEElf+TAU z^GBS8g3I+HVE?3&E8!ZQSvw;_aFTzUhTOAv@1r)Tq5WHZ^R_DcLK7EgZu;{Ne_mGF z1iv$Hq>SYNarX>(s*1UBMZ!1z9+wO6KxMZOY?7m!U^Zl%3bMrg|2j6B;P+fKe+b?i z9TAv~uX~&76H=XNv>wD9MPQF&Q}%5v?1v0OHZPfvg)@UX+iQgduO);}KM%kMStIs! zC$Bn-hcf;N?d1A_J{QlO6YbjCdB~g;opdofiu{sAlJhk-bJ8W*U(fN5&}@QA|k#VP?A#|iaKk}WJUv;`)RNE$bev+eyqkOFE>n1<+=Moc-414Z=q zn_Qu?R3#I6;N5%o3>Sa?(G|?~#Pw2oaQP8^g*2yW+hEH#$a0kZUEZ=njy}&!huwGg z`j@_b4T@DCaK?DNbXZKd8*GFV#yBAh(SEk(mg5_#>dOC@e4~Ek!RpddrRqT0$?~zT zh$7}G+);`&E(%-u8QAJ=3NMb!REvQygXyE}SJvK+vG1@>7z%NG@ncz81f);oq&AGk z#?hHx>Lpzz*AVmH$PvYCHQ@ECnv0*6X{onU3Mm)#NiXMYXO{G09|Gyxdyi|{MCRHb zLN)f(q82U)Msfe>PoQ~%MK^hlWdRJp;_3{4f7u~h&x`87b*wvu{pr)3)vH$vG|MQiG7^plR_8+4TIms9 z*t5Xe9)_i$C$P3y9kY2=A}~=o!~;e-G|=~-K3xo2T@q28u-o2l&RIbEoE^Qj<*+(0 zAjFI*$-5g?nk%texpwdbBsdN`Lm?E%DJGBVvW`~dekDr#7f4qhqyd16aO~2ktP7@Q zW+Qu2l$+2Uy}LR3h(~b3il33OWvDbX_4Klpipd$g#@$$E%w4Dn?7n&>EG{5Z-g&y) zRy-OD3|U{HGpi`=cHx4gr_%H+e$2HJ0kS=(lhh3+uh?n57QSoF<5gx1dLD$ltE&xB z5yF(#!@slBF`yVElA~#A*ZgI_toR9O%X*_r5HP$|6^aJhBSmEqY!O{sw>*agOFI#f z^Q7WFNr|z#fiPI`miRU30ww{{Ef@!bm_Zo0@WYY0e@f0mIu5Zye_up*&5i68PB+_m z1$(+$`eV;bzj-t2{zc?dO_4pdx+Rbkx`*yd4Uht9)Av1);O{G}*`tNuf9c2D0|~}U z-hO1B=4wd&l{9vhS4M4Z?c8tWp30?Mk$iJ0i+}bkT2`Mj7kTrMq}O;h*3bwOtKBI& zOaoYKfKlH(iN>Ty8`%7xA?UrQj<0A(S ztcKKkv3+)$5G{3Ykco)ZMj_FN4m?Q}Ysz*Q6n4Zh&I45QoqqWW%x#(fq7P3RaClXl z^McQj(!200_a?mkrW>hhm4^AXz_OTvCMH$C24_8W3k;7Qi>gG52Zi#jKiNMRleFXw{am|Mn^~tlLxoF?j#piZL3)wSv!(w-x%B zi+hS=Sm@fA7zyE;S%d$9rhoP7Rax4{Gohu6 
zjBc=7wL<%G*wM>?n%Qjt?V1*8P8|_5qc+v$XX%(6+~WGUJD0{oZU`@M50;;FLWN{n z^&z085eL^dVmrIRpq{M~7h`u|rt(a$ArR6V?@#&Tg8zgd&#S-%Px|H1fBv%jp1f$04hNqxE<%em&;@`(3y)+5;c!n}Uz)f~k7e zN7k=Q3%J2Xc-P8JnBmbdv52QNZoultG~8wWf>raou6EXl92hAJtv>WnQEEQaQMZ=+ zXaGc(^TZrmo!e9KTWjF3YL^NE<3m~RW&^7b3|M0nPuuwH($cL%a(Kh)Hco>iYLmE0 z)n;oE_X4q$FX(E($i%Cb z@smelE|Qh~1L&S)GknAt9G&R za22JAT=iS9`i=>9WRiX(voIg_KuMZVZV_`*3aK>oL)IoWw|f>9a}K7s1j)4n zDe9)qnj>z{gI(abAf|-|O9N;VYr=G{_7A76voe z$3E5C4Sj}4*3$6b#gbP$RkT9Lcdh^42i?eb`0+WLUm`)!*KcMx5ZM(Uy2G{3Mn?5H z0HuBzbhboPY^>T2m&{Jw0e!a-x5S01oloa4yUs?mj~%KLyb;WXF4b6d74X8N_OkI87Rc5}jVR8vc}aF|uFLrGZTb2=)QgIW)}g5Q zcv^Q&r#Q>ILtXOM5O?X~T_!Ptj$`*wGMK-Md4GHT-yl%W;_I8XQLa1s4)vCw7!=06 zM{EQFyI$`rG%*0ONmJ|%{y&c&J-UY6B?i3MJTZ9{vVVe$x#>SY-W@(pM%GRL+y7m= z(Nc>gl16x9>|P`HP@Fg{<2c+{O&WzfgUA*ye>P0QHoWe;v;Xo|RFh4*oktR*&3jBv zG{Z6puA(uBlqadN$ssGLpZJEy_2hWK)XkqQLo7G!&5u{ETp|8qfc`TcUVlmI0Q-@t zljmrYiZd;k_nS;qqg(^0zETogxGWMpVVd|w#_h&nK}H)Id5GvAk$@|_d1khn2eL=2 z(}zE%a`u2OtEnFD&}RfLdurA1+qB*KVm&&Q+}69!WlgJaaTCW@PNjw}t3bQKNF96h z5;qI~=hvFws|jk@u;J?e59k3rqE0919-q%$R{(QE0tQ--!eYeN1iHShl`#6!^ppV! 
zBCd)~w4-zkEI`8j4?z-sTsg0N*~pc(m3pMDOJKgknnB6NQ=NmcP( zZEP5IG8YI7gK0vt3R{Lps=j^EmBNbDvDAeB*cptV=oXqp{#xY@sK3w#aqGbD=6u|& zJJaIS5a2b-1zPC=5KeMk+&AHMNN{rQM((Z2TKso@{{ETVHR@;hH)7(7tXh~5dXf|2 zhpOo~{XDF2M#wI17H!*1nbw^*em~#G~zqhN#HZ?#yx^AH#^rlEHpmn0q!^%36|3m&PYecIQkmAO2X(14KXXKFfwtmy}OEzB9KW9|}&o zBt0C15=fSqcbfz(IrUag9B1yn2~X%;l4aZi4rBslOi zXgjvl(bw9Cq#!#Q$@7L>G zj`KK<6JKZ^&$GQmR8XDuLtkE8DlS+mL%=4)tohtnpZE5~H;~*2c@Wg?`+O$5yDXCB ztBhVd3G%7cL=V1E(pRdJpFp0R57|1L;yQ#=jq*2{_LG=Et2u|0H|aKKiY#DQ+sD z*WYL8C1qJTMW<;hJyoOldJY~vl-&!C$Y4O$b15QdH#irHn zM~vDPJ2&&y8!OC-bBrWAUg*-Tn>~YFmohM?`As`{b|pXFq%rB#qwZV5Ea5K%%K*-u z=wEkLP1)Y)8cssOLN3iNC}_=Pd9d_RK|$60)%dZJ73}8BF@NI)&s=l{cxN9irB&Hk zHt;eZlJ>Y_AP9@7GOrI310e(xLmmNo(eCVcF3JXeG3X2-2yCjT8vFg$^sgJp>h;U0 zB2K$fM4`wz>?I0Kc{T9AW{LX@EBdZ{C110#vnZ`)rfQ;v8yYbD8xJLfM@EWrrTy6c z;n{~(xErDN4%`C10rExd=9_76ycdky+qXF!JSo=VY#aw^wRY@n+d6(Y?QPa_HYDJ)xgwk1t*!~N{u%kXrvb8{+B>J`g>;-~^;PfZvIa&*AB-dlB*dEkt?d+_jb|Yj;b_5Sju?J( z15W+|@=2_(1?vz|Jxy@zJL)h-D5CmL9)A#02{)(=Q5MWcjG8yYM2qwX)C>kyJ6%@J zVG;6x)>I}g5|5s=LH?*`V2~FsK6|pL+Ngn^g00=nK}rUl;oy&;r|EJ6_*6dGj13sQn4)6tym=(9%Rc>^Q-nFLxuCLV{r7J zeMnj%FH2_Ksk zl1zba8uNF5wqd5gA9OkIid_k{(x(|bbpOtnz8gwN1rl9j1EMf+9OmHLAOBpGY!mYxpQx=&CSe;aI;u_h%hhPjAQ-9 zvl}jV%adV4a!N|c56*kr!^}e+VTwZ^bB)#a0azMy(zhr3#hnQwag@INgB{ceGYBYZ z6I|%l3JY&3yD(j1Plfg~1QU{D?B{V>zSai!-VR5Iq3ExjYFmbWZqKHpmHRKsTBLV* z2Un}+7EJe?V!zovZ6A7tA}T$V+Mu8y+`P>7g>ebmKE6%oRh!3wKE+zn9SWM{)f!7` zOSTBVFr`1B_O80??d4U%9lYy?Fv>Qu`(d(n%mpwKcyvs0mE>Su2iopzwf}w9q{guR zMN?^J;6v^Ph*=l>Cm00Urmd}}o}Jf`Yqg&SlI)hcFl7{$QFB+m z#c!n-V@3Yd?xSA`8d#_5@@P9gHEwYr%<(!iELq?10oz8|x*;y;fr3eYK=~R5T9fZ1 z5h2~6<)afD)?7|DA4>#OddYBuXRW&^d{XNhWvn|ByEb_3zP)kI72Bpp#YEci?A`U&?PjB5 zx)Rkd__lUUShhyEExxprhGo(9Pq!k#P>1K|xrlCuT00o4b5$kkIYs=}sTBdp!%FvTpNqrrab3E$wxa-8z{HG)mJpYDMAp;DSwIvq)-p+%g<)ShD zcS7tp#`9;`C2Z*dZG1P-0!XzJc?#!mn=QRoef5!jGJwUitrsy+*|)(YFHp8pFxXQ& z0aT{O<+uLID@bRTSej@DA!DZd?tz-es9bM6ufPORNQf}*us*o;h#hHdWUJAXsU+e$~4QTun)sLdEi7KZV%(=!;VA0xes-Kjc*`O1&COE4HbIJU)*4johl 
z!-p1l#uda7QKR=c*{I5eGIwp=#Q4S#u|edcXfn@m0UXJ=j^09*Gw?HM*4iKT!KpQX zQ06@ifG5&nT*;QL@6wxqnN2^;>lMzgN-RjQB5AvTiNuX34@ujm`1meA&%A|Z*n5|~ zN}TOKOVJMqZ>&0TMiG~}Phy)a71m&@$u3E*>^q8oJkY8KNL#K zV=RJ{ezI5eWhN!KPRuDr_VKbe@N#=xO@5}1|EZ9bak?t zF^0Hf;@XB%?sFR8yOxfQUCBvCc#IbAb_l=is@$6L9}(-u1CDf9948Q|f&Vv(!vF9V z8kEOhLYCTY4+~g5&~VK!wuE9#_D+Q|GV2_3bLhPj9(2o6));S%Yd2ol8+=#L-vLnj z)i4aJe0#4=#Fb3f-tN7r55R^J!V`d@iK{RkGBU|cIs<5yQ@>Qr*Aa-KbNeDx2- zIFI`f)2Y9N;$&-^(2L(Qclbs7FNLDf;ow~SPNyP@rnEgo3IQ;UWVc>@ z{aV!l9-oJ_e{5keHd6T-6LEoIR%b)JDdBu8jJ{UYZ!QTijt_tgFOnEboc7}nAHG>7 zwT+AYaEM#KgCJxr4_3qm}+cmQ44rc!v@IwJkg>$I2auXXWbg~ zg0tK4+Fx1qwz=U-kajvvy~WbR(~X?o%EzSAF6Ih7P$H_zS5$RX`C_uO(~}!&&H;!q zAiz1sMEQ&{H&Yi61QJ)wE}fSJa8uTNrWzeTbLM^D_sj*v>Ganp+}35L`0sbVq7}ob+|>ov7u8cWR(5V#^*2lQ!+T>fI^HOSGpYNt}UxFiJ_vH6_KZK zc8ji1bEXxL@Mz?e&_Vp%OWB+b`Q7FYl5<&gus;zAr6kGxQ`FuSI30l{1Vy4srog#* z-H&8Sa;*CFneiSoZtq{IAQu$L-E!@&ojSGE-aAog(gCZ?`qeEQ!R#joM_w-<*`r|qO(1aTx3oEo^Xv~{E3z=xafa+V*MEm%Pp*O}xK60fx6tmIm4RjFz z*hwelXMst{BkDB!341%K_HHT-i?Y&vDU0{`n4>Sk8hYGwjE*x#LivZ(8%$R(CNe|t z9UK1&iYiF6ZYfx{^XG164skC}V&PHDJaA=Ah4F&D7Lyc(GC@`;rYKv_IGXP1=(pAr zv~2zh>wT_&8)CFj5+SE${@N4~vCU^<+`fI9L>67Gj+CCTi5|4&C`hz_ozG;+dFIUS zx#Ms!`Zj$xWwG#BRPHn8vYR+jCvE^1YE$Y0(Bt@N3lgZP= zY-^^n4pSmSfp;DCT06liRmm1kV+t$EzXGIhi*9u=H>r(qo4rejXo8 z>K+PoF>7&4M5jk5ru>}I#1+4)4c5#l4M0Y70@(xFir(%(v0qI^f}I&_a6_0!I>*1{ zqBDszSef=|?R^6#rv~KbXLjxtA!&cIKLaVrk;hi>x_R*%1A!MvGI|fC>=$(XC$AOB zukmm_y!lC^?G#KiCtVG12%smdgKre&h4+s#4YIlxAJeu}h9795PA~**#`{-4Ao_d` zYEBI-5!rA$y&uyprqDUVutk|r+PQmz#cD+s$fE{~Mw2j|_zC+?iYX(GenVN~-I8Zk zlFhT0*Im18A?qWU7yI_a>AjFs7qgT08(;ahRKyR$WYgrV)s;;e#YRsxzhCj+&MvYN zHdm>c*RZ-;CavLKH_3~X4MhXRV~or=C?9DBF)0=DP+1P)U+lBimI9;Ro-_-fu!@s@39q% zjiap)Uk_rw>LfG_b^!rH^)m=P9)O|KZxVO5ufb?4lt2cwQRa)Y_PyGzJ9n;!SIc;6 zjE?6oU^@X<48L;vWSH^~E9U*g!!BqWvKDyWF@}K1834OS^nYas)URWlmfKh29>sXM z5(hQ}mruy-TWi;{fhW9Z%-uDuV_i_^9DSqMd57zgGLEEi6W=rQLS2TpJ%cIY()rgZ z7rtO-x1-}ys>{|^^H5zL@O(G(~cecBDWPc4ij0(Zs6ZloOU6^$>(I@+AH^ELiAMrbsc 
z-Oq-UUx^R<{@a}3@~7uFH#t0O&9_#~#xbs2N6R2snu?+A(l{pRCRm%dqD|m&5xBNK6srRYrLrRXfRV++*>@BUq*&oN)bEQ5gLP=T zo`;0E56{mnxr2tX{c?0mzeH2FF@w_OH((XT&^RPC^h}9A313WzW?NjU3A-Es(->x1 zB7k<#rsP`XL54$C>yb)4%VE84;$EL>$Hc4=!l{&B#IyAPZ68TnSGd@oD08RTv*S2<|Xe?x}Mcg@tRN?v9h@_{0vZA|BOOM$t(<#2v=(2FE zC2GcUe<*TITnjKJ#56V;e7oclIcJPxTRvL|Ml}ntscYU~qc7spKK8rnb-*b0n!&R18Z+Zky>FFCUKpBwi zT!6Of!j|ESvMH!p*YB{Kh~o?2i+vtbtwS9EAtL0GY*F{JpS~Y(2Fw1%$=Qd2Uhp1)S$*HvDA~|Xc*1P%q+gOelbK3Y41urbKBY26;LL~Elqn4 zBq`i?PP6jb+|Ksp_ryhm6}K~gf>{WN+N3SXsDD8HAe(fGnp2eR)??_A)W3@tA-T=W zN8|zIn{|zedXwI0+&r`Jxi$4-&^zFMVI|a6G_=p<}U}OPv+qP|MG&wS`0^VHZAxK5kyoN$8nO$Zx-wOzX z&ceGeFHgbHWjdn4mEqr=xK9G5S*|99JPYO0K+>R<-+WZ8SNO2G>JYG{V(^^^yOLhE zR2GtZ{gs0DdHS{0`jUC7nVRs1zQj;7BW1DKH9_eyFs=1#NE2{n@){v&(B{ky>H%DW zMe!SJoa8aO-hVff!bFTfPfrz}jTJA~+?+R;A`PwL@WjERxp&R(GBTvV3?<$D-7WJ# z*R9LmSSK53vU_TTIy)aq^8|o=0uhZ`^ z`m;smmS+GkAU=>qN4V?El2i^dz`%|&ejRseIF>{)zFY5Ut@yQj*4i{C?f2f+Gy5{L z9P8*Pd^d6vJdhjn-ts!J{AlS2=AaLG_Fz>TCMH#<#B(IYGnE!!b*U) zGx#FIO5q58@?W{Ki1N}g0AgXjafG8I=n|6enkZ88{Fpv#e){T8RUcRjx+i-&?n?#+ z3i3)Tq=XWWMXyvl=bTb+*%=~Hbudx^4W!tjCL5nqm83QeK7tn5~l@T!HZc<4?+!Q|j6;LjoO zfYR22Ym*5ryiX-3i=oKM*0ME+C!42AV^+k>TBOlA?2r8eG(h#| z-eeJn0G{C6=Uf)C34Gc}BY4z4fez%jjccl^Y76F?*Mdri6|4;Z0$*?5Vjni_8FMBO zk)&B;ot*5MB|ol*(g3z)HJo08+UF0iwglLs_wWDnw&tQEuk`7~#*jwPMAh|UL7s(d zFB67aBS0yW*X&D%wY6maJ9vI#IwI`%)D#jX$g1WxR7p^HM+S47)d46oJUzO8;bQva z+qG_=HlLVdidEMNmR)NxQtPO`jmZ$7CS3WA3=sd|&Wq<>{U@3clGK*66xsG*Z=d=9 z<`U3LP4F3lER##Wq@{h2jBJ-iRP~81-R32wi7(j}ug1w(QPvs^aH?5d{ZW^UxTC9#OAfd(I?@$Rcse zvmhkRfz@@_BU;o1YJ964e(JFDFjW;5BLo1xKj||EZ20%|X$7y4eH%Bq7NZOc8H~o5 z^sumSyELP%C~(d>v2al)+;B2#?(!5BN$Mz69#0Sp82+ZTM2+Ua2ty6w=HX%MeUye2 zgz3!Uxq#fFI-Ofb3PMj`Rk0Qw-_cS&Eof3Y9{C^MlE0YbTgMC(r zI$qZM1$MJsK$xuOLrmvgC0p1{AGTz}?rzgIFeD!Tw`eTI#kbc5`_28gp4do-4Vlcv zkbx|^mD#l&KqkeiC+N4&?CaMVX)>!8Pi66^{@$P9{H8*QDBEI!*1HwXFlF!_KE`Jv7f2Sx^kO|s>bWKKG=ipaUm9mo(#utRa-KSx z(dquF&w1XW>9N=d5tuLPl`Fm_w4O(scJJ|fTkSiwQg|gy1`_C+jo8_wLREB7n+*y( 
z%BFRG;c`W~bRdE5#t)^%uYLh35w>er4~EJk32E7-6vdXzc*BE_K+hDj4POo0q20=l zTXb*vt{3-?39Qy?%weQQ-C@T2eno`aF#xjuDdW294Zjva+D@llykAQ!y?}T_G)eop zf#To2^aOF0uRGr33$g3jM+cMNa#R7cQhScnNYQ6OsYOmK*|s zgrlr@GL2Epf470%aXB9oa2}v;q>i zJ!Y0k`6)F^NQGfctk&6l69_-$EF&yPJspZqA*=y=RE48rqIQ)!P0+Cf6S6w{;*5{z z%sy|ggvabg>T%1k`$+Av2ws`y2tL@ zXO{O_9C*kr2U`hG0D8c>UzlhX(^P8qhYqYpiT5N4XJW==ql#2)Jk_6>-x{{$J`<=V zpPmf}4?M*yuRnkq5Ja;MX2uZGe3%n2dp#v%Whn=Ekl&Y(VMKdFV0cU(Zd^z4t#63J zncZBw>>aogGPv)XkJt+m$U1JA_F2dUvi~nV5!jo!^L_^1Fmcf-lw)CGHKl|Jy3lp{ zw2{;f?qqY9f>`7;MB?Ed?Jh-^Vm+uLq5(fY?P#V6@l3onSTG9^;?nd^TqkIuW+`I; zggjNC4K}94XF3WU+jk#IIWZ5UL(Cd7LxwP|9?NJ%T|GfttVt+P)rv&5PTuc1n;CH$ z9jn>__{Q2$WZx#C*5&GZXNYziicL?A6>Kr02>Sl%`Ixt8JsZQBdi4ZGff4hGP296_ zgaq?8ti=#j?1(Y~k~GQz?JZ7TpZ1#{XlE5YP^)EOqsvM%t_TI(zP7XD}QB z10Vh(guT4LS!_C`c)Bv)!()986R{^HAb+d9bN?7$)(_ViBXjd6{83eFnsKtv0aK~Z zKvo|A!U8intzO)H$=Dt{zkcSzh`<$G|{?3R~4e_PR2?IPxO~R}4rB z$<*7{cpOcU{z@rg^wLHOaXW=$s5+XznMxz!wC3;;tyYb)C`>9aXbv9lJgRd&!Pt=6 zMa=_nzY@o20a_m(C-#a=A@VMa!c)k{PW<9W_)Y9xIRb2-$5m=A8J~!G_sWqI4Oh-5 zMmw0+x2M>n8jNhuRPUAd z@&fKT_FRDlVt*;5ixpB|&NeITxAl!ywU}K%21yNQ==kC&Wt31BVpssE%IWfoXDUc} z=CEL2PKCOXHFYA=t!c`=KjA$9p(DkHm$9#J>o#p7VZCF4F;=-O%5dPW!0upi0Q{O- zjP91urJblPP7f6*IXuWLuIU#D8yWB*icp})4v-L76x$okWClz5t!68vO^}HbXW?md zaN3iRJ!aG>?Q+=#%a`OXxt;6OrAsr8vSu)NooG@?CV$_UfP!Gp-o5WMF;!BfH_-js z2VE^t#)Gq~{QSgSX*o_$7g2H;X(|3J^N1x6^Oqf$cjwwrqCa@pw@)82(Bcr3_pE^6 zEG8z8hdMjd{`lHe8o0S6ZD7_@S^)h!H#Ug+3|vN%+y^pIB<+ls4aKd@*lrv_u`V9d zd`!>3Slb&4(x-Y=wi)mivxN_fTK2*wgm;3ax4{)Pl-jP!kg24UE91MUP7WLP7j+e$ z(cA3qrH5&o#wR5W@9O?PhCKT#>40q~P1@Dy1|&`lgdC?eFhhZKw5i$?yavKjso#gs zpKnseUuXeJBZlkO7r}US#YBrKtXk*wBWCXF$f@2pG+@8Y2fkeY2g7~7**D}-A|zn| zMG25G6uN#IY%jAz{cy#HO4tF9NEr^9BO_UA|5UZOmpP>=`{*a7(4%l_f=i0nqpJtMTb(7PPy?-g%rh6u zIKr-J7~qe9qp@J3<}y0zFb29dJWk)v%sV7lY}abiBDfQD^=(IN*D~9d#f`ggXUb$& z5FIg@ADmGnDnR|_H`!2x`bYH~*Mcxo$%^m(W_H;sLgEprw?SV3E_K$cp zwYRhG*4ttU7ORook^-8v)vm9zkGZlE!R7S!;Dov>Uw)7^@PT#H>6Ev~#N^nKBZ0up zf1C)@%uE~Yh7cZ%BB`nU&!c1l@lJ~#G!fbbwMp=O-al=6jCT#yrD)z< 
ztSQgL-N%OKssUzT7WIzQd)u1sa5i*8IN7rk!Wj&R4TA!Lju;&WE`EJ$t5oO(X)j;4 zr`_G^rhobJ<=Yk-L023q!755<27FD3Pts#r11%WQf>1C;ZL@dYV)0(9$?9NySA8uc z!}Y?af92}hkLT-t(JWHG-0>F@3$&H3Hi!g_KfSm*X83o;ozjg6nifjWOj_Hvpk?j_ zi}^G}L7-cW6X#}4VhAQf#GiOi<9UeT2r_C8$ z*_s94T^rRj)LtiH0-e?u4StZDn?|ILJ`F`Ai+<@=lx+G1Eiqpz`}5=|6M$9P+0HcW z$*6Yc5v6et-{m+tI+|(NkzO~`)SP$#8GUl*l8Zg05` zz=Ae+$$yVYc@+prC{rAKlgi4=U(cKYmcvE#d@BBu427#&v4)B+vBe|t-sY~X%pwrp zx+W2su{|)@>t0;?1c20YMgs@7AvU#L3_UH9%1!kh;Y}=U7)AXO&KKYG;=;sk`;}hN zJ!GQ$81;Azvu#Roo9mzP#uIp1k81q4zYH=_)e@vnTYO6A;NZaRRSaZ=7Yf9+AfARr z%-XN+hoT8-H^cD-VNz! zblfcwJ0iAB3+Zq6FpR&jkjMyhPFbh;YzbFZPQ~{Qc!r`hy<67~?egt+a)x-ad88&UEgh>qU;K)f7f_dF(t|i7h&jGks@+km67BbISw;IUCMn%opkQ=W z`+xBa0>)cI0w3Z)b%CtO!mb65zI;5&c>7BOi7>!Rjx+G#8xywNzF~-Q$u&$ZaXfYU zN4k7ktyk_)jQRnjTyUeaUA%PZs}8GHu9OXJkoga;dxWnrqdgZ3%2>qD!dhM<6mA5yH<}LIL5pKEldP*%xx7tuSbpki7NS!XO4KDQ<8Y!3mCpj&^Vu zh8bIrAH?(u?m%)MC;DrM02gn2d<=ODUs@Bq$T1YMSd||>%J*`obNGFgsr5u7y#zi; z2+dmB6|gCQ4Mz=pJKM6G%uOSiUI`LV{K6xw^5JN!oZ|6g&k)~I_FKqs_7`x{I)aNx z&jNMX2?~WLeSQcP}qhlEXYT;tC@Io60&)iWSd#v@A*t zWcRCxx(A)XtmFpOPxLC;<4y=HeE%M2KGX&sEKjGT*zv;^97PUF$EzaIy?nuY8hXaM z@IUfW>HcNsnfH_bZr*H4Bf!Zn@h5f32Qp$huul9`HpW2sVJWVI)?W7XKwJA_2tOx^ zB){Y&ij56C4(rpPyYQ^XzdDP+E4;V`MaKqmFP~J2#7qnme=v5~m4+3u=*OMFuhN%( z(3ET)eXRkmLgn)nFHdE^29gaW!)$$Wn=c4HXSJq_fkotwv4DX7*CPDQvNW1K{tp#s z1W^}6s+e7Lt z)Mx08cOS|L{nnt8WP6A8b?`%*TC3BRbe(x6+TI8fRew&{=P&#elEkMW&DqNm_G^Np3;fFLrHOul%Qp$2tH{mCT}9y6FWNTYBrY$^(6RE^Ou zS`9l+e9lgx@ZtNpnk@c%#fsf}2J|JPx5x_?(%Y5R%t+?P=n@nu6vc+vk;OB*^t&qE z_}Rp8fM$H|P&Vbs^18({Cr&iWebtK{8Km0>O$7jF(zD|pbR0F=Zr~&vbxvT16KJE= z$aV}C4V&qJ=4mz5*Dew#^|{YUP)SrX1TU2_bmt;nS2>7jfh=X{MOcv$ynpq({Eo8D zHzxpFAeyh0Z*hKDTwmv>`j9?on1@HA-K^&<3Bt~2v7x|Vur3zQ#G{{pnxAEeuz*jc z80^_fMSArv%?$e01N~M zYcYf=7#AzWSwc#`euvG*syLg3jRI>&?N!Z&gl5!h2hIK_sFxSCBkR&n;)5ZAt0DK% zzz>fQDOpU6I=GCgKP`V4%8*VT^XEGr4dS9aovoq&kcwP%(L=>c z=}l==o*btlGcz;Ox`j=c8&@vnqjVoYa{=w3b}*aXrjDUs7Vl>6?uJr__f&!+OO}Nc z^?mRNpEUFY)|g;Py{`$ZhcyTAsd~?Xf2ZA?QcN7k5)}wPX3trP{cY`JP0Q}Eg;JLk 
zuYlJh&H#9*_H`V>T~f4c`Qs-6o}?ShXnR>302&^*He$59cioCHSru{xk)EU z{_+;68N70HnErwPI1Tr$dh0~_Fn*f&yYoR6J{t}CXwWV^V$(8@&H={_#|hpdTULYY%|@bKXza&ptjWu76|T)(=$$7ed0cCIde+)^lA1DAIVibRz$VsksEUH&4~hK6Ri9NRVsw%J!v z6PP83JK{vS%KN5gndgs#r=3Z_7Br`A6gx8y9{h9QTlN3L1vuDy zq3|crR~gn3W(5}vw`odZJCu}9rNw5~pnLqA_*W|*xLj5rTg&=6!i)nMc%0yC;C3d&OtbG74DiZRH$(;LyB{JTgRq0mn=y@rPP$Hyb0$^@Z-3+g>B2D>HJ+_( z`T%H23xATCMTm38nPA7PVecW` zQ=#fHVAb@ncUrGBj){jv3kE!NWwP=FBsBa*40uGsBJEZ4-N*6fJ0$!?t|E{(Nt*>z z9<+ZQdp|65+QFkex9H{A>%VwZbYw1(3lIjAhWESE;R)Y(DSH8wnF0QEg=~hO+R}w< z*dqX)X7o8%9jE(v2d@1l8G{ky51BMjZr<`;2o;P|F?~11gAGPc*`jyonQdZPM<3Sm z4+Id4=S_ex1rJ5sXCAqWNn=Gb+tZgWO${!dfxZdl_=R0Uf90l)T>T0fHrH*r>EF28 zGRU=h0yRa}v3j)(xu;_z8=^!n1tFnsOn*JXC!<`MjYlb9hW=hR46S2>7j2)!?A9dW zA3A@9QlHQb)B7dlirkD)>=Kh0{nmQ`+l88Md`G2ho_LKziL@dOd0^LZgh|o;HbG{w z)2{J1@(KA{Ylkf3VYJZu0y!Jesz>A|kl4J>0<h&g_WmpYn+q#i2Gt=PeE&# zAc{V6h1s+(JbUq!ZTc7NTtE)*kY9A!&#tZ6+eo&wP-f~mVdyvdz&Kto4juP;pC!J4 zLdXi9YW8H?!6<09omE8Zq-#CM+?{gg_HpLzX_x@TGpKG&Rk__~=Cj;fb8qIGN>mV4@ujdG z#~jq-2z2*e31mfiMJMjOg~bS7AlE$&;xMk@bW7u<{G6h6HePKy+r;S%m&r|?iANOR zj%^SFrQ%d-Z-o70CYb829lUo@!tXrM@FP1Q=lr5sheoq$S&4(VysfmNm=2lc{UsGQ z2-&)!{Bc6h4P?vt+iM>|b=n$y)(WFp=){Vp@;-d=##I5FHF zXuN08&8)01FTw3(MMtlT*iTNszBX1#oJj=lIe3~5ot0*{4~UfpNM#)IE)A43rGC!{ z53)^G>og4nFrSVLCQHlNe#4Y{lT3TAemLFvOFuhgETmmULmC@dk&!2lZtC$S? z_m*4u+||m%@Ee>>{pYE+x$S@(kLD*9`BKM;D}3?zS+h3t(klDdTWzk86xD znZuvUGxZt_0OdSu*0{bW&z#wAI6}AER25D-Y~-d$?&q`tgzG?>*fs@aj(~D8XSH0? 
zLpD;nk7UGLV2ptogTgEQn1-t}^xdA4bVko~Y`i@6$NCU^SbQ`(T z7!3)izpI(X;KAZ0j33Xvrl%eO?`H%yYOJ=JDGrHo)Bv&WM=@~c+$wI22uwo^`vI8H z#NLjaa>k2|*<(EJ9tVc|Nb%E&Yo>oeE(L3OHIIwWKT#^UkfP`J8m9^1r4pCuOeklN zPi!ZdRr-f;Ky?y^10oq$q4-UZ1P z<<$vRk9OxFb$TQFWJ3OJWjd=#oNXkvmN!y2mgP+ z3{cu}>EgvDRy+%cUSmq1vQ-Bx%>?s@PJ@L2at1dX;sZI1G-el818pcDO+l zuDNVG4Y46Tf99%JX0DfCedyHav422S@m2m@H#h63oox?_N|^$<#-O4nZ)M zQG?S|GK=gTuZ85rS)VA@ur^YU)+h>u*2g{HuW@)VP-Lj^RgORz3L%@Ok`O=X0VM!BD9oYKcI2TA_SLMnfy`FxJ=_0La{as&Z(kuw7U_* zJS)F~sH;jKQpp4UYMUC*U8kZGns2EaqrN@%bLoHEez644rRvwZdmQwF!dJl)e$&5`hIz)-yO_k_l~#R?nK$_Lf#Pda<=`0t9M1b;@3L^oh|HCf-P6I-7&_nh z`hm$bG@X&PCcVo{GW*du!Y6Vb$mQ8UP*Paz-nGjE(>?x(cOXjoaH{wB(K0%NW_oCv z{m{vk?f7x^fa?u?rJbO~Z=;W64u5%qDQNI;Dd4`#J`tEW?nFp4qwdo$r0P!LiwV3* z7iw|UqI-9t@LHU)h6%w~|5nkW|6Q~JoD>Aq)c8892nBg7eFoM|T7R;Ep;a-}!StIP z`d8H6(tU*Q;YvQcv@CMt`^+t>l?F6sjOXGVNp}WNR-ibLB()|Cft#gZzRa5pEmbAMbrk1#JSpfq?fd7W)TgNaj=9fuGn? zL{|TYu9H#fgLQx*rm(V)ej{C`bKcjiV74SK;1FqEG=?$78iw2sqp+GKD5%78*}H1R zvVROzNhX7*%ZL|b8n3_-PVbAibYVQOG(L^%zK2M)fi=Y4Q>6m_b~=fzQ>571&_YI^ zXno1sy^0(U&~-o19hlOhYu5t}m6X;@yc%ZGLvt;^J|z>rDV(kS436WdC<_uuI!PvM z5w&b6z{B_m@$it+@g=NkQ2SjA?qbO#R>1x{LD2XkThuhw)zb(Ek9F_C3l)!aI=cn? 
z$wRP!MlHse!%c=5?|-1xWtup2mP{cW9WPAg%-Z7F@|mhfpBe8D&AojWcqI5kCPenT zjt4{y|7sS$S(1h=Ob`g%#v2f$M8>9~qT=AQkeb}AyC29=^P3b*knL_d5dsWLkFeBC zJR!SYV)^YbT%>5;8VXd}7td$@0Edlst$Dk1@N8^HkB+!M&T*8s(F8{mt@1In*`ONw zQ;?x+AY3y30Ls|axq0d9*D7SwnVSgJ07b!@)?hh2e{FZW2fSk$^|^CYNAP_Nzf0lP zVPIIyOWQ65&z?nbCTGf_m&@{C2a=ebHx|*cBNOtVRD|bp;FZ?S-6dTZRs#VZ&dLI- z<{g*YLk@35U8Zr{ux(o*dWc!R2bAnuTb%qH*Eu?2rMJJPp@{4 zjEvM~L0-iimQteCv;Kz`Fp0|L!RqGvJ?#=V{|i~wE^U4ELepXEu^}XP>F25oL34M| zHrQ?``t@AHM@_aJ$NRm1+me^V*m-w!=rt(7Qs|E@A*}3c+{2*9;R6S>^jIq8#?w$d zZ^Pnk89G>=MWz8~yvlJQT`%$QN>f(*+5u77fKPXZaWdBt`c4GQyPP5s#(?4+k7`eM z*Kzex{18Z41SXZSAhxoyiyu(k+J3US^Tr-Z5teKY_?`(LSx-yaAZ}XulO3I0$SQXW z+AtSbRsHE+wk!Kt8Q_f4dAhox-jH9-*;0$bK;7#-kC-rKobUJL^(bkXAD-gorV=UM zMI#<27u&A;fFmn!Ij;!X;TXq_`amSqO8@y=lc4DQ{_eROBTB$1H_X$sIdS$+!>g~mM-=yJt2J3Bh( zMEGYkkyf&zP>rm?CC{xy9wykG=Ts@*0Sb`eiqk-t^NCmaqq6g-}eT9KVrr}jJ8*ea>HuyE^}&PBVlYLppHl?2 zwgqt|{HQPR`thOf3Qo2M&c05h{84R<3EUni?%pAv>(TK>vkIz%1DFSRF%`LTaDJiLpV9w>!;zAl86eq8_M~{(& zuO$WYz2CgC@4(y80e;rBBuB9ZYJx{7Z5z4r%m%a$Qxc~+B52F_EVl-}>TB8WqjAwCwF$`5Qm3B7z`d+igLTd6 z$uNKS?$&L3L)1pzsH^Do5)$9mDcZ9bjSX_(X~^mk8w7$jUS1s(3Ptp3nd+jcx~*kB zU;ySHai1U4u81b&FeHsWEmryle*pkh>%i(3Hp$>RdcsPcg@D}o_o8X+f{dzfIJgxt7w ze<)RGAvjI1n@DNMT86onG=x-?Hz%${kRTR4jyQiT7Sp0-un1N*)DLcUP>Otpsm^a% zmKn;%TZ5jt*G624qoM6MD7;!4Z=m`#f8(hi!u&fJ+-P=KVQ9FK5^}^Y{6*4%0_T>8 zENwMb@V57}q*C-(XP^=}OY zI_8$?xrONtyRk%ATv9&;6=Sm_W4ukdEe+e(gMn^)E}xg%bkhi#alRac#=(GuaM?Ge zO<%F%e8!RgU|2B~T=IS*-mUVkKZPrs78gRfqIKUa(BI;|mX@j5MbTj#@yLtrTG$q}#_Dtc+MosgDQ^9A}K z4d|1ffFTTMH?x#{DN7|7=U?Pvioe*$6zBrOnPSo^`7O z>O?hX#}Y+fE6^yXJe+IpEr~BV!R%6s#m9{ZJxuMEK9_xVvP`-Hva5{8@ z9Ehq@8TnL%+^#6^&qT_qC*~N#w`ds{FcGue^cpL!P6E1~8^Rb;9JLmKTYuFj7$U2d zF=pfOHhSuD)cO%2AHZnsZgh1e58fL>RCeTg}m|A?g~g(%Q(MEP2X&$6yBei_)fhy~XyS-Nx`wCVyO zb^&afIoTx}r~aglkQI_gGl|7+vu6)~ig)a0T0W1K^p5&#Vo*XgEMa9Vvfwt5 z0(|F_GDm<}yG&!yCu47M@3T)yP7Z{HX%}F>^(|&a>nUVLY-)Qu{a*}+eYYwlbC%wu zQR;iTha$R?36{)W25il0dA&XADH2XRqK9slV*J4K4|e3hlUPrk-3nF@N4)S+8b%a9 
z6QN`2Fa^FkS)3FzwQPLSyJ+SGSwR5;tjO#~y>)0UG7~j;&k23SSKznq!kp0xVgYen$@HU@maeWzHK>5> zi*3Rb+?Hqf%^x1Sa6V)2f_+Ji5?*=PWBLga6Yakhy!iLe7|k`q6vVnO#WK_@p{g_* zDdWOn57sx9dUe!R%TK(h8`fh9CxJ&UB{^Jcuf$1$+ z6(Va2Z6Q9N;ts-F>u%h=~h6>7ba#5L-GckS1A>zK-I zU%`=RK?~9KZ7szY(~b4qb>~&vkr`11Sr?Lmq;w8}V9nGYc7KZrG1whNf(u5h_B_S7Ii#Pfl`4bt>D(L;;Uspu&)&T3* z{g%fFxykYb;3seJP_H#GFoE@M>du*VZoUo0Ni#9S5g0(d=?SDfz#c98W7Z*iy_CjB zb*b|^eu@X(XEyTMXl#4(&#?C|ki$2!3kvlTCN!l}4EirNV*V7u-#s&9EI0~Kbzh8wpUW@df&MS(L`c(BQc()T=5YPut;iU z?#SCtR39yC`(Jg0BYK? zvxW}L{mCT&eWd=|whRgnQlqaCvxz~223an6(%Yj6aZU_Is~w_N=w|=GGFRa4no`&j zvT*xX1$0O;hih?AutCup$#i!E7?h>aMS2DqyhD^cHC~RDxc7gHVLt+a;?N%`@qc?P z>8bxZr~XsYicSL1Mw5$!_zDM*-R zCJF=!?LbKXB^T&N(n&ATcQZ4akP>(LG@m+Y(g=NQ9!)Kmm%A!LdFxEx#oD6&&zUpF zHlE3<78Vs&7*X=RLU9SGbSPxQ2fpaD-U%xd&16#t?luNs2Q8@+<2H>cOAM*Jd;k6h z=39T<2LcuLfb~nNqY3RzRlf3I=UbkDXv@=|lsskXRMd(s`OT=3jN^YB&pgjVx9b|w zD&XXGt-mIqjA$hc{lxVh#e;q`t7f zc8y(3DDdpo#Nl6t@u)d&>*32en^yFA$ciOPf{5GAFIbFMG%*G8$MA1k&Oq_kL}TxM z{R#?kP=aY5x)k(cnE!-lc5!wDe(_wq`DVA@j27!Y4y5sW!i{A z8%*7;vncNo*y_9AB)B7T&aK3|z|G5RM7(UGW{#4RdfmUe1)tYAe=8uCocd0!{=DZg zLs%wYow~9aFcrfiVDj<`Egc;dVv=q+sJMkN)m?Yl|^6Me{o>W^78f zRGyW#BVrj~xh`6wOz(R9e&Eiejd;o!kRM&JjkNDP{6cLZzzSvyoZRdcbkvQA5>G5+ zSk8s`Z|zDjo*U#ZwJ6}3Qz)}^Kd3vMx^S4Q!Kf${mciLPB4`>X`F6#^!sOoqb0_D9Y2uDg|qOnAmy4)Tfsm|H;8RyV1vP4%n{)d2q*%>ommrqrt&(7(SC zF1=Lj9o8L#Qp;X#hvZ{?8I>A>&kb@+SzB@`I(Nr59rUc0oTm&9Cv?{T0)8k)HRSXa%x)g^P8n$ zNY_Y+$}?}AsC!oR1jTC5HF6(Lyz0j}@2)Bx)egh6l$9;jK5+N-z`F|G@EHVK`Ow*} zpzV?s%xwnjkRJu(nsgxC@W%TG`Pm+-<0lo*cFtlDGuWOEkIyaGYjo2NCMgyu; zDK(R+i<;M?T-2g7(q1oYji@l)x{Gl5fjxWP%gz>P zY2#snFOq=UE$S#Qw!j5uh!i`Q?*)fnJp`IoG6oSnXPd0)@@NiFmQd$j8*oiFt&e4` z2Z9S`vW}{B-!cBh6>@db&&g6ZHAG1C?&p8LCY>`29sUCG&U$PFuqx*8-R z&*BH5C){++6aJ(ZE3Dc__aeoa?%`#{%y-iA|o>lQCxTkUIok{FDQnX8%Jpy2(RG^zKJMCRo7Ec84 z!dBeQ`ze-hL@Mp$4K179a$i8vQ0K zb$$FCzFV2AuZk)V77>eyWa)Ryk-U(5(_tIqHivOd4kRwYk71JTJ-h`8E z@eqyMa9i78yS+Ur&95DT)DHDfeKps#IjKPP32arex;;r)nljjT%^)^?oh&#i854|d 
z)E?*xvYqj}IRG9#Nl`XSrscbw&hz+e*I}flnU?pt#6(+`ALszcV(rTfzeE!9Qp~;e7rf{UUCU+Har&S3nFu)Dsk3qjl@n2I7df6GX;3ngXNM;X9cBMq`F&Q>|&6=wrYh_0_Gby(}ixBMLCX#)IHAhWx zWP_sWGCb+nu}CVL31?*Y6Z~zXC0odkrybocY04B;}S`=sTBUELHJxm~Ze8VeDn zc+)yuC8d_VyNME)YBOdzf}C}55A6zqc#jiizkK-6JT34lo_C-o_|tie$2wL-zj?Q@ z>z2JbDNB%Jh}y=6Z=K1{>$hlqKI9MYOXXY<(Ub+<7QmT6b2>txamJ&uMHGq^G{!@! z$$_%`6)<^F=I*j@T|58H`GlomJF>%syBze@*<5_My`f!y()r<#du#r9tN?Jz_+h}! zQ#^dcTVZ?j+vm&JIpG#!*RhrROVXmKi7AVMy>p#yZzVEX7P;$-W5Va48k@cwvH=qi zhrl1io0bzPj!%_b#kz&)l1U4yr%4s2jlz0|J@X_K7%Jkld-keQqDgE6H|`nhD&mS6 z*wy^}c1*?R*N;In*hd{u3z$%csk;o!8gHYBE1^^Je8kwi64#2;QyEEN8x2oXHD&?_ ze$qA(P?8msZ0sM|I(DqBeAQG=t@MqL)A*~Vefs?AQNN0YW7G~#lPo{np?n)!j%@nF z8erwyYXJ>oJA9WK7tjV0@SVV7f|5}FQjLCUH8V2p{4ikQ*4_BBO)%@zrxkjpuY;lV z0kqw z$;3@gw4~L(OP1K+=X@%izDE`|;0{vve<6weA1Jq>(Rc%GZ5!{-Z-+(ppaieO%} z;%W*G)|M-`N5Ov-%+t80Gduy(r^{qW{DO~ks-ep>;hP(squ8cFo88$FW(J|+Q2H$N zE`qf}(d+?WTK-yR*j|JKe}7da{by*#TFTlNL4C1CVi05 zv;c)Oz&WQIJ-RR*P8zSe5Rp#i3Zv7v_N4O=S$}I!=wH+K5{3{Xsz5;+W_+WnO zwQ44mk|9He%x^n?(IVM78*Za2llgsa#s;nT#BmfTdTt;rntPo}&&1d`$mSNmfO%`1 z^a-*D2~T~^gX|`)sh5RpQ+9+AF(+Zf$8%Jm()ueMWqiV1x2ItjM(^$RnJnn~);iL^ zClg_Q>{B)OX&ndm+ZYuUk3AR{GB{6yUT4W4jT);Cn>KB4(GcdDgwjJ*YXnWDXb15< zls$FYG*(fJxK~NR^Lyo;{i5fW)>l+49{m>`Vi0aw6Plux^c@sXzwcc`oXx(Rbo{uY zvL^1EVcahQwON*{- z@Ikk)lEn)U#(Cpk9KOVZ2b*vhU)4vfJ+54{`q$4^lq<#;I(O@~bLIWGxN5?$AO#F3 z{hAUiHZWOyaSU9;L|pcNkUS=3P)F~3mx)ZpvQIk(DTJJ*!JB{{aK$uVQ`a}AjT1}YOF#J7l86uoZ!8VfN+#7Ftr8%3ujsIc{$+{u^ z1=I1T;N6WpeH4W#?Lflv(`V1NB8zqCPxEwxm#9BpV|*u1SK2*0vdgk>wk~_3qi>~- zuKTy}r6`8sN__aj-p1vJw9@;EoVolI8#LuOw;ThB0M3y8k0(4n{fvm90SuJ7ZqXgy z8c1R8WSU>$JlS3p?;D01b$;|vlHiIiQ)Nin&f6Onrbg%7OKAafHz4mRUE9LKOHCd2 zQ<3{70}%B38Q3~w=b(EV?(aVki-x2BGUOF6lelStcE+({uSkVC=DJjvMO{$LNSSSM zd>XzUXssL?Tp8feT1p-*goxglYnHTspj=tR3ZAc_(<2->(EgAAu`2xal!qnen#(OU z@H6Gd3U4M}$o;_*M-Ki1^@{y}?#$|&* z(>lEUh?zkP^g-$XsO_{~vAUB&qvsR};<^v95CX%JxHS{537akgM`SPnrJ(D54x;8r zco$&lY6ddG;AM>nqH%V`XDQ7Yz9tCvowFq3A$_j0LE46TO9QXvmUIfmA#DJog{12e 
zL*dw1c4wKr5))eE#O{<}ez=z?&%o3gU8n!v7aMzVA@oE&h+5U7nTJs|n1x{+-0{-o z%YH}E$r!;|P-2yH8%A@z6;ygM&t-6JAJ|@7+c;MtB^@B>Z2Q-AfCKF9b-MbkTD7gy zB37=EAk?)OQU)?~?wKbPLTc|m2N+mo0?rhgT?uHY?|MZMZ|mf=@#ior#e^u6Ru8{X zxYju)E*@QWlB5`aY~ww+&E&h(A`AFBOntXSQwo>ZQf`)M+&_^lL&(-oFHP5qvBgwu>3aUhQ-cxUsX! z8bX$K>(WKsN7AA>4Tp;m^04(rqzn(dGqbWzM@;fHszjxb1C*17lw&de^vZjQLBGdt z8Vu8Jl5_M_WTguIt0Mc?tDAqk*X6ivCZ1YP1|*PI+IM`)_?AyzFeGsvT1~0TvuMX8 zI~|sq-Y_x2BsXf#0}N>aXGmSyHp&*PcO=vi7!Lory~J(}f|Vj&pPJ|K+yo{?d>lHIYUfqz+)0!J!UInsx#lLz}_MdD^K%zFU8|n>F$5+*2{7?i+ zINjel$70w!UWx1GT)3>@L5ow$h{{+Z1_Gg~d;NnFbNdx`F5EuqUb4FU`Q_G#@!c#Z z&z#wU8|D=X<50ZrmCGA$uZozv>_#X*DapJMyrO4&nc^{8jtu($>U#6Ip7XE&zY;2; zQYvdo+hDYfoe~ix+aQD_vW^fFg^H+ToutN=(qb-9X(a~|8-T{t7`9NubCN3QMcQ5>Dr-~4|Tm3^E*$wSR*T$~-M z`4aW=NdB3TwjtWYIVw#v*Vd~YpELJ{mg1Re6HmNl7;v=)G8%|i>MFi`LueU=HN4ia z(y=QwTc0gkhDp!D(z4WD0lo?+xE{ZX@>MP%!HvhPM)@t0>5{*g7%0n~9C`MJbm@W9 z0aR&{9ew3Pa_RN{Ez7fC;52?2;(=A+md`%Ay7sNGNKJ)wAi|z_)nm>On0`kL*+|VPF(Tlsj`<)}eRZ<&`Mo@$F4*V3~oqi=bX}!D&j4TyhZ}>NAaq{&n zJ(TqipduA_aYPq_sc69c3YP@x3vF*$biY8R#_&jA>VxASNBnA2qUl_aBI}~j zty5h_|L}<27jhZCJV-1o5b1e|a6JUdUJ-BP7;0kNQTSgJmeFa`sRp_OdEW4&-3}nJ zbpGw^;?gpxfchr9F>Edm1-pXbx9-A`@H*;jISm)dD31YavG>H4gHe?s6}L%?RjICe zqvMgYkKv!*iys(D6y&>uC?}bURs9|O zEHvL^Xi>-gq^#*-wNGIILK8{ziFkvkNQ%Jw=Lb5>sz0goZ8+9MGJ-W#;6XC52b}Px z>;vcg5sxxyG*0AGpB>uR>yd#pHt+Dz86GC7{FLb3Eni!jdrplp4A4mbbI+A}Fts1! 
zAwdL%N2n3<^U{PbX_lHbsb$9G2is5^Rr&;`q(q8v^-yi=x7s&mx6#KBsaO9W#RVEi}It zp9dpzMIr2DF~T-+tCR)mf1NX6Un@a5pJZobC@~+Yx1pa31uZ{fi;^nTW&yLJ_+=QY zt%vp=W2ClSQV5X7SWME@xuMis6biCCGyMmc^4T8ogB3hZ;1*2JTLu%$VW`ra3>xNetz7cyGw{VKo()gd^yBrDiptj{l+c_QYj z#Xk{x*DKcRBbUiwtC=tMQeQ*UR?y9eJO=FZeJ&5Mcauw)pJCbtHlQJpDTsN3b3}NI zZ;%rbm8hH&JE^v+paFl3)jW^kgmjLbc@MfGnUbnComX8%Vb3f}$Pqw;yxkV;-8bqL^Aj-obVzzT@w`mpYJN9_d+kgK}W(aXPIT-$@tMiV5 zJ_}O*R|~MN6Y$mn*Jboo>OF=<1tg(y&_l&DHGoq%mUg^hP3h^@?%Hp3j}7jCMwwng zpNX&n@2}ic%0DpzWFv>@XZd5yh5lN>lVf$0ss{tW=MJ!BRd)csuUAtYjg;MeR|^d;fY)063siulv&CFn`sgQ@ z#~LH&P**>%=et1J<#EzY&#N|E^7Bvx#>B@v#1sp2_&Hl495MYX<)6!1f|czqJl9I2 zN$J5Y=T^o0%N0~-z247n6gYFvQzAnt+jNxkkBwZjCUtb53w#}tSo`-ZPH| z@6%&qJRXRIAa3;1$;UsmB<4?1e#ys=LfKUC1hNs=Lpaif$&VR#cF^GbJJfb_b9hVh z*IUXbKv2ZBVP>YSaR2YWwNY-0q~?Q2tfd;&3`CI`-qZHT7Y@{X6*aZk(#^!CN$?HX z%=KK_mn1ao%bAVM-v{5^^y8lPN1Bc3tIwIV*^WQYY0y`<(fAmO)SAW$W=~F2B7667#x{Rn%2M@+uFWsH5u9_-pt8CV~V7-JFj& zLw4uQ>A*rbqzelNQfn=2Y%112V9kLn2ZF>ahKipi8+^ZJ&>B+|!vdh`*<>;d;Qy#N2 zFQ&j1&6KRH#A6hc4^HRKD3@g`E+trknHF;6cO4MEZvbQ@FW}_g4Ao9Dr{CS&)xtes z>siUY;zf^mWNd1h$$9xQx?WR9aS>r%4B<~#{|w9va9!UI;k$%LwYp?JH6iWqzax(4 zB6*R8iTJudeGo1VV~cZb+cs@t>EN5@Joq-Vx-9tYH0`3`?b{`-xC+p$BjbMW8ic5_ z6tQI!2&staL6d6E!M^-fNErn>cDBP{6avZKi~3${qqu9+k>9j5G}1C};k017pWJRR zNFyS-z=O*{K8|1K)DGCBGe(wPRugvO*9*O&E;4HE!?%2Em1uip0d)I^;1b9s+jnuN zIuLI%J9h77iy;u#-So|Dw#ALvnZbFSnQng4uXV{G?06J~6PkF%xA9_2oejCm%fonT z?8`Ps`2c^V0%unBQdLv)Xt7K;Q%f|ks|J$cMwk#F&e_v$SN3Jk=6!S|*8_svD3-2R zHaaR#wYU9AeI0ir#KV%sNVNv<9}Dh7`!0&d=oEIQvR-)afenZUhyYvUDO*nX(UwU| z*pJ9@BDd&nravJnB14GXXgSF)*%(oJIxCIugopj2d-^y9QABauL&L*mhfx73HV2!Z zER;)<0Pvm13d2~Q~XB~c7pE?j%7Hh{Co6B5oBA@I=yj8GrM-Ia7g zdmazPW;w#n|JsZ)q>id{zVHIEkvyC(L+$lOe}6XC8(2E=p?3WaZ7ICjoozd`x}G2D zBqVclNPF&1z}{N_e{OEtpyH#bXMu;ou4Q;ZzW3vjyV8;Q zpYKnCh_T9_*eS<*^havsdyd3V9&nzW zv-V)sqmtR_c4Nc--@h?h3ew1be`O%!4+XJgnG8ilx&A#JnTyx~(l#LQ)XJ--jtb}X z;Y@Xx8f)toK$8zF_!Iw=CEJXbo`UOEMA;pbi1s*!R5tf(R8Z)t3#Nw2nzp(zuE{rW zVtj9TzwzsgM_-FRe-6!wGBueTcUR|^NO$#=15f+Jnhns%9y3+rcxd~Sm{{!;0|TYb 
z8#+zWP&O|b-P-(tfkTSoc$66qn#RvQcKrC~jX__VTNQM?8o4aE+psFX5epX;52Oa@R@BoRWUqwrwHF*ZjeQ z-3=R`TzkbPX9V=Ztr9+q1)WM4+)LdD4H~4;TVQ5mZSy#xs&iKIj=7>mZvUK0E83wR z-Bwu~woU|@tmx9$?XrgSX$(RQD@`UI35RxiP|B<10bZjlt9`eV43iNNOnuc*Ktmar zwPfZzo>IcY{&TKw1yp;GA}1bfL3mm{q9*L=1b44Kv$HK#xmOoPh2k7zy`4DY_<{-AY#ek-9*DJG6(V_29>h>IbG~(e5+h?f%~z_Qs#tOcd1n_wT=d1&t)jZvL;)NV{krtY({*4PT|ephH8DgJk}S zE>|M``_wy6X_75=NU|4#r%t08mu2N`{3*a5g5WI!!nu2GXRT5P?z87 zFxVm;%#`qzO(U8T%iKB}gd1whL=Q7^+>}f3iab&| zG~8Q%kWh9Zk}wgST>&ZWFq=k0rYI!Ujq2z(#pak>3o)s^1A%!f&}F=nUK;g?4L}(` z+)}pF^2WK}g*m0KrZ@;*zU%?|qWpwgh7w+RRR}+}KdSx1Z?OQR-~S(yf^Z2Ko{48m zrdS2bN&k{7*V-6Jatvcg)L-EL4y0{1f3KUksX)p}Qc_5CZoc)S_+COLscMbR`gAmA zq}2%s`E7_b%}phRN2<0*ZDShWhpZhx z@^88YeGJ2^2W=0Z8ClT$s^!N#iSIrtZV5DNPRFuKp>6c*fmbt%i~o$tBBq6tBwyCi z`7m>$-dJ08VBV&YyuRc9(FaN#u%dW$mpQoS`N-N?&OJx?m)vM9usxg_Z)g&6%<|I= zmNozbD92M1L~?t|fPWHo@*Oi?BH9GEO9-B%y^la-p7H$o!8AwZF=Y#6smi>~Klaaj zPg-gk!#m{5g~<}Z)}e#RI>|?){mt`Bft{9Qg%K2)5`Zaf<)+5QV|1Pu%g)_u7KkMM z5NJ$>o)14?tmMdt(tbx-Dbv-Gi4yY%OXav^Usj0!B0RVDk%aa%GCJPM*b_+eU+dSO z_zlLYtPWJIu>`f9>Wc?lHraBGM%NGF)dlUWF2~TQ2_STDXW?nj?(;C=&`4=HvPVG> z@cNa2$xqn?vEnwFBH9VPiNWGHwIfoXMSZ;OmZE}~dTX;E|KI=F(kkWxYN-1wFwJK* zieq;xU@XbCc{>u~;xdaaM)>?^ahP@l0a6XyGR94f7D z!#704Oq)_YWqePbna}{#PVc-(GHoLJ&vPjpjC4pkSO050H#I1}@~>;(zmdUL=BSH% z1pMPlY0RTL7t@@|O=PA!ct<~Zvp83e?{MnkgyHOS6_%>)xNc{>jZ7!}sZ2Ygb$gV( zJs!JEtJFO~!I$7eLnOLoD<mmLwfZb z*C->D&vM$3EaYw~H#r_E+!tpzg6yzr^v=g`7-?3&wVW8uz2M)a5(e`^< zoA=~u0H4Y3V3-zZ@lybaby-e9sKh$C^PGRwWOhUY65IU|Td0T|ap)#UljMCqjAWCl z*dhB9?k1<^?;YqXN+5q1D#;plF*j}84Fx^+Cq!>@3vs_ncH76D^%;=`hcI;Fd{Etr zL!M_dwx{{;Zuz9}UPJw!v-SuB=GgQ1ChY;M-W$1a#fqKm!kpsXI|M6N*&K4aB{O$i zcH^7Gqmrwo)@QAck2fXzhy=FEKAMY@W~JV3+-oql`5WJ`+Q!v%28dC1GQXu&W!}6mG}w(#unM%gsu|q0MStU5**30 zaIdw=4?p8-ky(hK-+BcKdQ?RpzCx2ivW#=@4%5N`evmiRHN{V#rg1G)`u<-Y++gaqZXrGU_wDwUetc96znEwFt@Ti*o`1}aL z9uvqfPewsZISDlLjn>hqW1kaWkaBnSAEjZUe51k>f*WYVO+RVBE@mO1{JBa0m%cJ{ zd;P3kqie)N`**yOMk0d!DAFMi>pk0Tz)oAcyph-~e5AjvW=gd5t#KDA{&(lek5tVL 
z;~o)jTX-@IJa)tC<-!gl1{&gBy2NoE2H^Hgq{E_UmdGnKz2steo~o|&2pkrAF^+!? zCw!>M@L2@71?|zFX#;I+v?0=7&u=E6zVO=bD_5&kdN|pvA&QrJ*2gW4^ZmhzrUTA* zZ*=rHFJfh#&|_N9Hth6EMKlVqJ%}8VO$=rnxx(~11GXnkAXdKBG)0U zf-GAZF@yh&c+g2?2%&tTXFns0AkfCC#3*)%<>j)x)^{9@)~L1t{9whUox8DhPGvNt z0Q+e|fJOiCdQB7^P)Qn`5XuNNK-;;z4+n9yIJr+Jvh@9iTE4hcY1C~B_pROUr`@w> z!8lQW4Pxnt*k`^#(vjB^EDnJe7lA-Wq1--M2jaGX7#5pPTo!2zE;3t*b5+dJS8sgHCf+HGbCr|eRS*AZaGq;LtbVv83i1V>M<{L|h`H>_2di3hATo0G(g`@i&?*FSY^@6;!*_T$ z9k0FeVj~JESld-AkGGsYE&I>M?Cj#uRw85Ad*M2Aa=(qEf>TY?etj$<9f6xgQ%+l9 z)%6qozig%@3IUSj-MwDluSxP1p*_zc@Rzak^OL%Otl0OlrF#a&0!B8JZtrP!L7g=# zd@<%0%a<)11(g2OX9PV@f)IBX0&uvi_3;1D-vpXKz~?eXra7|+>jmUF4bT2f|?h1ns)v41iA@O zx>W?7nCy_jQY7W?y0hbhTB@d$DF#Tg#+JP&IR+TEOuuxs+RMRYd&uUr6?d2gWcQL^ zIVvYch`MkLwX$MX@ikzOCG1VmOIdP9&xG=_OrI-Vv>b{DofWhT&Gl!ke+^^K%U5%R zAJUFGRIdZdBTF4mJz?PA!nyd5{L{_~phMTzcDncoP7D=?$7e6%!Wop?79<1qKvbF? zx(*p7<;8A}$#0?k1=5u!@&;d$$bQ~`;DQ2y3jk~^oyUjkyMLCet2;hj8!K4Iu|S6? za4wruv$r#O1W%xY7e9y5ZhAEU(^>;xUtd|N)8QK@1nF=2HyNmja8Vv16CEVH<^|wL znfu6(Yre%RN>Fn2!a$&3WU5j3&I+d@C~p5upgl`g0QQKWaHesIonuJX<{o$ceTT+) zyEA-j_7bFp7op@Xq#F=kdq-b;7G^R{UIj$Ew@VsdPbN4YP8&+$?{P{mg09^_x>A~hBr<~MHo{K&Yv~-cG=e!|a zE<=Y5VXexVHMN}mFlP|#Nwa3%yRmQMr`p=m^)g$qqa=6vUA|IDQc(K?n%W!BaFM4F zHh5vpl0FaJ?W7LZiF2gkD(n74TX=_a$c3D)CIGW6ekSq6a*gC>(aLmbYHC6sc3Yka z2j*lUHr^C$VqqV}$cjSs(`Tt0X#tAwHV9Bkbd1}vz8nE}wIOI{!(|rf+ zw-BCFR&Y|oD9`PAml4R|>%N##{|Ei6D9my4@OZ}gH2&XbZD1IC(oM zH(TxVLu{zlY7{7mj#UV+%dm+wzH-oS9mu=<)KRmPH2sIK!RZ(9C)DCsF`m4v1a~Gi z#8N?L|1HnyWR$|s=6)5J5K43LM1mObqi6oR_ri^RX&*qlaU5oIGwHOcM5(-0^#m`n z2nn?{L3h&0;!`LBJ3~37{FX;Ao{&~WSn)}MUiA-xvT$mt#TK&6caffx4*oO1+|W=7 z^ytFpqKyt#<+P;GB-=8!Cg;2VV6+f@qG1724U*k2m_l>P7|=(Xe-eyiAp2PRQJGs2 z$>Cw>g8mm;!8E%X4~f$5QC28ey%q}pz8~IKmFtMCGcCy?V$P!=CsAX{g$9o z#CAfQ7(-dmI^zFt$Y=C#Q7~&S)U2hA%~r?+U7~|@Xvpsr6qp`ZLHK`RVWIoEbeeEk zMqBxqsw*wt^;o)nz|QvC<#!s!n>Rumwd0~O71M85tW?<(xc;|s1&FwGf<0icOn4^vGku2b{8qdy&XtGHuMFn7*^9a8Elg=BtqsF?%@-mIPAf zPYyWmc9{lQ#E>t(F_iDojlkB@<3z;cp=+wP-ex44gJU;v**}GaX#-3T=hk^lLP!({ 
zOBM7Kp>1zd(}$L_@r!2LW>ZFsPJmeFT6=6DG7b1kLOaq{$|BUjHkqWAy*U!Xj79q~g3GG4D^d122X072AjrX}z#O>(`NJq9H9duulC`3m z`1uylL@N(rD%--hFniuiS64}@`)Ysrp;jRsrygyF`t=X5ZtcckcH|txY4GF0vKWD^ zC9Ju(JNKgwksN(e>Gmmh3Va>!n|rvRgxR#=8-G#~ckfTdhs%w2XURa)+UQ`89wTy9 zB=JzEOZqE~ugpUpVbr&8_`2zfjXxzq8_8TVk+&j^m%%rIfVkaU%Eg~^$1x;LA;s!rWT)>Jy2Hh zybuj4XnJITU9}T+%v6?wt)OS|p9Fm%fV(fuN*MjLg#v;tHILvJ?y z+eb+OI)Wg{2VdU4d9%t4|KD)#Zse7tCNH3Gf+^4*^9*#V2Rla%%+Q)y4jVpP$Lu%8 z>enM5!2$+7k;UobT=mUa4725Y2B z73L#&91uzdQDTA-yW)!5urtI#xmYCPG@-{1ll8`k4afL>!E(!=PM!A~hmi^-dX+Od zjH(v2nV;5IrnWwm8#P{47WIbF0;L8Y3n?6JW*l&1&GVlWvP^Ko3r~o^S@9Vv>_KCu zVMFGkl{YITn1Y1-cHA=8oBo4IbFJPz`i1G+L;Lp!GvDo-*EgcW;5sg^tsFnnoHFc+ zQN3`X#P6Ji>uI*bPEw)erQwELU(egeE!&muRA8gv6FoUZ-m6!8B#`H)g?K`IG1j_hVA3JE?pBFg z7m!r{ABf-b#G{)$Y)&Ie zFwAi=ZcRO##TuJQk<4P_RaEJ!)vN6}Jh!lITnpMTd5}lhXb9Glj~^#0L?K%R9XorF z=9(BiL0B9ob&IRX7e(R%qrH(K9h=z^#j7EKhCa(dYb8omX>)@0L`x=VPMtwV zzK@q%{qjpTSc%m6MbC{rwC2A+dcsD9aSL7(mI%Ssr4h%Ds4It}n2$ju*6n9SU!{Dq z5O-t#ZMkQLpEbN`iq=w{NvP_GqRrHRXMeRAZO3lzaH}TnmZ33=lyC1Tl7!W zQAh`Zcx_s}s6{=+oVX|MPnbW(wQ}Z-(;FNw&R#x+YQSr;WH+K7*)_42J1T=8 zc=-$6Uk+Am6%BBTo$@Pm`bf4nh|$vb+u}G4&+7&YR1zp`jr; z@iX-jYwW${b13u@BCfWDh;-tTU9Z4n<;Yb&-;jVh7)j-MlIu!Emi>fB>Oc%9E!->| z)c?d|3O@T2M&`p`%=zzep3OHj;q9RZm|lIMhB;?C1#Kxt(@ggVbW$iUCMm=-agF6d zU7xUT!pC=sT+iu0ghTB?=&t*F;0O4jMLNQJ@oDQr4Y%sl5P`mkY39#4)*Sh5;FrD3 z%t92)sb5h~_xxtT^sy*-3iry$hvFg?QDvr%+$1Bnw9!0$2AKw5#&;KPbjcL{vgexF z0HR3uMvWSDhs3Qc7VX_N{{_Vmg)%jgp?M1#Zl@B!ENJ75y?5>2?KQE6z(8<1ajGe= zWGubSmm1Z}+0jwL0_n=uk0G6&EV$c#6`)|Cxhb(0K!!k;Rt-DC!tAlPU%70ku;1?u z|C%GAH3(;*Hr%i2lOJ&Tu6VQ*lNK-a{Pc*vCNCDD3 z_66q;-H9qKSE=s5gpx@1-MhP#b%g-kpFmvzC*r0)17ZMZ)wy?tLACd1zIVac;*6HF z?~Pp?p+D`B+6d&V_Ue?3T+wn^e1`8Dct9i@KFtR3Iyq<93|>rejmJ8NEo#D>N4 zs<0s1Z!vA;G(d)CG9x8g>_5gMt$vu;(I}1Dtpmo0#A{Q*PS|E|oG?z5@LYK%G$O@d?fMuv{Rlv<8$vDAwRRTkEUFXH44 z`U|m{)1VFYN%ljv*-=e{a^C+E%Yt}kbO0CKm-QJuNPFp29G( zWXYS&gMDWv625bN~s!BmnGSXam$66L) z@!c3Zo=HlIsTsR<4!xF-KT;bBZ1F~I)m7d_7`;KmPjMMn6SC2MDrgT$(s}BuKqq)^ 
zWB$z8d~plQy2j9HTMuI_xxDGekOB#$V1`#AxBA;k>>-kU|7<;T-!Qg3FKwWjmC@m< zNARtY!+z(i5|2NVO5^)AGb7`vc^a|#+^6D^aqks9$$TisY@eF1;x2-(fS3&*nV=h# zI?!;F?&QTQ)JBkMHQgLbjH-H}PNZL+lw=o@Jetu=QmXiJ?%U-f1>pB%kc{7h|JF%S z9d@yLl2cQ3q0H?n#u2ytgfWWg)0&1E+W zhso-MVRze#BjdizxEFz#@bboQ7lZC3B_~fB>ARjFB9yS_D|d(VF^)6M>{X$!y!`H* z{;UgN{L{a47~lpnLv4VxaXkKo)9gQ-SAGotjn{jjaSmL%sO%T%&-p3vYKTjAkM}Hv zpT9U?coO=abxWh6`RW@P^T$fb_-|0Kg^W3mu!|+$#&1;~gSw)6U@ieU3%b%SO{Wphj+5dx>|6jVkrDMPpN@nC<9G^O~esXnn z_4YX)kjDY+hATSY9KSD@dP4gCT;rT83_|S5F({k{;mTw8O_l|#7(+gd)q=x}QkfOUGnlvK0^uCkyi-nV*9ue+P; zDkYq7$9t=eTNMjt!QENWe(n2W9keR$SBK!YM}+xq_yELnh&^_^k`=$4iqfxtXdR2V zib--WRu8Q`d7CZ$rAuV;bEn8sh zlczlwUX#59ol!5^yg(c_-Tn5`KbK&YqwY^p$nY3lSw#sso|u@L1*E?P zR5h}Fc2dZe&ITr5djkywoy7|#hD+X9bragxoq&#!U*ErbXMu%DIBl=FH!rE5m1~9# zodtfCB_(O=Mo~{myYA#y4N)r|q>c5|MDNB=3q_}#e~Db6V>|aoM3ItlXYFv`niwV9 zeEJxM2j#a#T^zrBLS&`3=1Bal1ii%|yCRTy z{o-HekBm6|gR?r*r6-xsy!ER7vRuzp+rW)M)S&$R`};S~DXB96PMCC_l`|p{z5bn0 zIEgLJABbv}a!zwn;s;P$4z65Lb)O8h@|>M?n~J(-2W$ z;qtgU*xWp{gb~tDWJJiE&BDa#3|K#zh~4xU##ic-??g}MQ_9~mHb#7U%cx6)X4