diff --git a/northd/lflow-mgr.c b/northd/lflow-mgr.c
index 713cab850a..f18e14b3a2 100644
--- a/northd/lflow-mgr.c
+++ b/northd/lflow-mgr.c
@@ -17,6 +17,7 @@
 #include <config.h>
 
 /* OVS includes */
+#include "coverage.h"
 #include "include/openvswitch/thread.h"
 #include "lib/bitmap.h"
 #include "openvswitch/vlog.h"
@@ -100,12 +101,6 @@ static bool sync_lflow_to_sb(struct ovn_lflow *,
 extern int parallelization_state;
 extern thread_local size_t thread_lflow_counter;
 
-struct dp_refcnt;
-static struct dp_refcnt *dp_refcnt_find(struct hmap *dp_refcnts_map,
-                                        size_t dp_index);
-static void dp_refcnt_use(struct hmap *dp_refcnts_map, size_t dp_index);
-static bool dp_refcnt_release(struct hmap *dp_refcnts_map, size_t dp_index);
-static void ovn_lflow_clear_dp_refcnts_map(struct ovn_lflow *);
 static struct lflow_ref_node *lflow_ref_node_find(struct hmap *lflow_ref_nodes,
                                                   struct ovn_lflow *lflow,
                                                   uint32_t lflow_hash);
@@ -177,9 +172,6 @@ struct ovn_lflow {
     struct uuid sb_uuid;            /* SB DB row uuid, specified by northd. */
 
     struct ovs_list referenced_by;  /* List of struct lflow_ref_node. */
-    struct hmap dp_refcnts_map; /* Maintains the number of times this ovn_lflow
-                                 * is referenced by a given datapath.
-                                 * Contains 'struct dp_refcnt' in the map. */
 };
 
 /* Logical flow table. */
@@ -566,6 +558,9 @@ lflow_ref_destroy(struct lflow_ref *lflow_ref)
     free(lflow_ref);
 }
 
+COVERAGE_DEFINE(ref_unlink_clear);
+COVERAGE_DEFINE(ref_unlink_recon);
+
 /* Unlinks the lflows referenced by the 'lflow_ref'.
  * For each lflow_ref_node (lrn) in the lflow_ref, it basically clears
  * the datapath id (lrn->dp_index) or all the datapath id bits in the
@@ -578,18 +573,20 @@ lflow_ref_unlink_lflows(struct lflow_ref *lflow_ref)
     struct lflow_ref_node *lrn;
 
     HMAP_FOR_EACH (lrn, ref_node, &lflow_ref->lflow_ref_nodes) {
-        if (lrn->dpgrp_lflow) {
-            size_t index;
-            BITMAP_FOR_EACH_1 (index, lrn->dpgrp_bitmap_len,
-                               lrn->dpgrp_bitmap) {
-                if (dp_refcnt_release(&lrn->lflow->dp_refcnts_map, index)) {
-                    bitmap_set0(lrn->lflow->dpg_bitmap, index);
-                }
+        struct lflow_ref_node *lflow_lrn;
+
+        COVERAGE_INC(ref_unlink_clear);
+        memset(lrn->lflow->dpg_bitmap, 0, bitmap_n_bytes(lrn->dpgrp_bitmap_len));
+        LIST_FOR_EACH (lflow_lrn, ref_list_node, &lrn->lflow->referenced_by) {
+            if (lflow_lrn == lrn) {
+                continue;
             }
-        } else {
-            if (dp_refcnt_release(&lrn->lflow->dp_refcnts_map,
-                                  lrn->dp_index)) {
-                bitmap_set0(lrn->lflow->dpg_bitmap, lrn->dp_index);
+            COVERAGE_INC(ref_unlink_recon);
+            if (lflow_lrn->dpgrp_lflow) {
+                bitmap_or(lrn->lflow->dpg_bitmap, lflow_lrn->dpgrp_bitmap,
+                          lflow_lrn->dpgrp_bitmap_len);
+            } else {
+                bitmap_set1(lrn->lflow->dpg_bitmap, lflow_lrn->dp_index);
             }
         }
 
@@ -691,18 +688,8 @@ lflow_table_add_lflow(struct lflow_table *lflow_table,
         if (lrn->dpgrp_lflow) {
             lrn->dpgrp_bitmap = bitmap_clone(dp_bitmap, dp_bitmap_len);
             lrn->dpgrp_bitmap_len = dp_bitmap_len;
-
-            size_t index;
-            BITMAP_FOR_EACH_1 (index, dp_bitmap_len, dp_bitmap) {
-                if (bitmap_is_set(lflow->dpg_bitmap, index)) {
-                    dp_refcnt_use(&lflow->dp_refcnts_map, index);
-                }
-            }
         } else {
             lrn->dp_index = od->index;
-            if (bitmap_is_set(lflow->dpg_bitmap, od->index)) {
-                dp_refcnt_use(&lflow->dp_refcnts_map, lrn->dp_index);
-            }
         }
         ovs_list_insert(&lflow->referenced_by, &lrn->ref_list_node);
         hmap_insert(&lflow_ref->lflow_ref_nodes, &lrn->ref_node, hash);
@@ -863,7 +850,6 @@ ovn_lflow_init(struct ovn_lflow *lflow, struct ovn_datapath *od,
     lflow->dpg = NULL;
     lflow->where = where;
     lflow->sb_uuid = UUID_ZERO;
-    hmap_init(&lflow->dp_refcnts_map);
     ovs_list_init(&lflow->referenced_by);
 }
 
@@ -939,7 +925,6 @@ ovn_lflow_destroy(struct lflow_table *lflow_table, struct ovn_lflow *lflow)
         free(lflow->io_port);
         free(lflow->stage_hint);
         free(lflow->ctrl_meter);
-        ovn_lflow_clear_dp_refcnts_map(lflow);
         struct lflow_ref_node *lrn;
         LIST_FOR_EACH_SAFE (lrn, ref_list_node, &lflow->referenced_by) {
             lflow_ref_node_destroy(lrn);
@@ -1314,82 +1299,6 @@ lflow_ref_sync_lflows__(struct lflow_ref *lflow_ref,
     return true;
 }
 
-/* Used for the datapath reference counting for a given 'struct ovn_lflow'.
- * See the hmap 'dp_refcnts_map in 'struct ovn_lflow'.
- * For a given lflow L(M, A) with match - M and actions - A, it can be
- * referenced by multiple lflow_refs for the same datapath
- * Eg. Two lflow_ref's - op->lflow_ref and op->stateful_lflow_ref of a
- * datapath can have a reference to the same lflow L (M, A). In this it
- * is important to maintain this reference count so that the sync to the
- * SB DB logical_flow is correct. */
-struct dp_refcnt {
-    struct hmap_node key_node;
-
-    size_t dp_index;  /* datapath index. Also used as hmap key. */
-    size_t refcnt;    /* reference counter. */
-};
-
-static struct dp_refcnt *
-dp_refcnt_find(struct hmap *dp_refcnts_map, size_t dp_index)
-{
-    struct dp_refcnt *dp_refcnt;
-    HMAP_FOR_EACH_WITH_HASH (dp_refcnt, key_node, dp_index, dp_refcnts_map) {
-        if (dp_refcnt->dp_index == dp_index) {
-            return dp_refcnt;
-        }
-    }
-
-    return NULL;
-}
-
-static void
-dp_refcnt_use(struct hmap *dp_refcnts_map, size_t dp_index)
-{
-    struct dp_refcnt *dp_refcnt = dp_refcnt_find(dp_refcnts_map, dp_index);
-
-    if (!dp_refcnt) {
-        dp_refcnt = xmalloc(sizeof *dp_refcnt);
-        dp_refcnt->dp_index = dp_index;
-        /* Allocation is happening on the second (!) use. */
-        dp_refcnt->refcnt = 1;
-
-        hmap_insert(dp_refcnts_map, &dp_refcnt->key_node, dp_index);
-    }
-
-    dp_refcnt->refcnt++;
-}
-
-/* Decrements the datapath's refcnt from the 'dp_refcnts_map' if it exists
- * and returns true if the refcnt is 0 or if the dp refcnt doesn't exist. */
-static bool
-dp_refcnt_release(struct hmap *dp_refcnts_map, size_t dp_index)
-{
-    struct dp_refcnt *dp_refcnt = dp_refcnt_find(dp_refcnts_map, dp_index);
-    if (!dp_refcnt) {
-        return true;
-    }
-
-    if (!--dp_refcnt->refcnt) {
-        hmap_remove(dp_refcnts_map, &dp_refcnt->key_node);
-        free(dp_refcnt);
-        return true;
-    }
-
-    return false;
-}
-
-static void
-ovn_lflow_clear_dp_refcnts_map(struct ovn_lflow *lflow)
-{
-    struct dp_refcnt *dp_refcnt;
-
-    HMAP_FOR_EACH_POP (dp_refcnt, key_node, &lflow->dp_refcnts_map) {
-        free(dp_refcnt);
-    }
-
-    hmap_destroy(&lflow->dp_refcnts_map);
-}
-
 static struct lflow_ref_node *
 lflow_ref_node_find(struct hmap *lflow_ref_nodes, struct ovn_lflow *lflow,
                     uint32_t lflow_hash)
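The patch above drops the per-datapath reference-count map and instead rebuilds an lflow's dpg_bitmap on unlink: it clears the bitmap and re-ORs the contribution of every other lflow_ref_node still on the lflow's referenced_by list, with the two new coverage counters recording how often each step runs. Below is a minimal standalone sketch of that "clear, then re-OR" rebuild; struct ref, rebuild_bitmap() and N_DP are hypothetical simplifications, and a plain byte array stands in for the OVS bitmap helpers and the real lflow_ref_node list.

/*
 * Standalone sketch (not OVN code): models the rebuild performed by the
 * patched lflow_ref_unlink_lflows() using simplified, hypothetical types.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define N_DP 8                      /* pretend the cluster has 8 datapaths */

struct ref {
    bool is_group;                  /* like lrn->dpgrp_lflow */
    unsigned char group[N_DP];      /* like lrn->dpgrp_bitmap, one byte/dp */
    size_t dp_index;                /* like lrn->dp_index */
};

/* Rebuild 'flow_bitmap' from every reference except 'skip' (the reference
 * being unlinked), mirroring the "clear, then re-OR" loop in the patch. */
static void
rebuild_bitmap(unsigned char flow_bitmap[N_DP], const struct ref *refs,
               size_t n_refs, const struct ref *skip)
{
    memset(flow_bitmap, 0, N_DP);                    /* like the memset() */
    for (size_t i = 0; i < n_refs; i++) {
        const struct ref *r = &refs[i];
        if (r == skip) {
            continue;
        }
        if (r->is_group) {
            for (size_t dp = 0; dp < N_DP; dp++) {   /* like bitmap_or() */
                flow_bitmap[dp] |= r->group[dp];
            }
        } else {
            flow_bitmap[r->dp_index] = 1;            /* like bitmap_set1() */
        }
    }
}

int
main(void)
{
    /* Two references to the same lflow: a datapath-group reference covering
     * datapaths 1 and 3, and a single-datapath reference to datapath 3. */
    struct ref refs[2] = {
        { .is_group = true, .group = { [1] = 1, [3] = 1 } },
        { .is_group = false, .dp_index = 3 },
    };
    unsigned char dpg_bitmap[N_DP];

    /* Unlink refs[0]: bit 1 drops out, bit 3 survives because refs[1]
     * still needs it. */
    rebuild_bitmap(dpg_bitmap, refs, 2, &refs[0]);
    for (size_t dp = 0; dp < N_DP; dp++) {
        printf("%d", dpg_bitmap[dp]);
    }
    putchar('\n');                                   /* prints 00010000 */
    return 0;
}

A datapath bit survives the unlink only if some other reference still contributes it, which is the same invariant the removed dp_refcnt map maintained; the trade-off is that each unlink now walks the lflow's referenced_by list instead of updating a per-datapath counter.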