diff --git a/conanfile.py b/conanfile.py index 0fcb6652..43a80f2c 100644 --- a/conanfile.py +++ b/conanfile.py @@ -9,7 +9,7 @@ class HomeObjectConan(ConanFile): name = "homeobject" - version = "2.0.3" + version = "2.1.3" homepage = "https://github.com/eBay/HomeObject" description = "Blob Store built on HomeReplication" topics = ("ebay") diff --git a/src/include/homeobject/homeobject.hpp b/src/include/homeobject/homeobject.hpp index 1b49125c..8fee24fd 100644 --- a/src/include/homeobject/homeobject.hpp +++ b/src/include/homeobject/homeobject.hpp @@ -73,7 +73,8 @@ class HomeObject { virtual HomeObjectStats get_stats() const = 0; }; -extern std::shared_ptr< HomeObject > init_homeobject(std::weak_ptr< HomeObjectApplication >&& application); +extern std::shared_ptr< HomeObject > init_homeobject(std::weak_ptr< HomeObjectApplication >&& application, + uint32_t gc_defrag_refresh_interval_second = 60ul); } // namespace homeobject // diff --git a/src/lib/homestore_backend/CMakeLists.txt b/src/lib/homestore_backend/CMakeLists.txt index c8a546a8..57a4716e 100644 --- a/src/lib/homestore_backend/CMakeLists.txt +++ b/src/lib/homestore_backend/CMakeLists.txt @@ -15,6 +15,7 @@ target_sources("${PROJECT_NAME}_homestore" PRIVATE replication_state_machine.cpp hs_cp_callbacks.cpp hs_http_manager.cpp + gc_manager.cpp $ ) target_link_libraries("${PROJECT_NAME}_homestore" PUBLIC diff --git a/src/lib/homestore_backend/gc_manager.cpp b/src/lib/homestore_backend/gc_manager.cpp new file mode 100644 index 00000000..e6624987 --- /dev/null +++ b/src/lib/homestore_backend/gc_manager.cpp @@ -0,0 +1,169 @@ +#include "gc_manager.hpp" + +#include + +namespace homeobject { + +void GCManager::start(uint32_t gc_defrag_refresh_interval_second) { + if (!m_heap_chunk_selector) { + LOGERRORMOD( + homeobject, + "GCManager have not been initialized!, please use initialzie_with_chunk_selector() to initialize it."); + return; + } + + // gc thread function + auto gc_task_handler = [this](uint32_t pdev_id) { + 
RELEASE_ASSERT(m_per_dev_gc_controller.contains(pdev_id), "controller of pdev {} is not initialized!", pdev_id); + auto& controller = m_per_dev_gc_controller[pdev_id]; + // the reserved chunk is a brand new chunk. if the move_to chunk of a gc task is not specified, the reserved + // chunk will be used as the move_to chunk. + // here, select chunk is called before homeobject service start, so this will make sure we can select a brand + // new chunk. + homestore::blk_alloc_hints hint; + hint.pdev_id_hint = pdev_id; + VChunk reserved_chunk(m_heap_chunk_selector->select_chunk(0, hint)); + + m_total_threads_num++; + while (!m_stop_gc) { + VChunk move_to_chunk(nullptr); + VChunk move_from_chunk(nullptr); + + std::unique_lock< std::mutex > lock(controller.mtx); + if (!controller.high_priority_gc_task_queue.empty()) { + // TODO: add separate new thread for high priority gc task if necessary, so that it can be processed + // ASAP. + std::tie(move_from_chunk, move_to_chunk) = controller.high_priority_gc_task_queue.front(); + controller.high_priority_gc_task_queue.pop(); + } else if (!controller.defrag_heap.empty()) { + move_from_chunk = controller.defrag_heap.top(); + controller.defrag_heap.pop(); + } else { + lock.unlock(); + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + continue; + } + lock.unlock(); + + RELEASE_ASSERT(move_from_chunk.get_pdev_id() == pdev_id, + "currrent pdev_id is {}, but get a move_from_chunk with chunk_id {} and pdev_id {}, which " + "is not in the same pdev!", + pdev_id, move_from_chunk.get_chunk_id(), move_from_chunk.get_pdev_id()); + + if (!m_heap_chunk_selector->select_specific_chunk(move_from_chunk.get_chunk_id())) { + LOGWARNMOD(homeobject, "failed to select specific chunk with chunk_id {}", + move_from_chunk.get_chunk_id()); + continue; + } + + if (!move_to_chunk.get_internal_chunk()) move_to_chunk = reserved_chunk; + // handle gc task + gc(move_from_chunk, move_to_chunk); + m_total_gc_chunk_count++; + + // after gc, the 
move_from_chunk is empty, so we can use it as the new reserved chunk. + // the move_to_chunk is not empty, so we need to release it. + reserved_chunk = move_from_chunk; + m_heap_chunk_selector->release_chunk(move_to_chunk.get_chunk_id()); + } + m_total_threads_num--; + }; + + uint32_t gc_total_thread_num = 0; + for (const auto& [pdev_id, controller] : m_per_dev_gc_controller) { + // TODO: add gc thread pool for each pdev if necessary + std::thread(gc_task_handler, pdev_id).detach(); + gc_total_thread_num++; + } + + // wait until all the gc threads have selected a brand new chunk as the reserved move_to_chunk. + while (m_total_threads_num < gc_total_thread_num) { + // we can also use a condition variable to replace all the while wait and notify in this file. + // we can change this if necessary. + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + } + + // TODO: make the interval of refresh defrag heap and iolimiter configurable + + // start thread to refresh defrag_heap for each pdev periodically + std::thread([this, gc_defrag_refresh_interval_second]() { + m_total_threads_num++; + while (!m_stop_gc) { + for (const auto& [pdev_id, _] : m_per_dev_gc_controller) + refresh_defrag_heap(pdev_id); + + // Sleep for a period of time before refreshing again + std::this_thread::sleep_for(std::chrono::seconds(gc_defrag_refresh_interval_second)); + } + m_total_threads_num--; + }).detach(); + + // start thread to refresh io_limiter for each pdev periodically + std::thread([this]() { + m_total_threads_num++; + while (!m_stop_gc) { + for (auto& [_, controller] : m_per_dev_gc_controller) + controller.io_limiter.refresh(); + // Sleep for a period of time before refreshing again + std::this_thread::sleep_for(std::chrono::seconds(1)); + } + m_total_threads_num--; + }).detach(); + + LOGINFOMOD(homeobject, "gc manager has been started with {} gc thread.", gc_total_thread_num); +} + +void GCManager::stop() { + m_stop_gc = true; + // theoretically, some gc thread might not 
started before stop() is called. + // TODO:make sure waiting for all gc thread exit gracefully + while (m_total_threads_num > 0) { + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + } + LOGINFOMOD(homeobject, "all GC threads have stopped."); +} + +void GCManager::add_gc_task(VChunk& move_from_chunk, VChunk& move_to_chunk) { + auto pdev_id = move_from_chunk.get_pdev_id(); + if (pdev_id != move_to_chunk.get_pdev_id()) { + LOGWARNMOD(homeobject, "move_from_chunk(pdev_id {}) and move_to_chunk(pdev_id {}) should be in the same pdev!", + pdev_id, move_to_chunk.get_pdev_id()); + return; + } + LOGINFOMOD(homeobject, "high priority gc task is added, move_from_chunk: {}, move_to_chunk: {}, pdev_id: {}", + move_from_chunk.get_chunk_id(), move_to_chunk.get_chunk_id(), pdev_id); + std::unique_lock< std::mutex > lock(m_per_dev_gc_controller[pdev_id].mtx); + m_per_dev_gc_controller[pdev_id].high_priority_gc_task_queue.emplace(move_from_chunk, move_to_chunk); +} + +GCManager::GCManager(std::shared_ptr< HeapChunkSelector > chunk_selector) : + m_heap_chunk_selector{chunk_selector}, metrics_{*this} { + RELEASE_ASSERT(m_heap_chunk_selector, "chunk_selector is nullptr!"); + auto pdev_ids = m_heap_chunk_selector->get_all_pdev_ids(); + RELEASE_ASSERT(pdev_ids.size(), "chunk_selector is not initialized!"); + for (const auto& pdev_id : pdev_ids) + m_per_dev_gc_controller[pdev_id]; +} + +void GCManager::refresh_defrag_heap(uint32_t pdev_id) { + if (!m_heap_chunk_selector) { + LOGERRORMOD(homeobject, + "GCManager has not been initialized, fail to refresh defrag heap for pdev {}. 
skip the operation", + pdev_id); + return; + } + const auto& pdev_chunks = m_heap_chunk_selector->get_all_chunks(pdev_id); + VChunkDefragHeap new_defrag_heap; + for (const auto& chunk : pdev_chunks) { + if (chunk.get_defrag_nblks()) new_defrag_heap.emplace(chunk); + } + new_defrag_heap.swap(m_per_dev_gc_controller[pdev_id].defrag_heap); +} + +void GCManager::gc(VChunk move_from_chunk, VChunk move_to_chunk) { + LOGINFOMOD(homeobject, "start gc chunk {} , move to chunk {}, pdev_id {}.", move_from_chunk.get_chunk_id(), + move_to_chunk.get_chunk_id(), move_from_chunk.get_pdev_id()); + // TODO: implement gc logic +} + +} // namespace homeobject \ No newline at end of file diff --git a/src/lib/homestore_backend/gc_manager.hpp b/src/lib/homestore_backend/gc_manager.hpp new file mode 100644 index 00000000..a88b2600 --- /dev/null +++ b/src/lib/homestore_backend/gc_manager.hpp @@ -0,0 +1,113 @@ +#pragma once + +#include "heap_chunk_selector.h" + +#include + +namespace homeobject { + +class GCManager { +public: + GCManager(std::shared_ptr< HeapChunkSelector >); + + void start(uint32_t); + void stop(); + + /** + * @brief this is used for emergency or recovered GC task when restarting + */ + void add_gc_task(VChunk&, VChunk&); + + ~GCManager() = default; + +private: + class VChunkDefragComparator { + public: + bool operator()(VChunk& lhs, VChunk& rhs) { return lhs.get_defrag_nblks() < rhs.get_defrag_nblks(); } + }; + + using VChunkDefragHeap = std::priority_queue< VChunk, std::vector< VChunk >, VChunkDefragComparator >; + struct PerDevGCController { + class IOLimiter { + public: + IOLimiter() : m_tokenBucket(1000), m_lastRefreshTime(std::chrono::steady_clock::now()) {} + + void refresh() { + std::lock_guard< std::mutex > lock(m_mutex); + auto currentTime = std::chrono::steady_clock::now(); + auto elapsedTime = + std::chrono::duration_cast< std::chrono::milliseconds >(currentTime - m_lastRefreshTime); + m_tokenBucket += m_tokensPerSecond * elapsedTime.count(); + m_tokenBucket = 
std::min(m_tokenBucket, m_maxTokens); + m_lastRefreshTime = currentTime; + } + + // gc thread will consume tokens before doing IO to limit the IO resource occupied by gc for each pdev + bool tryConsumeTokens(uint32_t tokens) { + std::lock_guard< std::mutex > lock(m_mutex); + if (m_tokenBucket >= tokens) { + m_tokenBucket -= tokens; + return true; + } + return false; + } + + private: + std::mutex m_mutex; + uint32_t m_tokenBucket; + // TODO: make m_maxTokens and m_tokensPerSecond configurable, and set them as optimized values after testing + const uint32_t m_maxTokens = 1000; // Maximum number of tokens in the bucket + const uint32_t m_tokensPerSecond = 100; // Tokens added per elapsed millisecond in refresh() — NOTE(review): name says per-second; confirm intended rate + std::chrono::steady_clock::time_point m_lastRefreshTime; + }; + + std::mutex mtx; + IOLimiter io_limiter; + VChunkDefragHeap defrag_heap; + // emergency and recovered GC tasks will be put in this queue + std::queue< std::pair< VChunk, VChunk > > high_priority_gc_task_queue; + }; + + void refresh_defrag_heap(uint32_t pdev_id); + + void gc(VChunk move_from_chunk, VChunk move_to_chunk); + + // metrics + struct GCMetrics : public sisl::MetricsGroup { + public: + GCMetrics(GCManager const& gc_manager) : sisl::MetricsGroup{"GC", "Singelton"}, gc_manager{gc_manager} { + // TODO: add more metrics + REGISTER_GAUGE(total_gc_chunk_count, "Number of chunks which has been GC"); + + register_me_to_farm(); + attach_gather_cb(std::bind(&GCMetrics::on_gather, this)); + } + ~GCMetrics() { deregister_me_from_farm(); } + GCMetrics(const GCMetrics&) = delete; + GCMetrics(GCMetrics&&) noexcept = delete; + GCMetrics& operator=(const GCMetrics&) = delete; + GCMetrics& operator=(GCMetrics&&) noexcept = delete; + + void on_gather() { GAUGE_UPDATE(*this, total_gc_chunk_count, gc_manager.total_gc_chunk_count()); } + + private: + GCManager const& gc_manager; + }; + + uint64_t total_gc_chunk_count() const { return m_total_gc_chunk_count; } + +private: + GCManager(const GCManager&) = delete; + 
GCManager& operator=(const GCManager&) = delete; + + std::shared_ptr< HeapChunkSelector > m_heap_chunk_selector; + std::unordered_map< uint32_t, PerDevGCController > m_per_dev_gc_controller; + std::atomic_ushort m_total_threads_num{0}; + std::atomic_bool m_stop_gc{false}; + + // metrics + GCMetrics metrics_; + std::atomic_uint16_t m_total_gc_chunk_count{0}; +}; + +} // namespace homeobject \ No newline at end of file diff --git a/src/lib/homestore_backend/heap_chunk_selector.cpp b/src/lib/homestore_backend/heap_chunk_selector.cpp index 897ab2ad..a2f4eeb3 100644 --- a/src/lib/homestore_backend/heap_chunk_selector.cpp +++ b/src/lib/homestore_backend/heap_chunk_selector.cpp @@ -1,4 +1,4 @@ -#include "heap_chunk_selector.h" +#include "gc_manager.hpp" #include #include @@ -17,7 +17,11 @@ namespace homeobject { // 2 the key collection of m_chunks will never change // this should only be called when initializing HeapChunkSelector in Homestore -void HeapChunkSelector::add_chunk(csharedChunk& chunk) { m_chunks.emplace(VChunk(chunk).get_chunk_id(), chunk); } +void HeapChunkSelector::add_chunk(csharedChunk& chunk) { + VChunk vchunk(chunk); + m_chunks.emplace(vchunk.get_chunk_id(), chunk); + m_pdev_chunks[vchunk.get_pdev_id()].emplace_back(chunk); +} void HeapChunkSelector::add_chunk_internal(const chunk_num_t chunkID, bool add_to_heap) { if (m_chunks.find(chunkID) == m_chunks.end()) { @@ -42,10 +46,7 @@ void HeapChunkSelector::add_chunk_internal(const chunk_num_t chunkID, bool add_t auto& heapLock = it->second->mtx; auto& heap = it->second->m_heap; - { - std::lock_guard< std::mutex > l(m_defrag_mtx); - m_defrag_heap.emplace(chunk); - } + std::lock_guard< std::mutex > l(heapLock); heap.emplace(chunk); } @@ -97,7 +98,6 @@ csharedChunk HeapChunkSelector::select_chunk(homestore::blk_count_t count, const if (vchunk.get_internal_chunk()) { auto& avalableBlkCounter = it->second->available_blk_count; avalableBlkCounter.fetch_sub(vchunk.available_blks()); - 
remove_chunk_from_defrag_heap(vchunk.get_chunk_id()); } else { LOGWARNMOD(homeobject, "No pdev found for pdev {}", pdevID); } @@ -142,44 +142,11 @@ csharedChunk HeapChunkSelector::select_specific_chunk(const chunk_num_t chunkID) if (vchunk.get_internal_chunk()) { auto& avalableBlkCounter = it->second->available_blk_count; avalableBlkCounter.fetch_sub(vchunk.available_blks()); - remove_chunk_from_defrag_heap(vchunk.get_chunk_id()); } return vchunk.get_internal_chunk(); } -// most_defrag_chunk will only be called when GC is triggered, and will return the chunk with the most -// defrag blocks -csharedChunk HeapChunkSelector::most_defrag_chunk() { - chunk_num_t chunkID{0}; - // the chunk might be seleted for creating shard. if this happens, we need to select another chunk - for (;;) { - { - std::lock_guard< std::mutex > lg(m_defrag_mtx); - if (m_defrag_heap.empty()) break; - chunkID = m_defrag_heap.top().get_chunk_id(); - } - auto chunk = select_specific_chunk(chunkID); - if (chunk) return chunk; - } - return nullptr; -} - -void HeapChunkSelector::remove_chunk_from_defrag_heap(const chunk_num_t chunkID) { - std::vector< VChunk > chunks; - std::lock_guard< std::mutex > lg(m_defrag_mtx); - chunks.reserve(m_defrag_heap.size()); - while (!m_defrag_heap.empty()) { - auto c = m_defrag_heap.top(); - m_defrag_heap.pop(); - if (c.get_chunk_id() == chunkID) break; - chunks.emplace_back(std::move(c)); - } - for (auto& c : chunks) { - m_defrag_heap.emplace(c); - } -} - void HeapChunkSelector::foreach_chunks(std::function< void(csharedChunk&) >&& cb) { // we should call `cb` on all the chunks, selected or not std::for_each(std::execution::par_unseq, m_chunks.begin(), m_chunks.end(), @@ -266,4 +233,13 @@ uint64_t HeapChunkSelector::total_blks(uint32_t dev_id) const { return it->second->m_total_blks; } +std::set< uint32_t > HeapChunkSelector::get_all_pdev_ids() const { + std::set< uint32_t > pdev_ids; + for (const auto& [pdev_id, _] : m_pdev_chunks) + pdev_ids.insert(pdev_id); + 
return pdev_ids; +} + +const std::vector< VChunk >& HeapChunkSelector::get_all_chunks(uint32_t pdev_id) { return m_pdev_chunks[pdev_id]; }; + } // namespace homeobject diff --git a/src/lib/homestore_backend/heap_chunk_selector.h b/src/lib/homestore_backend/heap_chunk_selector.h index 1ccf5d15..6edb8f8f 100644 --- a/src/lib/homestore_backend/heap_chunk_selector.h +++ b/src/lib/homestore_backend/heap_chunk_selector.h @@ -12,29 +12,24 @@ #include #include #include +#include namespace homeobject { using csharedChunk = homestore::cshared< homestore::Chunk >; +using sharedChunk = homestore::shared< homestore::Chunk >; +using VChunk = homestore::VChunk; class HeapChunkSelector : public homestore::ChunkSelector { public: HeapChunkSelector() = default; ~HeapChunkSelector() = default; - using VChunk = homestore::VChunk; class VChunkComparator { public: bool operator()(VChunk& lhs, VChunk& rhs) { return lhs.available_blks() < rhs.available_blks(); } }; - - class VChunkDefragComparator { - public: - bool operator()(VChunk& lhs, VChunk& rhs) { return lhs.get_defrag_nblks() < rhs.get_defrag_nblks(); } - }; - using VChunkHeap = std::priority_queue< VChunk, std::vector< VChunk >, VChunkComparator >; - using VChunkDefragHeap = std::priority_queue< VChunk, std::vector< VChunk >, VChunkDefragComparator >; using chunk_num_t = homestore::chunk_num_t; struct PerDevHeap { @@ -53,9 +48,6 @@ class HeapChunkSelector : public homestore::ChunkSelector { // responsible to use release_chunk() interface to release it when no longer to use the chunk anymore. csharedChunk select_specific_chunk(const chunk_num_t); - // this function will be used by GC flow to select a chunk for GC - csharedChunk most_defrag_chunk(); - // this function is used to return a chunk back to ChunkSelector when sealing a shard, and will only be used by // Homeobject. 
void release_chunk(const chunk_num_t); @@ -112,17 +104,19 @@ class HeapChunkSelector : public homestore::ChunkSelector { */ uint32_t total_chunks() const; + std::set< uint32_t > get_all_pdev_ids() const; + + const std::vector< VChunk >& get_all_chunks(uint32_t pdev_id); + private: std::unordered_map< uint32_t, std::shared_ptr< PerDevHeap > > m_per_dev_heap; // hold all the chunks , selected or not std::unordered_map< chunk_num_t, csharedChunk > m_chunks; - void add_chunk_internal(const chunk_num_t, bool add_to_heap = true); + // hold all the chunks in each pdev + std::unordered_map< uint32_t, std::vector< VChunk > > m_pdev_chunks; - VChunkDefragHeap m_defrag_heap; - std::mutex m_defrag_mtx; - - void remove_chunk_from_defrag_heap(const chunk_num_t); + void add_chunk_internal(const chunk_num_t, bool add_to_heap = true); }; } // namespace homeobject diff --git a/src/lib/homestore_backend/hs_homeobject.cpp b/src/lib/homestore_backend/hs_homeobject.cpp index 64cc2e8d..592e4ec4 100644 --- a/src/lib/homestore_backend/hs_homeobject.cpp +++ b/src/lib/homestore_backend/hs_homeobject.cpp @@ -26,12 +26,14 @@ struct svc_info_superblk_t { peer_id_t svc_id_; }; -extern std::shared_ptr< HomeObject > init_homeobject(std::weak_ptr< HomeObjectApplication >&& application) { +extern std::shared_ptr< HomeObject > init_homeobject(std::weak_ptr< HomeObjectApplication >&& application, + uint32_t gc_defrag_refresh_interval_second) { LOGI("Initializing HomeObject"); auto instance = std::make_shared< HSHomeObject >(std::move(application)); instance->init_homestore(); // instance->init_timer_thread(); instance->init_cp(); + instance->start_gc_manager(gc_defrag_refresh_interval_second); return instance; } @@ -179,7 +181,8 @@ void HSHomeObject::init_homestore() { auto run_on_type = has_fast_dev ? 
homestore::HSDevType::Fast : homestore::HSDevType::Data; LOGD("Running with Single mode, all service on {}", run_on_type); HomeStore::instance()->format_and_start({ - // FIXME: this is to work around the issue in HS that varsize allocator doesnt work with small chunk size. + // FIXME: this is to work around the issue in HS that varsize allocator doesnt work with small chunk + // size. {HS_SERVICE::META, hs_format_params{.dev_type = run_on_type, .size_pct = 5.0, .num_chunks = 1}}, {HS_SERVICE::LOG, hs_format_params{.dev_type = run_on_type, .size_pct = 10.0, .chunk_size = 32 * Mi}}, {HS_SERVICE::INDEX, hs_format_params{.dev_type = run_on_type, .size_pct = 5.0, .num_chunks = 1}}, @@ -258,6 +261,10 @@ void HSHomeObject::init_cp() { std::move(std::make_unique< MyCPCallbacks >(*this))); } +void HSHomeObject::start_gc_manager(uint32_t gc_defrag_refresh_interval_second) { + gc_manager_->start(gc_defrag_refresh_interval_second); +} + // void HSHomeObject::trigger_timed_events() { persist_pg_sb(); } void HSHomeObject::register_homestore_metablk_callback() { @@ -282,6 +289,7 @@ HSHomeObject::~HSHomeObject() { } trigger_timed_events(); #endif + gc_manager_->stop(); homestore::HomeStore::instance()->shutdown(); homestore::HomeStore::reset_instance(); iomanager.stop(); diff --git a/src/lib/homestore_backend/hs_homeobject.hpp b/src/lib/homestore_backend/hs_homeobject.hpp index f1da45cd..826d7d9b 100644 --- a/src/lib/homestore_backend/hs_homeobject.hpp +++ b/src/lib/homestore_backend/hs_homeobject.hpp @@ -11,6 +11,7 @@ #include "heap_chunk_selector.h" #include "lib/homeobject_impl.hpp" #include "replication_message.hpp" +#include "gc_manager.hpp" namespace homestore { struct meta_blk; @@ -276,6 +277,7 @@ class HSHomeObject : public HomeObjectImpl { private: shared< HeapChunkSelector > chunk_selector_; + shared< GCManager > gc_manager_; unique< HttpManager > http_mgr_; bool recovery_done_{false}; @@ -335,6 +337,12 @@ class HSHomeObject : public HomeObjectImpl { */ void init_cp(); + 
/** + * @brief start gc_manager for the homeobject. + * + */ + void start_gc_manager(uint32_t); + /** * @brief Callback function invoked when createPG message is committed on a shard. * diff --git a/src/lib/homestore_backend/hs_shard_manager.cpp b/src/lib/homestore_backend/hs_shard_manager.cpp index 14df4153..cc5e9492 100644 --- a/src/lib/homestore_backend/hs_shard_manager.cpp +++ b/src/lib/homestore_backend/hs_shard_manager.cpp @@ -387,7 +387,9 @@ void HSHomeObject::on_shard_meta_blk_recover_completed(bool success) { } } } + chunk_selector_->build_per_dev_chunk_heap(excluding_chunks); + gc_manager_ = std::make_shared< GCManager >(chunk_selector_); } void HSHomeObject::add_new_shard_to_map(ShardPtr&& shard) { diff --git a/src/lib/homestore_backend/tests/CMakeLists.txt b/src/lib/homestore_backend/tests/CMakeLists.txt index 7eda153a..088c89a8 100644 --- a/src/lib/homestore_backend/tests/CMakeLists.txt +++ b/src/lib/homestore_backend/tests/CMakeLists.txt @@ -7,6 +7,7 @@ list(APPEND TEST_SOURCES hs_blob_tests.cpp hs_pg_tests.cpp homeobj_cp_tests.cpp + gc_manager_tests.cpp ) add_library(homestore_tests OBJECT) @@ -17,6 +18,6 @@ target_link_libraries(homestore_tests ) add_executable (test_heap_chunk_selector) -target_sources(test_heap_chunk_selector PRIVATE test_heap_chunk_selector.cpp ../heap_chunk_selector.cpp) +target_sources(test_heap_chunk_selector PRIVATE test_heap_chunk_selector.cpp ../heap_chunk_selector.cpp ../gc_manager.cpp) target_link_libraries(test_heap_chunk_selector homestore::homestore ${COMMON_TEST_DEPS}) add_test(NAME HeapChunkSelectorTest COMMAND ${CMAKE_BINARY_DIR}/bin/test_heap_chunk_selector) diff --git a/src/lib/homestore_backend/tests/gc_manager_tests.cpp b/src/lib/homestore_backend/tests/gc_manager_tests.cpp new file mode 100644 index 00000000..9dcf472c --- /dev/null +++ b/src/lib/homestore_backend/tests/gc_manager_tests.cpp @@ -0,0 +1,9 @@ + +#include "lib/homestore_backend/gc_manager.hpp" + +#include + +// Test case for GC Manager 
+TEST(GCManagerTest, BasicTest) { + // TODO: Add test logic +} \ No newline at end of file diff --git a/src/lib/homestore_backend/tests/homeobj_cp_tests.cpp b/src/lib/homestore_backend/tests/homeobj_cp_tests.cpp index 505a715a..0e8bd803 100644 --- a/src/lib/homestore_backend/tests/homeobj_cp_tests.cpp +++ b/src/lib/homestore_backend/tests/homeobj_cp_tests.cpp @@ -32,7 +32,7 @@ TEST_F(HomeObjectFixture, HSHomeObjectCPTestBasic) { _obj_inst.reset(); // Step-4: re-create the homeobject and pg infos and shard infos will be recover automatically. - _obj_inst = homeobject::init_homeobject(std::weak_ptr< homeobject::HomeObjectApplication >(app)); + _obj_inst = homeobject::init_homeobject(std::weak_ptr< homeobject::HomeObjectApplication >(app), 1ul); ho = dynamic_cast< homeobject::HSHomeObject* >(_obj_inst.get()); diff --git a/src/lib/homestore_backend/tests/homeobj_fixture.hpp b/src/lib/homestore_backend/tests/homeobj_fixture.hpp index 33603f49..2b1bade7 100644 --- a/src/lib/homestore_backend/tests/homeobj_fixture.hpp +++ b/src/lib/homestore_backend/tests/homeobj_fixture.hpp @@ -33,7 +33,7 @@ class HomeObjectFixture : public ::testing::Test { void SetUp() override { app = std::make_shared< FixtureApp >(true /* is_hybrid */); - _obj_inst = homeobject::init_homeobject(std::weak_ptr< homeobject::HomeObjectApplication >(app)); + _obj_inst = homeobject::init_homeobject(std::weak_ptr< homeobject::HomeObjectApplication >(app), 1ul); } void TearDown() override { app->clean(); } @@ -188,7 +188,7 @@ class HomeObjectFixture : public ::testing::Test { void restart() { LOGINFO("Restarting homeobject."); _obj_inst.reset(); - _obj_inst = homeobject::init_homeobject(std::weak_ptr< homeobject::HomeObjectApplication >(app)); + _obj_inst = homeobject::init_homeobject(std::weak_ptr< homeobject::HomeObjectApplication >(app), 1ul); std::this_thread::sleep_for(std::chrono::seconds{1}); } }; diff --git a/src/lib/homestore_backend/tests/hs_blob_tests.cpp 
b/src/lib/homestore_backend/tests/hs_blob_tests.cpp index 78922791..622a8b19 100644 --- a/src/lib/homestore_backend/tests/hs_blob_tests.cpp +++ b/src/lib/homestore_backend/tests/hs_blob_tests.cpp @@ -2,7 +2,7 @@ TEST(HomeObject, BasicEquivalence) { auto app = std::make_shared< FixtureApp >(); - auto obj_inst = homeobject::init_homeobject(std::weak_ptr< homeobject::HomeObjectApplication >(app)); + auto obj_inst = homeobject::init_homeobject(std::weak_ptr< homeobject::HomeObjectApplication >(app), 1ul); ASSERT_TRUE(!!obj_inst); auto shard_mgr = obj_inst->shard_manager(); auto pg_mgr = obj_inst->pg_manager(); diff --git a/src/lib/homestore_backend/tests/hs_shard_tests.cpp b/src/lib/homestore_backend/tests/hs_shard_tests.cpp index b2d86dca..3e54d744 100644 --- a/src/lib/homestore_backend/tests/hs_shard_tests.cpp +++ b/src/lib/homestore_backend/tests/hs_shard_tests.cpp @@ -152,7 +152,7 @@ TEST_F(ShardManagerTestingRecovery, ShardManagerRecovery) { homeobject::peer_id_t _peer1; homeobject::peer_id_t _peer2; std::shared_ptr< homeobject::HomeObject > _home_object; - _home_object = homeobject::init_homeobject(std::weak_ptr< homeobject::HomeObjectApplication >(app)); + _home_object = homeobject::init_homeobject(std::weak_ptr< homeobject::HomeObjectApplication >(app), 1ul); _peer1 = _home_object->our_uuid(); _peer2 = boost::uuids::random_generator()(); @@ -174,7 +174,7 @@ TEST_F(ShardManagerTestingRecovery, ShardManagerRecovery) { // restart homeobject and check if pg/shard info will be recovered. _home_object.reset(); LOGI("restart home_object"); - _home_object = homeobject::init_homeobject(std::weak_ptr< homeobject::HomeObjectApplication >(app)); + _home_object = homeobject::init_homeobject(std::weak_ptr< homeobject::HomeObjectApplication >(app), 1ul); std::this_thread::sleep_for(std::chrono::seconds{5}); homeobject::HSHomeObject* ho = dynamic_cast< homeobject::HSHomeObject* >(_home_object.get()); // check PG after recovery. 
@@ -202,7 +202,7 @@ TEST_F(ShardManagerTestingRecovery, ShardManagerRecovery) { _home_object.reset(); LOGI("restart home_object again"); // re-create the homeobject and pg infos and shard infos will be recover automatically. - _home_object = homeobject::init_homeobject(std::weak_ptr< homeobject::HomeObjectApplication >(app)); + _home_object = homeobject::init_homeobject(std::weak_ptr< homeobject::HomeObjectApplication >(app), 1ul); std::this_thread::sleep_for(std::chrono::seconds{5}); auto s = _home_object->shard_manager()->get_shard(shard_id).get(); ASSERT_TRUE(!!s); @@ -227,7 +227,7 @@ TEST_F(ShardManagerTestingRecovery, SealedShardRecovery) { homeobject::peer_id_t _peer1; homeobject::peer_id_t _peer2; std::shared_ptr< homeobject::HomeObject > _home_object; - _home_object = homeobject::init_homeobject(std::weak_ptr< homeobject::HomeObjectApplication >(app)); + _home_object = homeobject::init_homeobject(std::weak_ptr< homeobject::HomeObjectApplication >(app), 1ul); _peer1 = _home_object->our_uuid(); _peer2 = boost::uuids::random_generator()(); @@ -257,7 +257,7 @@ TEST_F(ShardManagerTestingRecovery, SealedShardRecovery) { LOGI("restart home_object"); // re-create the homeobject and pg infos and shard infos will be recover automatically. 
- _home_object = homeobject::init_homeobject(std::weak_ptr< homeobject::HomeObjectApplication >(app)); + _home_object = homeobject::init_homeobject(std::weak_ptr< homeobject::HomeObjectApplication >(app), 1ul); std::this_thread::sleep_for(std::chrono::seconds{5}); ho = dynamic_cast< homeobject::HSHomeObject* >(_home_object.get()); EXPECT_TRUE(ho->_pg_map.size() == 1); diff --git a/src/lib/homestore_backend/tests/test_heap_chunk_selector.cpp b/src/lib/homestore_backend/tests/test_heap_chunk_selector.cpp index 5358a56a..63e325ef 100644 --- a/src/lib/homestore_backend/tests/test_heap_chunk_selector.cpp +++ b/src/lib/homestore_backend/tests/test_heap_chunk_selector.cpp @@ -144,19 +144,6 @@ TEST_F(HeapChunkSelectorTest, test_select_specific_chunk) { ASSERT_EQ(chunk_id, chunk->get_chunk_id()); } -TEST_F(HeapChunkSelectorTest, test_most_defrag_chunk) { - for (uint32_t i = 1; i < 6; i++) { - auto chunk = HCS.most_defrag_chunk(); - // should always select the chunk with the most defrag blocks - ASSERT_EQ(chunk->get_chunk_id(), i); - } - - // after release a chunk with the most defrag blocks, most_defrag_chunk should select this chunk. 
- HCS.release_chunk(1); - auto chunk = HCS.most_defrag_chunk(); - ASSERT_EQ(chunk->get_chunk_id(), 1); -} - TEST_F(HeapChunkSelectorTest, test_release_chunk) { homestore::blk_count_t count = 1; homestore::blk_alloc_hints hints; diff --git a/src/lib/memory_backend/mem_homeobject.cpp b/src/lib/memory_backend/mem_homeobject.cpp index 8ab92afc..e6e41eec 100644 --- a/src/lib/memory_backend/mem_homeobject.cpp +++ b/src/lib/memory_backend/mem_homeobject.cpp @@ -3,7 +3,8 @@ namespace homeobject { /// NOTE: We give ourselves the option to provide a different HR instance here than libhomeobject.a -extern std::shared_ptr< HomeObject > init_homeobject(std::weak_ptr< HomeObjectApplication >&& application) { +extern std::shared_ptr< HomeObject > init_homeobject(std::weak_ptr< HomeObjectApplication >&& application, + uint32_t gc_defrag_refresh_interval_second) { return std::make_shared< MemoryHomeObject >(std::move(application)); } diff --git a/src/lib/tests/fixture_app.cpp b/src/lib/tests/fixture_app.cpp index 9e5d35c2..4c90cbc0 100644 --- a/src/lib/tests/fixture_app.cpp +++ b/src/lib/tests/fixture_app.cpp @@ -35,7 +35,7 @@ homeobject::peer_id_t FixtureApp::discover_svcid(std::optional< homeobject::peer void TestFixture::SetUp() { app = std::make_shared< FixtureApp >(); - homeobj_ = homeobject::init_homeobject(std::weak_ptr< homeobject::HomeObjectApplication >(app)); + homeobj_ = homeobject::init_homeobject(std::weak_ptr< homeobject::HomeObjectApplication >(app), 1ul); _peer1 = homeobj_->our_uuid(); _peer2 = boost::uuids::random_generator()(); diff --git a/test_package/test_package.cpp b/test_package/test_package.cpp index 7a02bcac..563ade15 100644 --- a/test_package/test_package.cpp +++ b/test_package/test_package.cpp @@ -11,9 +11,7 @@ class TestApp : public homeobject::HomeObjectApplication { public: bool spdk_mode() const override { return false; } uint32_t threads() const override { return 1; } - std::list< homeobject::device_info_t > devices() const override { - return 
std::list< homeobject::device_info_t >(); - } + std::list< homeobject::device_info_t > devices() const override { return std::list< homeobject::device_info_t >(); } homeobject::peer_id_t discover_svcid(std::optional< homeobject::peer_id_t > const& p) const override { return boost::uuids::random_generator()(); } @@ -29,6 +27,6 @@ int main(int argc, char** argv) { auto f = ::folly::Init(&parsed_argc, &argv, true); auto a = std::make_shared< TestApp >(); - homeobject::init_homeobject(a); + homeobject::init_homeobject(a, 1ul); return 0; }