From d753a854a5f9a2ae46575d995982e5ee30a986c7 Mon Sep 17 00:00:00 2001
From: Izabella Raulin
Date: Thu, 29 Sep 2022 17:24:19 +0200
Subject: [PATCH] WIP: Add pruning dead pid - draft static method

---
 examples/cpp/pyperf/PyPerfLoggingHelper.cc    | 20 +++++-
 examples/cpp/pyperf/PyPerfNativeStackTrace.cc | 66 +++++++++++--------
 examples/cpp/pyperf/PyPerfNativeStackTrace.h  | 22 ++++---
 examples/cpp/pyperf/PyPerfProfiler.cc         | 19 ++++++
 4 files changed, 87 insertions(+), 40 deletions(-)

diff --git a/examples/cpp/pyperf/PyPerfLoggingHelper.cc b/examples/cpp/pyperf/PyPerfLoggingHelper.cc
index 61e96ad8810d..c619adcdf6c3 100644
--- a/examples/cpp/pyperf/PyPerfLoggingHelper.cc
+++ b/examples/cpp/pyperf/PyPerfLoggingHelper.cc
@@ -11,13 +11,28 @@ namespace ebpf {
 
 namespace pyperf {
 
-static uint64_t setVerbosityLevel = 0;
+static uint64_t setVerbosityLevel = 9;
 
 void setVerbosity(uint64_t verbosityLevel) {
   setVerbosityLevel = verbosityLevel;
 }
 
 void logInfo(uint64_t logLevel, const char* fmt, ...) {
+
+  if (logLevel <= 2 ) {
+    va_list va;
+    va_start(va, fmt);
+    // added by iza: also write the message to a temp file for debugging
+    FILE * pFile;
+    pFile = fopen("/tmp/gprofiler_tmp/izahelperfile.txt","a");
+    if (pFile) {
+      std::vfprintf(pFile, fmt, va);
+      fclose (pFile);
+    }
+    va_end(va);
+    // end of debug block
+  }
+
   if (logLevel > setVerbosityLevel) {
     return;
   }
@@ -25,8 +40,11 @@ void logInfo(uint64_t logLevel, const char* fmt, ...) {
 
   va_list va;
   va_start(va, fmt);
   std::vfprintf(stderr, fmt, va);
+  va_end(va);
+
 }
+
 
 } // namespace pyperf
 } // namespace ebpf
diff --git a/examples/cpp/pyperf/PyPerfNativeStackTrace.cc b/examples/cpp/pyperf/PyPerfNativeStackTrace.cc
index 3f678605ce22..c2639457099f 100644
--- a/examples/cpp/pyperf/PyPerfNativeStackTrace.cc
+++ b/examples/cpp/pyperf/PyPerfNativeStackTrace.cc
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -31,6 +32,7 @@ time_t NativeStackTrace::now;
 
 UnwindCache NativeStackTrace::cache;
 
+
 NativeStackTrace::NativeStackTrace(uint32_t pid, const unsigned char *raw_stack,
                                    size_t stack_len, uintptr_t ip, uintptr_t sp) : error_occurred(false) {
   NativeStackTrace::stack = raw_stack;
@@ -38,6 +40,7 @@ NativeStackTrace::NativeStackTrace(uint32_t pid, const unsigned char *raw_stack,
   NativeStackTrace::ip = ip;
   NativeStackTrace::sp = sp;
   NativeStackTrace::now = time(NULL);
+  logInfo(2,"DEBUGIZA: Welcome to NativeStackTrace, pid=%d\n", pid);
 
   if (stack_len == 0) {
     return;
@@ -56,11 +59,11 @@ NativeStackTrace::NativeStackTrace(uint32_t pid, const unsigned char *raw_stack,
   int res;
 
   // Pseudo-proactive way of implementing TTL - whenever any call is made, all expired entries are removed
-  cache_eviction(cache);
+  cache_eviction();
 
   // Check whether the entry for the process ID is presented in the cache
-  if (!is_cached(cache, pid)) {
-    logInfo(3,"The given key %d is not presented in the cache\n", pid);
+  if (!is_cached(pid)) {
+    logInfo(2,"The given key %d is not presented in the cache\n", pid);
 
     as = unw_create_addr_space(&my_accessors, 0);
     upt = _UPT_create(pid);
@@ -82,12 +85,12 @@ NativeStackTrace::NativeStackTrace(uint32_t pid, const unsigned char *raw_stack,
     }
 
     // Put to the cache
-    cache_put(cache, pid, cursor, as, upt);
+    cache_put(pid, cursor, as, upt);
   } else {
-    logInfo(3,"Found entry for the given key %d in the cache\n", pid);
+    logInfo(2,"Found entry for the given key %d in the cache\n", pid);
 
     // Get from the cache
-    UnwindCacheEntry cached_entry = cache_get(cache, pid);
+    UnwindCacheEntry cached_entry = cache_get(pid);
     cursor = cached_entry.cursor;
     as = cached_entry.as;
     upt = cached_entry.upt;
@@ -137,6 +140,11 @@ NativeStackTrace::NativeStackTrace(uint32_t pid, const unsigned char *raw_stack,
 
 }
 
+void NativeStackTrace::Prune_dead_pid(uint32_t dead_pid) {
+  logInfo(2, "DEBUGIZA: D. Try to call cache_delete %d\n", dead_pid);
+  cache_delete_key(dead_pid);
+}
+
 void NativeStackTrace::cleanup(void *upt, unw_addr_space_t as) {
   if (upt) {
     _UPT_destroy(upt);
@@ -230,75 +238,75 @@ bool NativeStackTrace::error_occured() const {
   return error_occurred;
 }
 
-bool NativeStackTrace::is_cached(const UnwindCache &map, const uint32_t &key) {
+bool NativeStackTrace::is_cached(const uint32_t &key) {
   try {
-    map.at(key);
+    cache.at(key);
     return true;
   } catch (const std::out_of_range&) {
-    logInfo(3, "No entry for %d in the cache\n", key);
+    logInfo(2, "No entry for %d in the cache\n", key);
   }
   return false;
 }
 
-UnwindCacheEntry NativeStackTrace::cache_get(const UnwindCache &map, const uint32_t &key) {
-  const UnwindCacheEntry & entry = map.at(key);
+UnwindCacheEntry NativeStackTrace::cache_get(const uint32_t &key) {
+  const UnwindCacheEntry & entry = cache.at(key);
   return entry;
 }
 
 // cache_put adds a new entry to the unwind cache if the capacity allows
-void NativeStackTrace::cache_put(UnwindCache &mp, const uint32_t &key, const unw_cursor_t cursor, const unw_addr_space_t as, void *upt) {
+void NativeStackTrace::cache_put(const uint32_t &key, const unw_cursor_t cursor, const unw_addr_space_t as, void *upt) {
   // Check available capacity
   if (cache_size() > NativeStackTrace::CacheMaxSizeMB*1024*1024 - cache_single_entry_size()) {
-    logInfo(3, "The cache usage is %.2f MB, close to reaching the max memory usage (%d MB)\n", cache_size_KB()/1024, NativeStackTrace::CacheMaxSizeMB);
-    logInfo(3, "Skipping adding an entry for %d to the cache\n", key);
+    logInfo(2, "The cache usage is %.2f MB, close to reaching the max memory usage (%d MB)\n", cache_size_KB()/1024, NativeStackTrace::CacheMaxSizeMB);
+    logInfo(2, "Skipping adding an entry for %d to the cache\n", key);
     return;
   }
 
   UnwindCacheEntry entry = {cursor, as, upt, now};
-  mp[key] = entry;
-  logInfo(3, "New entry for %d was added to the cache\n", key);
+  cache[key] = entry;
+  logInfo(2, "New entry for %d was added to the cache\n", key);
 }
 
 // cache_delete_key removes the element from the cache and destroys unwind address space and UPT
 // to ensure that all memory and other resources are freed up
-bool NativeStackTrace::cache_delete_key(UnwindCache &mp, const uint32_t &key) {
+bool NativeStackTrace::cache_delete_key(const uint32_t &key) {
   UnwindCacheEntry e;
   try {
-    e = cache_get(mp, key);
+    e = cache_get(key);
   } catch (const std::out_of_range&) {
-    logInfo(3, "Failed to delete entry for %d: no such key in the cache\n", key);
+    logInfo(2, "Failed to delete entry for %d: no such key in the cache\n", key);
     return false;
   }
-  mp.erase(key);
+  cache.erase(key);
   cleanup(e.upt, e.as);
-  logInfo(3, "The entry for %d was deleted from the cache\n", key);
+  logInfo(2, "The entry for %d was deleted from the cache\n", key);
   return true;
 }
 
 // cache_single_entry_size returns the number of bytes taken by single entry
-uint32_t NativeStackTrace::cache_single_entry_size() const {
+uint32_t NativeStackTrace::cache_single_entry_size() {
   return sizeof(decltype(cache)::key_type) + sizeof(decltype(cache)::mapped_type);
 }
 
 // cache_size returns the number of bytes currently in use by the cache
-uint32_t NativeStackTrace::cache_size() const {
+uint32_t NativeStackTrace::cache_size() {
   return sizeof(cache) + cache.size()*cache_single_entry_size();
 }
 
 // cache_size_KB returns the number of kilobytes currently in use by the cache
-float NativeStackTrace::cache_size_KB() const {
+float NativeStackTrace::cache_size_KB() {
  return cache_size()/1024;
 }
 
 // cache_eviction removes elements older than 5 minutes (CacheMaxTTL=300)
-void NativeStackTrace::cache_eviction(UnwindCache &mp) {
+void NativeStackTrace::cache_eviction() {
   std::vector<uint32_t> keys_to_delete;
   float _prev_cache_size = cache_size_KB();
 
-  for(std::map<uint32_t, UnwindCacheEntry>::iterator iter = mp.begin(); iter != mp.end(); ++iter)
+  for(std::map<uint32_t, UnwindCacheEntry>::iterator iter = cache.begin(); iter != cache.end(); ++iter)
   {
     uint32_t k = iter->first;
     const UnwindCacheEntry & e = iter->second;
@@ -310,13 +318,13 @@ void NativeStackTrace::cache_eviction(UnwindCache &mp) {
 
   // Delete expired entries
   for( size_t i = 0; i < keys_to_delete.size(); i++ ) {
-    cache_delete_key(mp, keys_to_delete[i]);
+    cache_delete_key(keys_to_delete[i]);
   }
 
   if (keys_to_delete.size() > 0) {
     float _cache_size = cache_size_KB();
-    logInfo(3,"Evicted %d item(s) from the cache\n", keys_to_delete.size());
-    logInfo(3,"The cache usage after eviction action is %.2f KB (released %.2f KB)\n", _cache_size, _prev_cache_size - _cache_size);
+    logInfo(2,"Evicted %d item(s) from the cache\n", keys_to_delete.size());
+    logInfo(2,"The cache usage after eviction action is %.2f KB (released %.2f KB)\n", _cache_size, _prev_cache_size - _cache_size);
   }
 }
 
diff --git a/examples/cpp/pyperf/PyPerfNativeStackTrace.h b/examples/cpp/pyperf/PyPerfNativeStackTrace.h
index cb0503eeb071..f9cf5ca7b701 100644
--- a/examples/cpp/pyperf/PyPerfNativeStackTrace.h
+++ b/examples/cpp/pyperf/PyPerfNativeStackTrace.h
@@ -22,16 +22,19 @@ typedef struct {
 
 typedef std::map<uint32_t, UnwindCacheEntry> UnwindCache;
 
+
 class NativeStackTrace {
  public:
  explicit NativeStackTrace(uint32_t pid, const uint8_t *raw_stack, size_t stack_len,
                            uintptr_t ip, uintptr_t sp);
+  static void Prune_dead_pid(uint32_t dead_pid);
   std::vector<std::string> get_stack_symbol() const;
   bool error_occured() const;
 
  private:
   std::vector<std::string> symbols;
   bool error_occurred;
+  static UnwindCache cache;
   static const uint8_t *stack;
   static size_t stack_len;
@@ -39,13 +42,12 @@ class NativeStackTrace {
   static uintptr_t sp;
   static time_t now;
 
-  static UnwindCache cache;
   static const uint16_t CacheMaxSizeMB;
   static const uint16_t CacheMaxTTL;
 
-  uint32_t cache_size() const;
-  uint32_t cache_single_entry_size() const;
-  float cache_size_KB() const;
+  static uint32_t cache_size();
+  static uint32_t cache_single_entry_size();
+  static float cache_size_KB();
 
   static int access_reg(unw_addr_space_t as, unw_regnum_t regnum,
                         unw_word_t *valp, int write, void *arg);
@@ -53,13 +55,13 @@ class NativeStackTrace {
   static int access_mem(unw_addr_space_t as, unw_word_t addr,
                         unw_word_t *valp, int write, void *arg);
 
-  void cleanup(void *upt, unw_addr_space_t as);
+  static void cleanup(void *upt, unw_addr_space_t as);
 
-  bool is_cached(const UnwindCache &map, const uint32_t &key);
-  void cache_put(UnwindCache &map, const uint32_t &key, const unw_cursor_t cursor, const unw_addr_space_t as, void *upt);
-  UnwindCacheEntry cache_get(const UnwindCache &map, const uint32_t &key);
-  bool cache_delete_key(UnwindCache &map, const uint32_t &key);
-  void cache_eviction(UnwindCache &map);
+  bool is_cached(const uint32_t &key);
+  void cache_put(const uint32_t &key, const unw_cursor_t cursor, const unw_addr_space_t as, void *upt);
+  static UnwindCacheEntry cache_get(const uint32_t &key);
+  static bool cache_delete_key(const uint32_t &key);
+  static void cache_eviction();
 };
 
 } // namespace pyperf
diff --git a/examples/cpp/pyperf/PyPerfProfiler.cc b/examples/cpp/pyperf/PyPerfProfiler.cc
index 9db85915ee3e..a200b54d0f85 100644
--- a/examples/cpp/pyperf/PyPerfProfiler.cc
+++ b/examples/cpp/pyperf/PyPerfProfiler.cc
@@ -29,6 +29,7 @@
 #include "PyPerfLoggingHelper.h"
 #include "PyPerfVersion.h"
 #include "PyPerfProc.h"
+#include "PyPerfNativeStackTrace.h"
 #include "bcc_elf.h"
 #include "bcc_proc.h"
 #include "bcc_syms.h"
@@ -235,20 +236,38 @@ bool PyPerfProfiler::populatePidTable() {
   auto pid_config_map = bpf_.get_hash_table(kPidCfgTableName);
 
   logInfo(3, "Pruning dead pids\n");
+  logInfo(2, "DEBUGIZA: Pruning dead pids\n");
   auto pid_config_keys = pid_config_map.get_keys_offline();
   for (const auto pid : pid_config_keys) {
+    logInfo(2, "DEBUGIZA: Pruning dead pid=%d\n", pid);
     auto pos = std::find(pids.begin(), pids.end(), pid);
     if (pos == pids.end()) {
+      // no element found - the pid is gone, so drop it from the BPF map and the unwind cache
+      logInfo(2, "DEBUGIZA: A. Pruning dead\n");
+      logInfo(2, "DEBUGIZA: A1. sizeof(pid_config_keys)=%zu\n", pid_config_keys.size());
+      logInfo(2, "DEBUGIZA: A1. sizeof(pids)=%zu\n", pids.size());
       pid_config_map.remove_value(pid);
+      NativeStackTrace::Prune_dead_pid(pid);
+      // logInfo(2, "DEBUGIZA: A2. sizeof(pid_config_map)=%d\n", pid_config_map.size());
+      logInfo(2, "DEBUGIZA: A2. sizeof(pids)=%zu\n", pids.size());
     } else {
+      logInfo(2, "DEBUGIZA: B Pruning dead - entered else branch\n");
      result = true;
+      // logInfo(2, "DEBUGIZA: B1. sizeof(pid_config_map)=%d\n", pid_config_map.size);
+      logInfo(2, "DEBUGIZA: B1. sizeof(pids)=%zu\n", pids.size());
+
+      // erase to avoid re-population (see below)
       pids.erase(pos);
+      // logInfo(2, "DEBUGIZA: B2. sizeof(pid_config_map)=%d\n", pid_config_map.size());
+      logInfo(2, "DEBUGIZA: B2. sizeof(pids)=%zu\n", pids.size());
+
     }
   }
 
   logInfo(3, "Populating pid table\n");
   for (const auto pid : pids) {
+    // update only those pids that have not been observed before
     PidData pidData;
     if (!tryTargetPid(pid, pidData)) {
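
Note on the pattern this draft moves toward: the per-PID unwind cache becomes an all-static member of NativeStackTrace, so PyPerfProfiler::populatePidTable() can call NativeStackTrace::Prune_dead_pid() for a PID that disappeared without holding a NativeStackTrace instance. Below is a minimal standalone sketch of that idea; DemoStackTrace, DemoCacheEntry and the hard-coded PIDs are illustrative stand-ins only, not the real PyPerf/libunwind types, and the entry struct omits the cursor/address-space/UPT members the real cache stores.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <ctime>
#include <map>
#include <vector>

// Illustrative stand-in for UnwindCacheEntry; the real entry also stores the
// libunwind cursor, address space and UPT handle.
struct DemoCacheEntry {
  time_t last_used;
};

class DemoStackTrace {
 public:
  // Static, so the profiler loop can prune a dead pid without constructing a trace object.
  static void Prune_dead_pid(uint32_t dead_pid) { cache_delete_key(dead_pid); }

  static void cache_put(uint32_t pid) { cache[pid] = DemoCacheEntry{time(nullptr)}; }

 private:
  static std::map<uint32_t, DemoCacheEntry> cache;

  static bool cache_delete_key(uint32_t key) {
    auto it = cache.find(key);
    if (it == cache.end()) {
      std::fprintf(stderr, "no cache entry for %u\n", key);
      return false;
    }
    // The real cache_delete_key() additionally destroys the unwind address space and UPT here.
    cache.erase(it);
    return true;
  }
};

std::map<uint32_t, DemoCacheEntry> DemoStackTrace::cache;

int main() {
  DemoStackTrace::cache_put(1234);
  DemoStackTrace::cache_put(5678);

  // Profiler side: pid 1234 has exited, only 5678 is still in the pid table.
  std::vector<uint32_t> live_pids = {5678};
  std::vector<uint32_t> cached_pids = {1234, 5678};
  for (uint32_t pid : cached_pids) {
    if (std::find(live_pids.begin(), live_pids.end(), pid) == live_pids.end()) {
      DemoStackTrace::Prune_dead_pid(pid);  // drop stale unwind state for the dead pid
    }
  }
  return 0;
}

The trade-off of the static approach is that all NativeStackTrace instances share one cache; that sharing is what makes pruning from the profiler loop possible, and it is also why the cache helpers lose their const qualifiers and become static in the header changes above.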