From 1866577986594cadd7052443cceb09508f147c44 Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Thu, 2 Feb 2023 11:01:32 +0000 Subject: [PATCH 001/112] Deactivated the Store flags, added log level --- src/system_variables.star | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/system_variables.star b/src/system_variables.star index 321bafc..bab0f49 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -12,8 +12,8 @@ NODE_CONFIG_FILE_LOCATION = "github.com/logos-co/wakurtosis/config/topology_gene CONTAINER_NODE_CONFIG_FILE_LOCATION = "/node/configuration_file/" NODE_CONFIGURATION_FILE_EXTENSION = ".toml" -NWAKU_ENTRYPOINT = ["/usr/bin/wakunode", "--rpc-address=0.0.0.0", "--metrics-server-address=0.0.0.0", "--store=true", "--storenode=/dns4/node_0"] -GOWAKU_ENTRYPOINT = ["/usr/bin/waku", "--rpc-address=0.0.0.0", "--metrics-server-address=0.0.0.0", "--store=true", "--storenode=/dns4/node_0"] +2NWAKU_ENTRYPOINT = ["/usr/bin/wakunode", "--rpc-address=0.0.0.0", "--metrics-server-address=0.0.0.0", "--log-level=INFO"] +GOWAKU_ENTRYPOINT = ["/usr/bin/waku", "--rpc-address=0.0.0.0", "--metrics-server-address=0.0.0.0", "--log-level=INFO"] # Prometheus Configuration PROMETHEUS_IMAGE = "prom/prometheus:latest" From 677898381cf42ab9fffe9151467181cf0eac8174 Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Thu, 2 Feb 2023 11:01:56 +0000 Subject: [PATCH 002/112] Ignored logs / results --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 7c9d98a..fc8c79d 100644 --- a/.gitignore +++ b/.gitignore @@ -27,3 +27,5 @@ config/topology_generated/ config/waku_config_files/ kurtosisrun_log.txt summary.json +wakurtosis_logs/ +log_trace_test From f49a572d50bb2aa66db86dcf69e045ae48e5a132 Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Thu, 2 Feb 2023 12:40:54 +0000 Subject: [PATCH 003/112] Generates a dictionary of messages sent to be used by wls_analyse.py --- wsl-module/wsl.py | 84 
+++++++++++++++++++++++------------------------ 1 file changed, 41 insertions(+), 43 deletions(-) diff --git a/wsl-module/wsl.py b/wsl-module/wsl.py index 6a7a615..3bcd923 100644 --- a/wsl-module/wsl.py +++ b/wsl-module/wsl.py @@ -121,7 +121,7 @@ def send_waku_msg(node_address, topic, payload, nonce=1): my_payload = { 'nonce' : nonce, - 'timestamp' : time.time_ns(), + 'ts' : time.time_ns(), 'payload' : payload } @@ -138,8 +138,10 @@ def send_waku_msg(node_address, topic, payload, nonce=1): G_LOGGER.debug('Waku RPC: %s from %s Topic: %s' %(data['method'], node_address, topic)) s_time = time.time() + + json_data = json.dumps(data) - response = requests.post(node_address, data=json.dumps(data), headers={'content-type': 'application/json'}) + response = requests.post(node_address, data=json_data, headers={'content-type': 'application/json'}) elapsed_ms =(time.time() - s_time) * 1000 @@ -147,7 +149,7 @@ def send_waku_msg(node_address, topic, payload, nonce=1): G_LOGGER.debug('Response from %s: %s [%.4f ms.]' %(node_address, response_obj, elapsed_ms)) - return response_obj, elapsed_ms + return response_obj, elapsed_ms, hash(json_data), my_payload['ts'] # Generate a random interval using a Poisson distribution def poisson_interval(rate): @@ -163,7 +165,7 @@ def make_payload_dist(dist_type, min_size, max_size): # Check if min and max packet sizes are the same if min_size == max_size: G_LOGGER.warning('Packet size is constant: min_size=max_size=%d' %min_size) - return make_payload(min_size) + return make_payload(min_size), min_size # Payload sizes are even integers uniformly distributed in [min_size, max_size] if dist_type == 'uniform': @@ -173,7 +175,7 @@ def make_payload_dist(dist_type, min_size, max_size): while(size % 2) != 0: size = int(random.uniform(min_size, max_size)) - return make_payload(size) + return make_payload(size), size # Payload sizes are even integers ~"normally" distributed in [min_size, max_size] if dist_type == 'gaussian': @@ -185,11 +187,11 @@ def 
make_payload_dist(dist_type, min_size, max_size): while(size % 2) != 0: size = int(rtnorm.rtnorm(min_size, max_size, sigma=σ, mu=μ, size=1)) - return make_payload(size) + return make_payload(size), size G_LOGGER.error('Unknown distribution type %s') - return '0x00' + return '0x00', 0 def parse_targets(enclave_dump_path, waku_port=8545): @@ -354,13 +356,10 @@ def main(): G_LOGGER.info('Selected %d emitters out of %d total nodes' %(len(emitters), len(targets))) """ Start simulation """ - stats = {} - msg_cnt = 0 - failed_cnt = 0 - bytes_cnt = 0 s_time = time.time() last_msg_time = 0 next_time_to_msg = 0 + msgs_dict = {} G_LOGGER.info('Starting a simulation of %d seconds ...' %config['general']['simulation_time']) @@ -369,7 +368,7 @@ def main(): # Check end condition elapsed_s = time.time() - s_time if elapsed_s >= config['general']['simulation_time']: - G_LOGGER.info('Simulation ended. Sent %d messages (%d bytes) in %ds.' %(msg_cnt, bytes_cnt, elapsed_s)) + G_LOGGER.info('Simulation ended. Sent %d messages in %ds.' %(len(msgs_dict), elapsed_s)) break # Send message @@ -392,15 +391,14 @@ def main(): G_LOGGER.info('Injecting message of topic %s to network through Waku node %s ...' 
%(emitter_topic, node_address)) - payload = make_payload_dist(dist_type=config['general']['dist_type'].lower(), min_size=config['general']['min_packet_size'], max_size=config['general']['max_packet_size']) - response, elapsed = send_waku_msg(node_address, topic=emitter_topic, payload=payload, nonce=msg_cnt) - + payload, size = make_payload_dist(dist_type=config['general']['dist_type'].lower(), min_size=config['general']['min_packet_size'], max_size=config['general']['max_packet_size']) + response, elapsed, msg_hash, ts = send_waku_msg(node_address, topic=emitter_topic, payload=payload, nonce=len(msgs_dict)) + if response['result']: - msg_cnt += 1 - topics_msg_cnt[emitter_topic] += 1 - else: - G_LOGGER.info('Message failed!') - failed_cnt += 1 + if msg_hash in msgs_dict: + G_LOGGER.error('Hash collision. %s already exists in dictionary' %msg_hash) + continue + msgs_dict[msg_hash] = {'ts' : ts, 'injection_point' : node_address, 'nonce' : len(msgs_dict), 'topic' : emitter_topic, 'payload' : payload, 'payload_size' : size} # Compute the time to next message next_time_to_msg = get_next_time_to_msg(config['general']['inter_msg_type'], config['general']['msg_rate'], config['general']['simulation_time']) @@ -411,32 +409,32 @@ def main(): elapsed_s = time.time() - s_time # Retrieve messages from every node and topic - G_LOGGER.info('Retriving messages from the enclave ...') - for node_idx, target in enumerate(targets): - node_address = 'http://%s/' %target + # G_LOGGER.info('Retriving messages from the enclave ...') + # for node_idx, target in enumerate(targets): + # node_address = 'http://%s/' %target - for topic_idx, topic in enumerate(topics[node_idx]): - msg_cnt = get_all_messages_from_node_from_topic(node_address, topic) - msg_lost = topics_msg_cnt[topic] - msg_cnt - G_LOGGER.info('- Retrieved %d messages on topic %s from node %s. Lost %d message(s).' 
%(msg_cnt, topic, node_address, msg_lost)) + # for topic_idx, topic in enumerate(topics[node_idx]): + # msg_cnt = get_all_messages_from_node_from_topic(node_address, topic) + # msg_lost = topics_msg_cnt[topic] - msg_cnt + # G_LOGGER.info('- Retrieved %d messages on topic %s from node %s. Lost %d message(s).' %(msg_cnt, topic, node_address, msg_lost)) # Output - summary = { - "end_ts" : time.time(), - "params" : config['general'], - "topics" : list(topics_msg_cnt.keys()), - "topics_msg_cnt" : topics_msg_cnt, - "simulation_time" : elapsed_s, - "total_messages" : msg_cnt, - "avg_latency" : 0, - "max_latency" : 0, - "min_latency" : 0 - } - - G_LOGGER.info('Simulation sumnmary: %s' %summary) - - with open('./summary.json', 'w') as summary_file: - summary_file.write(json.dumps(summary, indent=4)) + # summary = { + # "end_ts" : time.time(), + # "params" : config['general'], + # "topics" : list(topics_msg_cnt.keys()), + # "topics_msg_cnt" : topics_msg_cnt, + # "simulation_time" : elapsed_s, + # "total_messages" : len() + # } + + # G_LOGGER.info('Simulation sumnmary: %s' %summary) + + # with open('./summary.json', 'w') as summary_file: + # summary_file.write(json.dumps(summary, indent=4)) + + with open('./messages.json', 'w') as f: + f.write(json.dumps(msgs_dict, indent=4)) """ We are done """ G_LOGGER.info('Ended') From 5dbc94e4e4abca4529770348fe6c5591d6c12f7d Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Thu, 2 Feb 2023 12:41:04 +0000 Subject: [PATCH 004/112] Typoi --- main.star | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.star b/main.star index d834b12..6c046ac 100644 --- a/main.star +++ b/main.star @@ -35,5 +35,5 @@ def run(plan, args): waku.interconnect_waku_nodes(plan, waku_topology, services) - # # Setup WSL & Start the Simulation + # Setup WSL & Start the Simulation wsl_service = wsl.init(plan, services, wsl_config) From 94f8896e886d14998fa202173d1da3fdd97650e2 Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Thu, 2 Feb 2023 12:41:25 
+0000 Subject: [PATCH 005/112] Typo --- src/system_variables.star | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/system_variables.star b/src/system_variables.star index bab0f49..ae90d94 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -12,7 +12,7 @@ NODE_CONFIG_FILE_LOCATION = "github.com/logos-co/wakurtosis/config/topology_gene CONTAINER_NODE_CONFIG_FILE_LOCATION = "/node/configuration_file/" NODE_CONFIGURATION_FILE_EXTENSION = ".toml" -2NWAKU_ENTRYPOINT = ["/usr/bin/wakunode", "--rpc-address=0.0.0.0", "--metrics-server-address=0.0.0.0", "--log-level=INFO"] +NWAKU_ENTRYPOINT = ["/usr/bin/wakunode", "--rpc-address=0.0.0.0", "--metrics-server-address=0.0.0.0", "--log-level=INFO"] GOWAKU_ENTRYPOINT = ["/usr/bin/waku", "--rpc-address=0.0.0.0", "--metrics-server-address=0.0.0.0", "--log-level=INFO"] # Prometheus Configuration From 2319e94d7f3b2b89b4abb3f08929fb819ae6cea8 Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Thu, 2 Feb 2023 15:45:24 +0000 Subject: [PATCH 006/112] Activate Waku debug logs --- src/system_variables.star | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/system_variables.star b/src/system_variables.star index ae90d94..09d5cb2 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -12,8 +12,8 @@ NODE_CONFIG_FILE_LOCATION = "github.com/logos-co/wakurtosis/config/topology_gene CONTAINER_NODE_CONFIG_FILE_LOCATION = "/node/configuration_file/" NODE_CONFIGURATION_FILE_EXTENSION = ".toml" -NWAKU_ENTRYPOINT = ["/usr/bin/wakunode", "--rpc-address=0.0.0.0", "--metrics-server-address=0.0.0.0", "--log-level=INFO"] -GOWAKU_ENTRYPOINT = ["/usr/bin/waku", "--rpc-address=0.0.0.0", "--metrics-server-address=0.0.0.0", "--log-level=INFO"] +NWAKU_ENTRYPOINT = ["/usr/bin/wakunode", "--rpc-address=0.0.0.0", "--metrics-server-address=0.0.0.0", "--log-level=DEBUG"] +GOWAKU_ENTRYPOINT = ["/usr/bin/waku", "--rpc-address=0.0.0.0", "--metrics-server-address=0.0.0.0", 
"--log-level=DEBUG"] # Prometheus Configuration PROMETHEUS_IMAGE = "prom/prometheus:latest" From b465231bad0ad92ed2fba6056111ce35002bc6e8 Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Thu, 2 Feb 2023 15:45:49 +0000 Subject: [PATCH 007/112] Added hash calculaton of the payload --- wsl-module/wsl.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/wsl-module/wsl.py b/wsl-module/wsl.py index 3bcd923..7454ad9 100644 --- a/wsl-module/wsl.py +++ b/wsl-module/wsl.py @@ -5,7 +5,7 @@ """ """ Dependencies """ -import sys, logging, yaml, json, time, random, os, argparse, tomllib, glob +import sys, logging, yaml, json, time, random, os, argparse, tomllib, glob, hashlib import requests import rtnorm # from pathlib import Path @@ -149,7 +149,7 @@ def send_waku_msg(node_address, topic, payload, nonce=1): G_LOGGER.debug('Response from %s: %s [%.4f ms.]' %(node_address, response_obj, elapsed_ms)) - return response_obj, elapsed_ms, hash(json_data), my_payload['ts'] + return response_obj, elapsed_ms, json.dumps(waku_msg), my_payload['ts'] # Generate a random interval using a Poisson distribution def poisson_interval(rate): @@ -392,9 +392,10 @@ def main(): G_LOGGER.info('Injecting message of topic %s to network through Waku node %s ...' %(emitter_topic, node_address)) payload, size = make_payload_dist(dist_type=config['general']['dist_type'].lower(), min_size=config['general']['min_packet_size'], max_size=config['general']['max_packet_size']) - response, elapsed, msg_hash, ts = send_waku_msg(node_address, topic=emitter_topic, payload=payload, nonce=len(msgs_dict)) + response, elapsed, waku_msg, ts = send_waku_msg(node_address, topic=emitter_topic, payload=payload, nonce=len(msgs_dict)) if response['result']: + msg_hash = hashlib.sha256(waku_msg.encode('utf-8')).hexdigest() if msg_hash in msgs_dict: G_LOGGER.error('Hash collision. 
%s already exists in dictionary' %msg_hash) continue From 1020842b062815d269267371316beca49b177dcf Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Thu, 2 Feb 2023 15:46:10 +0000 Subject: [PATCH 008/112] Analysis module to be run at the end of a simulation --- wls_analyse.py | 121 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) create mode 100644 wls_analyse.py diff --git a/wls_analyse.py b/wls_analyse.py new file mode 100644 index 0000000..10109e6 --- /dev/null +++ b/wls_analyse.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 +""" +Description: Wakurtosis simulation analysis + +""" + +""" Dependencies """ +import sys, logging, yaml, json, time, random, os, argparse, tomllib, glob, csv + +""" Globals """ +G_APP_NAME = 'WLS-ANALYSE' +G_LOG_LEVEL = 'DEBUG' +G_DEFAULT_CONFIG_FILE = './config/config.json' +G_DEFAULT_CONFIG_FILE = './config/config.json' +G_DEFAULT_TOPOLOGY_PATH = './config/topology_generated' +G_DEFAULT_SIMULATION_PATH = './wakurtosis_logs' +G_LOGGER = None + +""" Custom logging formatter """ +class CustomFormatter(logging.Formatter): + + # Set different formats for every logging level + time_name_stamp = "[%(asctime)s.%(msecs)03d] [" + G_APP_NAME + "]" + FORMATS = { + logging.ERROR: time_name_stamp + " ERROR in %(module)s.py %(funcName)s() %(lineno)d - %(msg)s", + logging.WARNING: time_name_stamp + " WARNING - %(msg)s", + logging.CRITICAL: time_name_stamp + " CRITICAL in %(module)s.py %(funcName)s() %(lineno)d - %(msg)s", + logging.INFO: time_name_stamp + " %(msg)s", + logging.DEBUG: time_name_stamp + " %(funcName)s() %(msg)s", + 'DEFAULT': time_name_stamp + " %(msg)s", + } + + def format(self, record): + log_fmt = self.FORMATS.get(record.levelno, self.FORMATS['DEFAULT']) + formatter = logging.Formatter(log_fmt, '%d-%m-%Y %H:%M:%S') + return formatter.format(record) + +def main(): + + global G_LOGGER + + """ Init Logging """ + G_LOGGER = logging.getLogger(G_APP_NAME) + handler = logging.StreamHandler(sys.stdout) + 
handler.setFormatter(CustomFormatter()) + G_LOGGER.addHandler(handler) + + # Set loglevel from config + G_LOGGER.setLevel(G_LOG_LEVEL) + handler.setLevel(G_LOG_LEVEL) + + G_LOGGER.info('Started') + + """ Parse command line args """ + parser = argparse.ArgumentParser() + parser.add_argument("-sp", "--simulation_path", help="Simulation results path", action="store_true", default=G_DEFAULT_SIMULATION_PATH) + args = parser.parse_args() + + simulation_path = args.simulation_path + + """ Load Topics Structure """ + nodes_topics = [] + try: + tomls = glob.glob('%s/*.toml' %G_DEFAULT_TOPOLOGY_PATH) + # Index is the node id + tomls.sort() + for toml_file in tomls: + topics = [] + with open(toml_file, mode='rb') as read_file: + toml_config = tomllib.load(read_file) + node_topics_str = toml_config['topics'] + nodes_topics.append(list(node_topics_str.split(' '))) + except Exception as e: + G_LOGGER.error('%s: %s' % (e.__doc__, e)) + sys.exit() + + """ Load Simulation Messages """ + msgs_dict = None + try: + with open('%s/messages.json' %simulation_path, 'r') as f: + msgs_dict = json.load(f) + except Exception as e: + G_LOGGER.error('%s: %s' % (e.__doc__, e)) + sys.exit() + + G_LOGGER.info('Loaded %d messages.' 
%len(msgs_dict)) + + """ Load node level logs """ + # node_logs = [] + # try: + # node_logs_paths = glob.glob('%s/*--node_*' %simulation_path) + # node_logs_paths.sort() + # for node_log_path in node_logs_paths: + # with open('%s/output.log' %node_log_path, mode='r') as f: + # node_log_reader = csv.reader(f, delimiter=" ") + # for log_line in node_log_reader: + # # if 'waku.relay received' in log_line: + # # print(log_line) + # # elif 'waku.relay received' in log_line: + # # print(log_line) + # if 'subscribe' in log_line: + # print(log_line) + # G_LOGGER.info('Parsed log in %s/output.log' %node_log_path) + # # print(node_log) + # except Exception as e: + # G_LOGGER.error('%s: %s' % (e.__doc__, e)) + # sys.exit() + + ### Statistics we want to compute: + # 1 - Make sure that all messages have been delivered to their respective peers (x topic) + # 2 - Calculate the latency of every message at every peer wrt injection time + # 3 - Summarise/Visualise latencies per node / per topic / per message size? + # 4 - Reconstruct the path of the messages throughout the network where edge weight is latency + # 5 - Calculate propagation times per message (time for injection to last node) + + """ We are done """ + G_LOGGER.info('Ended') + +if __name__ == "__main__": + + main() From 1a2ffa1690bd65fa071f0d2139e95c466eeb5133 Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Fri, 10 Feb 2023 10:25:59 +0000 Subject: [PATCH 009/112] Prepare environment for run / Delete logs --- run.sh | 41 ++++++++++++++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 9 deletions(-) diff --git a/run.sh b/run.sh index ab85f05..4938212 100644 --- a/run.sh +++ b/run.sh @@ -21,8 +21,26 @@ cd .. 
docker rm gennet-container > /dev/null 2>&1 +### Prepare environment for scale +ulimit -n $(ulimit -n -H) +ulimit -u $(ulimit -u -H) + +sudo sysctl -w net.ipv4.neigh.default.gc_thresh3=4096 +sudo sysctl fs.inotify.max_user_instances=1048576 +sudo sysctl -w vm.max_map_count=262144 + +sudo docker container rm $(docker container ls -aq) +sudo docker volume rm $(docker volume ls -q) + # Delete the enclave just in case +echo -e "\nCleaning up Kurtosis environment "$enclave_name kurtosis enclave rm -f $enclave_name > /dev/null 2>&1 +kurtosis clean -a + +# Delete previous logs +echo -e "\Deleting previous logs in ${enclave_name}_logs" +rm -rf ./${enclave_name}_logs +rm ./kurtosisrun_log.txt # Create the new enclave and run the simulation echo -e "\nInitiating enclave "$enclave_name @@ -31,15 +49,15 @@ eval $kurtosis_cmd echo -e "Enclave " $enclave_name " is up and running" # Fetch the WSL service id and display the log of the simulation -wsl_service_id=$(kurtosis enclave inspect wakurtosis 2>/dev/null | grep wsl- | awk '{print $1}') +wsl_service_id=$(kurtosis enclave inspect $enclave_name 2>/dev/null | grep wsl- | awk '{print $1}') # kurtosis service logs wakurtosis $wsl_service_id -echo -e "\n--> To see simulation logs run: kurtosis service logs wakurtosis $wsl_service_id <--" +echo -e "\n--> To see simulation logs run: kurtosis service logs $enclave_name $wsl_service_id <--" # Fetch the Grafana address & port -grafana_host=$(kurtosis enclave inspect wakurtosis 2>/dev/null | grep grafana- | awk '{print $6}') +grafana_host=$(kurtosis enclave inspect $enclave_name 2>/dev/null | grep grafana- | awk '{print $6}') echo -e "\n--> Statistics in Grafana server at http://$grafana_host/ <--" -# echo "Output of kurtosis run command written in kurtosisrun_log.txt" +echo "Output of kurtosis run command written in kurtosisrun_log.txt" ### Wait for WSL to finish @@ -53,13 +71,18 @@ cid="$enclave_name--user-service--$cid_suffix" echo "Waiting for simulation to finish ..." 
status_code="$(docker container wait $cid)" +### Logs +rm -rf ./$enclave_name_logs > /dev/null 2>&1 +kurtosis enclave dump ${enclave_name} ${enclave_name}_logs > /dev/null 2>&1 +echo "Simulation ended with code $status_code Results in ./${enclave_name}_logs" + # Copy simulation results -docker cp "$cid:/wsl/summary.json" "./" -echo "Simulation ended with code $status_code Results in ./summary.json" +# docker cp "$cid:/wsl/summary.json" "./${enclave_name}_logs" +docker cp "$cid:/wsl/messages.json" "./${enclave_name}_logs" # Stop and delete the enclave -kurtosis enclave stop $enclave_name > /dev/null 2>&1 -kurtosis enclave rm -f $enclave_name > /dev/null 2>&1 -echo "Enclave $enclave_name stopped and deleted." +# kurtosis enclave stop $enclave_name > /dev/null 2>&1 +# kurtosis enclave rm -f $enclave_name > /dev/null 2>&1 +# echo "Enclave $enclave_name stopped and deleted." echo "Done." From e3e10f2d656177b076b6383f73a3f5b2cb70cecb Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Fri, 10 Feb 2023 10:32:42 +0000 Subject: [PATCH 010/112] Added prepare environment script --- prepare_env.sh | 15 +++++++++++++++ run.sh | 11 ----------- 2 files changed, 15 insertions(+), 11 deletions(-) create mode 100644 prepare_env.sh diff --git a/prepare_env.sh b/prepare_env.sh new file mode 100644 index 0000000..74031a6 --- /dev/null +++ b/prepare_env.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +kurtosis engine stop + +ulimit -n $(ulimit -n -H) +ulimit -u $(ulimit -u -H) + +sudo sysctl -w net.ipv4.neigh.default.gc_thresh3=4096 +sudo sysctl fs.inotify.max_user_instances=1048576 +sudo sysctl -w vm.max_map_count=262144 + +sudo docker container rm -f $(docker container ls -aq) +sudo docker volume rm -f $(docker volume ls -q) + +kurtosis engine start \ No newline at end of file diff --git a/run.sh b/run.sh index 4938212..b1c47af 100644 --- a/run.sh +++ b/run.sh @@ -21,17 +21,6 @@ cd .. 
docker rm gennet-container > /dev/null 2>&1 -### Prepare environment for scale -ulimit -n $(ulimit -n -H) -ulimit -u $(ulimit -u -H) - -sudo sysctl -w net.ipv4.neigh.default.gc_thresh3=4096 -sudo sysctl fs.inotify.max_user_instances=1048576 -sudo sysctl -w vm.max_map_count=262144 - -sudo docker container rm $(docker container ls -aq) -sudo docker volume rm $(docker volume ls -q) - # Delete the enclave just in case echo -e "\nCleaning up Kurtosis environment "$enclave_name kurtosis enclave rm -f $enclave_name > /dev/null 2>&1 From c807a5c94fd1989270172b15d7386d4561c6a885 Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Fri, 10 Feb 2023 10:39:36 +0000 Subject: [PATCH 011/112] Minor --- run.sh | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/run.sh b/run.sh index b1c47af..64f6f9c 100644 --- a/run.sh +++ b/run.sh @@ -24,12 +24,12 @@ docker rm gennet-container > /dev/null 2>&1 # Delete the enclave just in case echo -e "\nCleaning up Kurtosis environment "$enclave_name kurtosis enclave rm -f $enclave_name > /dev/null 2>&1 -kurtosis clean -a +kurtosis clean -a > /dev/null 2>&1 # Delete previous logs echo -e "\Deleting previous logs in ${enclave_name}_logs" -rm -rf ./${enclave_name}_logs -rm ./kurtosisrun_log.txt +rm -rf ./${enclave_name}_logs > /dev/null 2>&1 +rm ./kurtosisrun_log.txt > /dev/null 2>&1 # Create the new enclave and run the simulation echo -e "\nInitiating enclave "$enclave_name @@ -57,17 +57,17 @@ cid_suffix="$(kurtosis enclave inspect $enclave_name | grep $wsl_service_id | cu cid="$enclave_name--user-service--$cid_suffix" # Wait for the container to halt; this will block -echo "Waiting for simulation to finish ..." +echo -e "Waiting for simulation to finish ..." 
status_code="$(docker container wait $cid)" ### Logs rm -rf ./$enclave_name_logs > /dev/null 2>&1 kurtosis enclave dump ${enclave_name} ${enclave_name}_logs > /dev/null 2>&1 -echo "Simulation ended with code $status_code Results in ./${enclave_name}_logs" +echo -e "Simulation ended with code $status_code Results in ./${enclave_name}_logs" # Copy simulation results -# docker cp "$cid:/wsl/summary.json" "./${enclave_name}_logs" -docker cp "$cid:/wsl/messages.json" "./${enclave_name}_logs" +# docker cp "$cid:/wsl/summary.json" "./${enclave_name}_logs" > /dev/null 2>&1 +docker cp "$cid:/wsl/messages.json" "./${enclave_name}_logs" > /dev/null 2>&1 # Stop and delete the enclave # kurtosis enclave stop $enclave_name > /dev/null 2>&1 From d19f52fd580324f9d380c56f80e2e7c2c9acfe48 Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Fri, 10 Feb 2023 11:36:28 +0000 Subject: [PATCH 012/112] Delete the previous topology --- run.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/run.sh b/run.sh index 64f6f9c..c70d52c 100644 --- a/run.sh +++ b/run.sh @@ -13,6 +13,9 @@ wakurtosis_config_file=$ARGS2 echo "- Enclave name: " $enclave_name echo "- Configuration file: " $wakurtosis_config_file +# Delete topology +rm -rf ./config/topology_generated > /dev/null 2>&1 + # Create and run Gennet docker container echo -e "\nRunning topology generation" cd gennet-module From 30fca82c78903451fd290b7ced12db73c30e0251 Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Fri, 10 Feb 2023 13:02:18 +0000 Subject: [PATCH 013/112] Fixed issues recovering the WSL UUID from the Kurtosis inspect command --- run.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/run.sh b/run.sh index 56254ab..e655988 100644 --- a/run.sh +++ b/run.sh @@ -41,9 +41,9 @@ eval $kurtosis_cmd echo -e "Enclave " $enclave_name " is up and running" # Fetch the WSL service id and display the log of the simulation -wsl_service_id=$(kurtosis enclave inspect $enclave_name 2>/dev/null | grep wsl- | awk '{print $1}') 
+wsl_service_name=$(kurtosis enclave inspect $enclave_name 2>/dev/null | grep wsl | awk '{print $1}') # kurtosis service logs wakurtosis $wsl_service_id -echo -e "\n--> To see simulation logs run: kurtosis service logs $enclave_name $wsl_service_id <--" +echo -e "\n--> To see simulation logs run: kurtosis service logs $enclave_name $wsl_service_name <--" # Fetch the Grafana address & port grafana_host=$(kurtosis enclave inspect $enclave_name 2>/dev/null | grep grafana- | awk '{print $6}') From 0e94d2a4700cc309fcdf721e61616260d9270775 Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Fri, 10 Feb 2023 13:40:19 +0000 Subject: [PATCH 014/112] Changed log parsing to the new Kurtosis output format --- wls_analyse.py | 44 +++++++++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/wls_analyse.py b/wls_analyse.py index 10109e6..cd51e4e 100644 --- a/wls_analyse.py +++ b/wls_analyse.py @@ -86,26 +86,32 @@ def main(): G_LOGGER.info('Loaded %d messages.' 
%len(msgs_dict)) """ Load node level logs """ - # node_logs = [] - # try: - # node_logs_paths = glob.glob('%s/*--node_*' %simulation_path) - # node_logs_paths.sort() - # for node_log_path in node_logs_paths: - # with open('%s/output.log' %node_log_path, mode='r') as f: - # node_log_reader = csv.reader(f, delimiter=" ") - # for log_line in node_log_reader: - # # if 'waku.relay received' in log_line: - # # print(log_line) - # # elif 'waku.relay received' in log_line: - # # print(log_line) - # if 'subscribe' in log_line: - # print(log_line) - # G_LOGGER.info('Parsed log in %s/output.log' %node_log_path) - # # print(node_log) - # except Exception as e: - # G_LOGGER.error('%s: %s' % (e.__doc__, e)) - # sys.exit() + node_logs = {} + try: + services_log_paths = glob.glob('%s/*--user-service--*' %simulation_path) + + for log_path in services_log_paths: + with open('%s/spec.json' %log_path, mode='r') as f: + spec_json = json.load(f) + if spec_json['Path'] == '/usr/bin/wakunode': + node_id = spec_json['Config']['Labels']['com.kurtosistech.id'] + node_logs[node_id] = [] + with open('%s/output.log' %log_path, mode='r') as f: + node_log_reader = csv.reader(f, delimiter=" ") + for log_line in node_log_reader: + # if 'waku.relay received' in log_line: + # print(log_line) + # elif 'waku.relay received' in log_line: + # print(log_line) + if 'subscribe' in log_line: + node_logs[node_id].append(log_line) + G_LOGGER.info('Parsed node \"%s\" log in %s/output.log' %(node_id, log_path)) + except Exception as e: + G_LOGGER.error('%s: %s' % (e.__doc__, e)) + sys.exit() + G_LOGGER.debug(node_logs.keys()) + ### Statistics we want to compute: # 1 - Make sure that all messages have been delivered to their respective peers (x topic) # 2 - Calculate the latency of every message at every peer wrt injection time From f8375a7a97bfb171d291093dc5ccb611c431832f Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Fri, 10 Feb 2023 14:18:23 +0000 Subject: [PATCH 015/112] Added parse unique topics --- 
wls_analyse.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/wls_analyse.py b/wls_analyse.py index cd51e4e..b79c860 100644 --- a/wls_analyse.py +++ b/wls_analyse.py @@ -59,21 +59,28 @@ def main(): simulation_path = args.simulation_path """ Load Topics Structure """ + topics = set() nodes_topics = [] try: tomls = glob.glob('%s/*.toml' %G_DEFAULT_TOPOLOGY_PATH) # Index is the node id tomls.sort() for toml_file in tomls: - topics = [] + with open(toml_file, mode='rb') as read_file: toml_config = tomllib.load(read_file) node_topics_str = toml_config['topics'] - nodes_topics.append(list(node_topics_str.split(' '))) + topics_list = list(node_topics_str.split(' ')) + nodes_topics.append(topics_list) + topics.update(topics_list) except Exception as e: G_LOGGER.error('%s: %s' % (e.__doc__, e)) sys.exit() + G_LOGGER.info('Loaded topic structure with %d topic(s) and %d node(s).' %(len(topics), len(nodes_topics))) + G_LOGGER.debug(topics) + G_LOGGER.debug(nodes_topics) + """ Load Simulation Messages """ msgs_dict = None try: @@ -84,6 +91,7 @@ def main(): sys.exit() G_LOGGER.info('Loaded %d messages.' 
%len(msgs_dict)) + # G_LOGGER.debug(msgs_dict) """ Load node level logs """ node_logs = {} @@ -98,6 +106,7 @@ def main(): node_logs[node_id] = [] with open('%s/output.log' %log_path, mode='r') as f: node_log_reader = csv.reader(f, delimiter=" ") + # Check and extract if the log entry is relevant to us for log_line in node_log_reader: # if 'waku.relay received' in log_line: # print(log_line) @@ -111,6 +120,7 @@ def main(): sys.exit() G_LOGGER.debug(node_logs.keys()) + # G_LOGGER.debug(node_logs) ### Statistics we want to compute: # 1 - Make sure that all messages have been delivered to their respective peers (x topic) From 8bac04c7abf025a843b84a16acbae6e8ac5a7c2c Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Thu, 16 Feb 2023 07:50:14 +0000 Subject: [PATCH 016/112] Minor --- wls_analyse.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/wls_analyse.py b/wls_analyse.py index b79c860..affb2ce 100644 --- a/wls_analyse.py +++ b/wls_analyse.py @@ -78,8 +78,8 @@ def main(): sys.exit() G_LOGGER.info('Loaded topic structure with %d topic(s) and %d node(s).' %(len(topics), len(nodes_topics))) - G_LOGGER.debug(topics) - G_LOGGER.debug(nodes_topics) + # G_LOGGER.debug(topics) + # G_LOGGER.debug(nodes_topics) """ Load Simulation Messages """ msgs_dict = None @@ -128,6 +128,7 @@ def main(): # 3 - Summarise/Visualise latencies per node / per topic / per message size? 
# 4 - Reconstruct the path of the messages throughout the network where edge weight is latency # 5 - Calculate propagation times per message (time for injection to last node) + # 6 - Pull statistics from cCadvisor using API (memory, CPU, badnwitdh per node) """ We are done """ G_LOGGER.info('Ended') From 5877fa638c3c127e4fbd8e86b1e9307e5f5110c9 Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Thu, 16 Feb 2023 15:03:09 +0000 Subject: [PATCH 017/112] Added --log-level=TRACE to NWaku --- src/system_variables.star | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/system_variables.star b/src/system_variables.star index 934cbde..b0d6ebe 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -11,7 +11,7 @@ NODE_CONFIG_FILE_LOCATION = "github.com/logos-co/wakurtosis/config/topology_gene CONTAINER_NODE_CONFIG_FILE_LOCATION = "/node/configuration_file/" NODE_CONFIGURATION_FILE_EXTENSION = ".toml" NODE_CONFIGURATION_FILE_FLAG = "--config-file=" -NWAKU_ENTRYPOINT = ["/usr/bin/wakunode", "--rpc-address=0.0.0.0", "--metrics-server-address=0.0.0.0"] # todo: check, "--store=true", "--storenode=/dns4/node_0"] +NWAKU_ENTRYPOINT = ["/usr/bin/wakunode", "--rpc-address=0.0.0.0", "--metrics-server-address=0.0.0.0", "--log-level=TRACE"] # todo: check, "--store=true", "--storenode=/dns4/node_0"] GOWAKU_ENTRYPOINT = ["/usr/bin/waku", "--rpc-address=0.0.0.0", "--metrics-server-address=0.0.0.0"] # todo: check, "--store=true", "--storenode=/dns4/node_0"] # Prometheus Configuration From af0eaa6733b159cdfda25266c58b0a5226f6a01d Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Thu, 16 Feb 2023 15:06:44 +0000 Subject: [PATCH 018/112] Changed Nwaku imagine to the new message trace image --- src/system_variables.star | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/system_variables.star b/src/system_variables.star index b0d6ebe..4ca3c7f 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -1,5 +1,6 @@ # Waku 
Configuration -NWAKU_IMAGE = "statusteam/nim-waku:019f357d" +# NWAKU_IMAGE = "statusteam/nim-waku:019f357d" +NWAKU_IMAGE = "statusteam/nim-waku:nwaku-trace" GOWAKU_IMAGE = "gowaku" WAKU_RPC_PORT_ID = "rpc" From e3d4849a0e677cce24ffa0b6a50e8f09df8ce744 Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Thu, 16 Feb 2023 15:48:11 +0000 Subject: [PATCH 019/112] Delete previous logs before starting --- run.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/run.sh b/run.sh index e312e46..bd28760 100644 --- a/run.sh +++ b/run.sh @@ -20,6 +20,9 @@ echo "- Configuration file: " $wakurtosis_config_file # Delete topology rm -rf ./config/topology_generated > /dev/null 2>&1 +# Remove previous logs +rm -rf ./$enclave_name_logs > /dev/null 2>&1 + # Create and run Gennet docker container echo -e "\nRunning network generation" docker rm gennet-container # cleanup the old docker if any @@ -79,13 +82,12 @@ echo -e "Waiting for simulation to finish ..." status_code="$(docker container wait $cid)" ### Logs -rm -rf ./$enclave_name_logs > /dev/null 2>&1 kurtosis enclave dump ${enclave_name} ${enclave_name}_logs > /dev/null 2>&1 echo -e "Simulation ended with code $status_code Results in ./${enclave_name}_logs" # Copy simulation results # docker cp "$cid:/wsl/summary.json" "./${enclave_name}_logs" > /dev/null 2>&1 -docker cp "$cid:/wsl/messages.json" "./${enclave_name}_logs" > /dev/null 2>&1 +docker cp "$cid:/wsl/messages.json" "./${enclave_name}_logs" # Stop and delete the enclave # kurtosis enclave stop $enclave_name > /dev/null 2>&1 From 86f062f0563833ea474413b12637691b4d7a215d Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Thu, 16 Feb 2023 15:48:50 +0000 Subject: [PATCH 020/112] Rename the analsysis script --- wls_analyse.py => analysis.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) rename wls_analyse.py => analysis.py (92%) diff --git a/wls_analyse.py b/analysis.py similarity index 92% rename from wls_analyse.py rename to analysis.py 
index affb2ce..3d50585 100644 --- a/wls_analyse.py +++ b/analysis.py @@ -108,12 +108,15 @@ def main(): node_log_reader = csv.reader(f, delimiter=" ") # Check and extract if the log entry is relevant to us for log_line in node_log_reader: - # if 'waku.relay received' in log_line: - # print(log_line) - # elif 'waku.relay received' in log_line: - # print(log_line) - if 'subscribe' in log_line: - node_logs[node_id].append(log_line) + if 'waku.relay received' in log_line: + print(log_line) + G_LOGGER.debug(node_logs.keys()) + elif 'waku.relay published' in log_line: + print(log_line) + # if 'subscribe' in log_line: + # node_logs[node_id].append(log_line) + + G_LOGGER.info('Parsed node \"%s\" log in %s/output.log' %(node_id, log_path)) except Exception as e: G_LOGGER.error('%s: %s' % (e.__doc__, e)) From 5ebdd583d5e734dd498f740c3af016a4f53fc21e Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Mon, 20 Feb 2023 18:42:35 +0000 Subject: [PATCH 021/112] Added cAdvisor summary stats for each node --- analysis.py | 145 +++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 105 insertions(+), 40 deletions(-) diff --git a/analysis.py b/analysis.py index 3d50585..bb65528 100644 --- a/analysis.py +++ b/analysis.py @@ -5,10 +5,10 @@ """ """ Dependencies """ -import sys, logging, yaml, json, time, random, os, argparse, tomllib, glob, csv +import sys, logging, yaml, json, time, random, os, argparse, tomllib, glob, re, requests """ Globals """ -G_APP_NAME = 'WLS-ANALYSE' +G_APP_NAME = 'WLS-ANALYSIS' G_LOG_LEVEL = 'DEBUG' G_DEFAULT_CONFIG_FILE = './config/config.json' G_DEFAULT_CONFIG_FILE = './config/config.json' @@ -35,6 +35,24 @@ def format(self, record): formatter = logging.Formatter(log_fmt, '%d-%m-%Y %H:%M:%S') return formatter.format(record) +def fetch_hw_metrics_from_container(container_id): + + # cAdvisor API URL endpoint + url = 'http://localhost:8080/api/v2.1/summary/docker/%s' %container_id + G_LOGGER.debug('Fetching summary stats from %s ...' 
%url) + + # Make an HTTP request to the cAdvisor API to get the summary stats of the container + try: + response = requests.get(url) + except Exception as e: + G_LOGGER.error('%s: %s' % (e.__doc__, e)) + return + + # Parse the response as JSON + summary_stats = json.loads(response.text) + + return summary_stats + def main(): global G_LOGGER @@ -82,49 +100,88 @@ def main(): # G_LOGGER.debug(nodes_topics) """ Load Simulation Messages """ - msgs_dict = None - try: - with open('%s/messages.json' %simulation_path, 'r') as f: - msgs_dict = json.load(f) - except Exception as e: - G_LOGGER.error('%s: %s' % (e.__doc__, e)) - sys.exit() - - G_LOGGER.info('Loaded %d messages.' %len(msgs_dict)) + msgs_dict = {} + # try: + # with open('%s/messages.json' %simulation_path, 'r') as f: + # msgs_dict = json.load(f) + # except Exception as e: + # G_LOGGER.error('%s: %s' % (e.__doc__, e)) + # sys.exit() + + # G_LOGGER.info('Loaded %d messages.' %len(msgs_dict)) # G_LOGGER.debug(msgs_dict) """ Load node level logs """ node_logs = {} - try: - services_log_paths = glob.glob('%s/*--user-service--*' %simulation_path) - - for log_path in services_log_paths: - with open('%s/spec.json' %log_path, mode='r') as f: - spec_json = json.load(f) - if spec_json['Path'] == '/usr/bin/wakunode': - node_id = spec_json['Config']['Labels']['com.kurtosistech.id'] - node_logs[node_id] = [] - with open('%s/output.log' %log_path, mode='r') as f: - node_log_reader = csv.reader(f, delimiter=" ") - # Check and extract if the log entry is relevant to us - for log_line in node_log_reader: - if 'waku.relay received' in log_line: - print(log_line) - G_LOGGER.debug(node_logs.keys()) - elif 'waku.relay published' in log_line: - print(log_line) - # if 'subscribe' in log_line: - # node_logs[node_id].append(log_line) - - - G_LOGGER.info('Parsed node \"%s\" log in %s/output.log' %(node_id, log_path)) - except Exception as e: - G_LOGGER.error('%s: %s' % (e.__doc__, e)) - sys.exit() - - G_LOGGER.debug(node_logs.keys()) - # 
G_LOGGER.debug(node_logs) + # try: + services_log_paths = glob.glob('%s/*--user-service--*' %simulation_path) + for log_path in services_log_paths: + with open('%s/spec.json' %log_path, mode='r') as f: + spec_json = json.load(f) + if spec_json['Path'] == '/usr/bin/wakunode': + node_id = spec_json['Config']['Labels']['com.kurtosistech.id'] + # container_id = spec_json['Name'][1:] + container_id = spec_json['Id'] + node_logs[node_id] = {'published' : [], 'received' : [], 'container_id' : container_id} + + with open('%s/output.log' %log_path, mode='r') as f: + + # Process log line by line as a text string + for log_line in f: + # At this stage we only care about Waku Relay protocol + if 'waku.relay' in log_line: + + msg_topics = re.search(r'topics="([^"]+)"', log_line).group(1) + msg_topic = re.search(r'pubsubTopic=([^ ]+)', log_line).group(1) + msg_hash = re.search(r'hash=([^ ]+)', log_line).group(1) + + if 'published' in log_line: + msg_publishTime = re.search(r'publishTime=([\d]+)', log_line).group(1) + node_logs[node_id]['published'].append([msg_publishTime, msg_topics, msg_topic, msg_hash]) + + if msg_hash not in msgs_dict: + msgs_dict[msg_hash] = {'published_ts' : [msg_publishTime], 'received_ts' : []} + else: + msgs_dict[msg_hash]['published_ts'].append(msg_publishTime) + + # G_LOGGER.debug('Published by %s: %s %s %s %s' %(node_id, msg_publishTime, msg_hash, msg_topic, msg_topics)) + + elif 'received' in log_line: + msg_receivedTime = re.search(r'receivedTime=([\d]+)', log_line).group(1) + node_logs[node_id]['received'].append([msg_receivedTime, msg_topics, msg_topic, msg_hash]) + + if msg_hash not in msgs_dict: + msgs_dict[msg_hash] = {'published_ts' : [], 'received_ts' : [msg_receivedTime]} + else: + msgs_dict[msg_hash]['received_ts'].append(msg_receivedTime) + + # G_LOGGER.debug('Received in node %s: %s %s %s %s' %(node_id, msg_receivedTime, msg_hash, msg_topic, msg_topics)) + + + G_LOGGER.info('Parsed node \"%s\" log in %s/output.log' %(node_id, log_path)) 
+ # except Exception as e: + # G_LOGGER.error('%s: %s' % (e.__doc__, e)) + # sys.exit() + + # G_LOGGER.debug(node_logs.keys()) + # G_LOGGER.debug(node_logs) + # for item in node_logs.items(): + # print(item[0], len(item[1]['published']), len(item[1]['received'])) + + # Calculate tota propagation times, ie time for a messafe to reach all the nodes + # Easiest way is likely to sort the reception time stamps and get the oldest for a specific message within the node + for msg in msgs_dict.items(): + published_ts = msg[1]['published_ts'][0] + msg[1]['latencies'] = [] + for received_ts in msg[1]['received_ts']: + latency = int(received_ts) - int(published_ts) + msg[1]['latencies'].append(latency) + # print(msg[0], msg[1]['published_ts'], received_ts, latency) + msg[1]['max_latency'] = max(msg[1]['latencies']) + msg[1]['min_latency'] = min(msg[1]['latencies']) + + # print(msgs_dict) ### Statistics we want to compute: # 1 - Make sure that all messages have been delivered to their respective peers (x topic) # 2 - Calculate the latency of every message at every peer wrt injection time @@ -133,6 +190,14 @@ def main(): # 5 - Calculate propagation times per message (time for injection to last node) # 6 - Pull statistics from cCadvisor using API (memory, CPU, badnwitdh per node) + # Fetch Hardware metrics from Node containers + for node in node_logs.items(): + node_logs[node[0]]['hw_stats'] = fetch_hw_metrics_from_container(node[1]['container_id']) + + # Do Some plotting? 
+ + + """ We are done """ G_LOGGER.info('Ended') From bae5ab834b85b7e4a08b4d80210165179e64abe3 Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Mon, 20 Feb 2023 18:43:17 +0000 Subject: [PATCH 022/112] Updated the docker image of cAdvisor to an explicit version instead of latest --- run.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/run.sh b/run.sh index bd28760..2ec82d9 100644 --- a/run.sh +++ b/run.sh @@ -3,9 +3,8 @@ dir=$(pwd) # Set up Cadvisor -docker run --volume=/:/rootfs:ro --volume=/var/run:/var/run:rw --volume=/var/lib/docker/:/var/lib/docker:ro --volume=/dev/disk/:/dev/disk:ro --volume=/sys:/sys:ro --volume=/etc/machine-id:/etc/machine-id:ro --publish=8080:8080 --detach=true --name=cadvisor --privileged --device=/dev/kmsg gcr.io/cadvisor/cadvisor - - +# docker run --volume=/:/rootfs:ro --volume=/var/run:/var/run:rw --volume=/var/lib/docker/:/var/lib/docker:ro --volume=/dev/disk/:/dev/disk:ro --volume=/sys:/sys:ro --volume=/etc/machine-id:/etc/machine-id:ro --publish=8080:8080 --detach=true --name=cadvisor --privileged --device=/dev/kmsg gcr.io/cadvisor/cadvisor +docker run --volume=/:/rootfs:ro --volume=/var/run:/var/run:rw --volume=/var/lib/docker/:/var/lib/docker:ro --volume=/dev/disk/:/dev/disk:ro --volume=/sys:/sys:ro --volume=/etc/machine-id:/etc/machine-id:ro --publish=8080:8080 --detach=true --name=cadvisor --privileged --device=/dev/kmsg gcr.io/cadvisor/cadvisor:v0.47.0 # Parse arg if any ARGS1=${1:-"wakurtosis"} ARGS2=${2:-"config.json"} From 084429bd88db46f45bb1cfbee015785bbebd8fec Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Mon, 20 Feb 2023 18:56:05 +0000 Subject: [PATCH 023/112] Minor --- analysis.py | 1 + 1 file changed, 1 insertion(+) diff --git a/analysis.py b/analysis.py index bb65528..3e8cc75 100644 --- a/analysis.py +++ b/analysis.py @@ -50,6 +50,7 @@ def fetch_hw_metrics_from_container(container_id): # Parse the response as JSON summary_stats = json.loads(response.text) + G_LOGGER.debug(summary_stats) 
return summary_stats From b71799398448a739de05c70e424198b443f8f6a1 Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Mon, 20 Feb 2023 18:56:14 +0000 Subject: [PATCH 024/112] Minor --- analysis.py | 1 + 1 file changed, 1 insertion(+) diff --git a/analysis.py b/analysis.py index 3e8cc75..1acdc18 100644 --- a/analysis.py +++ b/analysis.py @@ -39,6 +39,7 @@ def fetch_hw_metrics_from_container(container_id): # cAdvisor API URL endpoint url = 'http://localhost:8080/api/v2.1/summary/docker/%s' %container_id + # Note: We can also use the endpoint /stats instead of summary to get timepoints G_LOGGER.debug('Fetching summary stats from %s ...' %url) # Make an HTTP request to the cAdvisor API to get the summary stats of the container From 028cfee457149d3f6d022faf2b4c248c2f59d453 Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Thu, 23 Feb 2023 19:23:12 +0000 Subject: [PATCH 025/112] Addded violin plots generation --- analysis.py | 126 +++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 106 insertions(+), 20 deletions(-) diff --git a/analysis.py b/analysis.py index 1acdc18..c5de468 100644 --- a/analysis.py +++ b/analysis.py @@ -6,6 +6,8 @@ """ Dependencies """ import sys, logging, yaml, json, time, random, os, argparse, tomllib, glob, re, requests +import matplotlib.pyplot as plt +from scipy import stats """ Globals """ G_APP_NAME = 'WLS-ANALYSIS' @@ -35,7 +37,7 @@ def format(self, record): formatter = logging.Formatter(log_fmt, '%d-%m-%Y %H:%M:%S') return formatter.format(record) -def fetch_hw_metrics_from_container(container_id): +def fetch_cadvisor_summary_from_container(container_id): # cAdvisor API URL endpoint url = 'http://localhost:8080/api/v2.1/summary/docker/%s' %container_id @@ -51,10 +53,42 @@ def fetch_hw_metrics_from_container(container_id): # Parse the response as JSON summary_stats = json.loads(response.text) - G_LOGGER.debug(summary_stats) + # G_LOGGER.debug(summary_stats) return summary_stats +def 
fetch_cadvisor_stats_from_container(container_id): + + # cAdvisor API URL endpoint + url = 'http://localhost:8080/api/v2.1/stats/docker/%s' %container_id + # Note: We can also use the endpoint /stats instead of summary to get timepoints + G_LOGGER.debug('Fetching cAdvisor stats from %s ...' %url) + + # Make an HTTP request to the cAdvisor API to get the summary stats of the container + try: + response = requests.get(url) + except Exception as e: + G_LOGGER.error('%s: %s' % (e.__doc__, e)) + return + + # Parse the response as JSON + stats_dict = json.loads(response.text) + + cpu_usage = [] + memory_usage = [] + for stats_obj in stats_dict.values(): + # print(stats_obj['spec']) + for data_point in stats_obj['stats']: + # print(data_point['timestamp']) + # NOTE: This is comes empty. Check in Ubuntu + # print(data_point['diskio']) + # print('CPU:', data_point['cpu']['usage']['user']) + # print('Memory:', data_point['memory']['usage']) + cpu_usage.append(data_point['cpu']['usage']['user']) + memory_usage.append(data_point['memory']['usage']) + + return {'cpu_usage' : cpu_usage, 'memory_usage' : memory_usage} + def main(): global G_LOGGER @@ -143,9 +177,9 @@ def main(): node_logs[node_id]['published'].append([msg_publishTime, msg_topics, msg_topic, msg_hash]) if msg_hash not in msgs_dict: - msgs_dict[msg_hash] = {'published_ts' : [msg_publishTime], 'received_ts' : []} + msgs_dict[msg_hash] = {'published' : [{'ts' : msg_publishTime, 'node_id' : node_id}], 'received' : []} else: - msgs_dict[msg_hash]['published_ts'].append(msg_publishTime) + msgs_dict[msg_hash]['published'].append({'ts' : msg_publishTime, 'node_id' : node_id}) # G_LOGGER.debug('Published by %s: %s %s %s %s' %(node_id, msg_publishTime, msg_hash, msg_topic, msg_topics)) @@ -154,9 +188,9 @@ def main(): node_logs[node_id]['received'].append([msg_receivedTime, msg_topics, msg_topic, msg_hash]) if msg_hash not in msgs_dict: - msgs_dict[msg_hash] = {'published_ts' : [], 'received_ts' : [msg_receivedTime]} + 
msgs_dict[msg_hash] = {'published' : [], 'received' : [{'ts' : msg_receivedTime, 'node_id' : node_id}]} else: - msgs_dict[msg_hash]['received_ts'].append(msg_receivedTime) + msgs_dict[msg_hash]['received'].append({'ts' : msg_receivedTime, 'node_id' : node_id}) # G_LOGGER.debug('Received in node %s: %s %s %s %s' %(node_id, msg_receivedTime, msg_hash, msg_topic, msg_topics)) @@ -173,17 +207,42 @@ def main(): # Calculate tota propagation times, ie time for a messafe to reach all the nodes # Easiest way is likely to sort the reception time stamps and get the oldest for a specific message within the node - for msg in msgs_dict.items(): - published_ts = msg[1]['published_ts'][0] - msg[1]['latencies'] = [] - for received_ts in msg[1]['received_ts']: - latency = int(received_ts) - int(published_ts) - msg[1]['latencies'].append(latency) - # print(msg[0], msg[1]['published_ts'], received_ts, latency) - msg[1]['max_latency'] = max(msg[1]['latencies']) - msg[1]['min_latency'] = min(msg[1]['latencies']) - - # print(msgs_dict) + + for _, msg_data in msgs_dict.items(): + # NOTE: Carefull here as I am assuming that every message is published once ... + published_ts = int(msg_data['published'][0]['ts']) + node_id = msg_data['published'][0]['node_id'] + + # Compute latencies + latencies = [] + for received_data in msg_data['received']: + # Skip self + if received_data['node_id'] == node_id: + continue + # NOTE: We are getting some negative latencies meaning that the message appears to be received before it was sent ... 
I assume this must be because those are the nodes that got the message injected in the first place + # TLDR: Should be safe to ignore all the negative latencies + latency = int(received_data['ts']) - published_ts + node_id = msg_data['published'][0]['node_id'] + latencies.append(latency) + + msgs_dict[_]['latencies'] = latencies + + msg_propagation_times = [] + for msg_hash, msg_data in msgs_dict.items(): + msg_propagation_times.append(round(max(msg_data['latencies'])/1000000)) + + print(stats.describe(msg_propagation_times)) + + # fig, ax = plt.subplots() + # ax.violinplot(msg_propagation_times, showmedians=True) + # ax.set_title('Message propagation times (sample size: %d messages)' %len(msg_propagation_times)) + # ax.set_ylabel('Milliseconds (ms)') + # ax.spines[['right', 'top']].set_visible(False) + # ax.axes.xaxis.set_visible(False) + # plt.tight_layout() + # plt.savefig("propagation.pdf", format="pdf", bbox_inches="tight") + # plt.show() + ### Statistics we want to compute: # 1 - Make sure that all messages have been delivered to their respective peers (x topic) # 2 - Calculate the latency of every message at every peer wrt injection time @@ -191,15 +250,42 @@ def main(): # 4 - Reconstruct the path of the messages throughout the network where edge weight is latency # 5 - Calculate propagation times per message (time for injection to last node) # 6 - Pull statistics from cCadvisor using API (memory, CPU, badnwitdh per node) + # Pull networking info from prometheus/grafana # Fetch Hardware metrics from Node containers + cpu_usage = [] + memory_usage = [] for node in node_logs.items(): - node_logs[node[0]]['hw_stats'] = fetch_hw_metrics_from_container(node[1]['container_id']) - - # Do Some plotting? 
+ container_stats = fetch_cadvisor_stats_from_container(node[1]['container_id']) + # NOTE: Here we could also chose a different statistic such as mean or average instead of max + cpu_usage.append(max(container_stats['cpu_usage'])) + memory_usage.append(max(container_stats['memory_usage'])) + # Generate plots + fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 10)) + + ax1.violinplot(msg_propagation_times, showmedians=True) + ax1.set_title('Message propagation times \n(sample size: %d messages)' %len(msg_propagation_times)) + ax1.set_ylabel('Milliseconds (ms)') + ax1.spines[['right', 'top']].set_visible(False) + ax1.axes.xaxis.set_visible(False) + + ax2.violinplot(cpu_usage, showmedians=True) + ax2.set_title('Maximum CPU usage per Waku node \n(sample size: %d nodes)' %len(cpu_usage)) + ax2.set_ylabel('CPU Cycles') + ax2.spines[['right', 'top']].set_visible(False) + ax2.axes.xaxis.set_visible(False) + ax3.violinplot(memory_usage, showmedians=True) + ax3.set_title('Maximum memory usage per Waku node \n(sample size: %d nodes)' %len(memory_usage)) + ax3.set_ylabel('Bytes') + ax3.spines[['right', 'top']].set_visible(False) + ax3.axes.xaxis.set_visible(False) + plt.tight_layout() + plt.savefig("analysis.pdf", format="pdf", bbox_inches="tight") + # plt.show() + """ We are done """ G_LOGGER.info('Ended') From 041a6f4e981f135566fae86b364d21660b262476 Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Mon, 27 Feb 2023 10:58:07 +0000 Subject: [PATCH 026/112] Minor --- wsl-module/wsl.py | 26 +------------------------- 1 file changed, 1 insertion(+), 25 deletions(-) diff --git a/wsl-module/wsl.py b/wsl-module/wsl.py index d6c93da..d1fc996 100644 --- a/wsl-module/wsl.py +++ b/wsl-module/wsl.py @@ -419,31 +419,7 @@ def main(): elapsed_s = time.time() - s_time - # Retrieve messages from every node and topic - # G_LOGGER.info('Retriving messages from the enclave ...') - # for node_idx, target in enumerate(targets): - # node_address = 'http://%s/' %target - - # for topic_idx, 
topic in enumerate(topics[node_idx]): - # msg_cnt = get_all_messages_from_node_from_topic(node_address, topic) - # msg_lost = topics_msg_cnt[topic] - msg_cnt - # G_LOGGER.info('- Retrieved %d messages on topic %s from node %s. Lost %d message(s).' %(msg_cnt, topic, node_address, msg_lost)) - - # Output - # summary = { - # "end_ts" : time.time(), - # "params" : config['general'], - # "topics" : list(topics_msg_cnt.keys()), - # "topics_msg_cnt" : topics_msg_cnt, - # "simulation_time" : elapsed_s, - # "total_messages" : len() - # } - - # G_LOGGER.info('Simulation sumnmary: %s' %summary) - - # with open('./summary.json', 'w') as summary_file: - # summary_file.write(json.dumps(summary, indent=4)) - + # Save messages for further analysis with open('./messages.json', 'w') as f: f.write(json.dumps(msgs_dict, indent=4)) From 2e16a76b386b1cc8714fff95bff4c8620194861f Mon Sep 17 00:00:00 2001 From: Jordi Arranz Date: Mon, 27 Feb 2023 10:58:31 +0000 Subject: [PATCH 027/112] Minor --- analysis.py | 326 +++++++++++++++++++++++++++++++++------------------- 1 file changed, 210 insertions(+), 116 deletions(-) diff --git a/analysis.py b/analysis.py index c5de468..ebc5770 100644 --- a/analysis.py +++ b/analysis.py @@ -5,10 +5,15 @@ """ """ Dependencies """ -import sys, logging, yaml, json, time, random, os, argparse, tomllib, glob, re, requests +import sys, logging, json, argparse, tomllib, glob, re, requests +from datetime import datetime +from tqdm_loggable.auto import tqdm +from tqdm_loggable.tqdm_logging import tqdm_logging import matplotlib.pyplot as plt from scipy import stats +from prometheus_api_client import PrometheusConnect + """ Globals """ G_APP_NAME = 'WLS-ANALYSIS' G_LOG_LEVEL = 'DEBUG' @@ -16,6 +21,8 @@ G_DEFAULT_CONFIG_FILE = './config/config.json' G_DEFAULT_TOPOLOGY_PATH = './config/topology_generated' G_DEFAULT_SIMULATION_PATH = './wakurtosis_logs' +G_DEFAULT_FIG_FILENAME = 'analysis.pdf' +G_DEFAULT_SUMMARY_FILENAME = 'summary.json' G_LOGGER = None """ Custom 
logging formatter """ @@ -37,6 +44,85 @@ def format(self, record): formatter = logging.Formatter(log_fmt, '%d-%m-%Y %H:%M:%S') return formatter.format(record) +def generate_summary(): + + # summary = { + # "end_ts" : time.time(), + # "params" : config['general'], + # "topics" : list(topics_msg_cnt.keys()), + # "topics_msg_cnt" : topics_msg_cnt, + # "simulation_time" : elapsed_s, + # "total_messages" : len() + # } + + + + # with open('./summary.json', 'w') as summary_file: + # summary_file.write(json.dumps(summary, indent=4)) + + G_LOGGER.info('Analsysis sumnmary saved in %s' %summary) + +def plot_figure(msg_propagation_times, cpu_usage, memory_usage): + + fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 10)) + + ax1.violinplot(msg_propagation_times, showmedians=True) + ax1.set_title('Message propagation times \n(sample size: %d messages)' %len(msg_propagation_times)) + ax1.set_ylabel('Propagation Time (ms)') + ax1.spines[['right', 'top']].set_visible(False) + ax1.axes.xaxis.set_visible(False) + + ax2.violinplot(cpu_usage, showmedians=True) + ax2.set_title('Maximum CPU usage per Waku node \n(sample size: %d nodes)' %len(cpu_usage)) + ax2.set_ylabel('CPU Cycles') + ax2.spines[['right', 'top']].set_visible(False) + ax2.axes.xaxis.set_visible(False) + + ax3.violinplot(memory_usage, showmedians=True) + ax3.set_title('Maximum memory usage per Waku node \n(sample size: %d nodes)' %len(memory_usage)) + ax3.set_ylabel('Bytes') + ax3.spines[['right', 'top']].set_visible(False) + ax3.axes.xaxis.set_visible(False) + + plt.tight_layout() + + figure_path = '%s/%s' %(G_DEFAULT_SIMULATION_PATH, G_DEFAULT_FIG_FILENAME) + plt.savefig(figure_path, format="pdf", bbox_inches="tight") + + G_LOGGER.info('Figure saved in %s' %figure_path) + +# def fetch_cadvisor_stats_from_container(container_id, start_ts, end_ts, prometheus_port=52118): + +# url='http://localhost:%d' %52118 + +# try: +# G_LOGGER.debug('Connecting to Prometheus server in %s' %url) +# prometheus = 
PrometheusConnect(url, disable_ssl=True, container_label="container_label_com_docker_container_id=%s" %container_id) +# print(prometheus) +# except Exception as e: +# G_LOGGER.error('%s: %s' % (e.__doc__, e)) +# return None + +# metrics = prometheus.get_label_values("__name__") +# print(metrics) + +# try: +# # query = '100 - (avg by(instance) (irate(container_cpu_usage_seconds_total{container_label_com_docker_container_id="<%s>"}[5m])) * 100)' %container_id +# # query = "container_file_descriptors{process_cpu_seconds_total=\"<%s>\"}" %container_id +# # result = prometheus.query(query) +# query = 'process_cpu_seconds_total' +# result = prometheus.custom_query(query) +# G_LOGGER.debug('Querying: %s' %query) +# except Exception as e: +# G_LOGGER.error('%s: %s' % (e.__doc__, e)) +# return None + + + +# print('--->', result) + +# return {'cpu_usage' : 0, 'memory_usage' : 0, 'bandwidth_in' : 0, 'bandwidth_out' : 0} + def fetch_cadvisor_summary_from_container(container_id): # cAdvisor API URL endpoint @@ -57,10 +143,10 @@ def fetch_cadvisor_summary_from_container(container_id): return summary_stats -def fetch_cadvisor_stats_from_container(container_id): +def fetch_cadvisor_stats_from_container(container_id, start_ts, end_ts): # cAdvisor API URL endpoint - url = 'http://localhost:8080/api/v2.1/stats/docker/%s' %container_id + url = 'http://localhost:8080/api/v2.1/stats/docker/%s?count=1000' %(container_id) # Note: We can also use the endpoint /stats instead of summary to get timepoints G_LOGGER.debug('Fetching cAdvisor stats from %s ...' 
%url) @@ -77,8 +163,28 @@ def fetch_cadvisor_stats_from_container(container_id): cpu_usage = [] memory_usage = [] for stats_obj in stats_dict.values(): - # print(stats_obj['spec']) + for data_point in stats_obj['stats']: + + # Only take into account data points wihtin the simulation time + datetime_str = data_point['timestamp'] + # print(datetime_str) + datetime_obj = datetime.fromisoformat(datetime_str[:-1]) + # print(datetime_obj) + # timestamp_ns = int(datetime_obj.timestamp() * 1e9) + # Calculate the total number of seconds and microseconds since the Unix epoch + unix_seconds = (datetime_obj - datetime(1970, 1, 1)).total_seconds() + microseconds = datetime_obj.microsecond + + # Convert to nanoseconds + timestamp_ns = int((unix_seconds * 1e9) + (microseconds * 1e3)) + + # if timestamp_ns < start_ts or timestamp_ns > end_ts: + # G_LOGGER.debug('Data point %d out of the time window [%d-%d]' %(timestamp_ns, start_ts, end_ts)) + # continue + + G_LOGGER.debug('Data point %d' %(timestamp_ns)) + # print(data_point['timestamp']) # NOTE: This is comes empty. 
Check in Ubuntu # print(data_point['diskio']) @@ -87,6 +193,8 @@ def fetch_cadvisor_stats_from_container(container_id): cpu_usage.append(data_point['cpu']['usage']['user']) memory_usage.append(data_point['memory']['usage']) + print(len(cpu_usage)) + return {'cpu_usage' : cpu_usage, 'memory_usage' : memory_usage} def main(): @@ -99,6 +207,8 @@ def main(): handler.setFormatter(CustomFormatter()) G_LOGGER.addHandler(handler) + tqdm_logging.set_level(logging.INFO) + # Set loglevel from config G_LOGGER.setLevel(G_LOG_LEVEL) handler.setLevel(G_LOG_LEVEL) @@ -136,83 +246,103 @@ def main(): # G_LOGGER.debug(nodes_topics) """ Load Simulation Messages """ - msgs_dict = {} - # try: - # with open('%s/messages.json' %simulation_path, 'r') as f: - # msgs_dict = json.load(f) - # except Exception as e: - # G_LOGGER.error('%s: %s' % (e.__doc__, e)) - # sys.exit() + injected_msgs_dict = {} + try: + with open('%s/messages.json' %simulation_path, 'r') as f: + injected_msgs_dict = json.load(f) + except Exception as e: + G_LOGGER.error('%s: %s' % (e.__doc__, e)) + sys.exit() - # G_LOGGER.info('Loaded %d messages.' %len(msgs_dict)) - # G_LOGGER.debug(msgs_dict) + G_LOGGER.info('Loaded %d messages.' 
%len(injected_msgs_dict)) + # G_LOGGER.debug(injected_msgs_dict) - """ Load node level logs """ node_logs = {} - # try: - services_log_paths = glob.glob('%s/*--user-service--*' %simulation_path) + msgs_dict = {} + + # Helper list with all the timestamps + tss = [] + try: + services_log_paths = glob.glob('%s/*--user-service--*' %simulation_path) - for log_path in services_log_paths: - with open('%s/spec.json' %log_path, mode='r') as f: - spec_json = json.load(f) - if spec_json['Path'] == '/usr/bin/wakunode': - node_id = spec_json['Config']['Labels']['com.kurtosistech.id'] - # container_id = spec_json['Name'][1:] - container_id = spec_json['Id'] - node_logs[node_id] = {'published' : [], 'received' : [], 'container_id' : container_id} - - with open('%s/output.log' %log_path, mode='r') as f: + pbar = tqdm(services_log_paths) + for log_path in pbar: + with open('%s/spec.json' %log_path, mode='r') as f: + spec_json = json.load(f) + if spec_json['Path'] == '/usr/bin/wakunode': + node_id = spec_json['Config']['Labels']['com.kurtosistech.id'] - # Process log line by line as a text string - for log_line in f: - # At this stage we only care about Waku Relay protocol - if 'waku.relay' in log_line: - - msg_topics = re.search(r'topics="([^"]+)"', log_line).group(1) - msg_topic = re.search(r'pubsubTopic=([^ ]+)', log_line).group(1) - msg_hash = re.search(r'hash=([^ ]+)', log_line).group(1) + # container_id = spec_json['Name'][1:] + container_id = spec_json['Id'] + node_logs[node_id] = {'published' : [], 'received' : [], 'container_id' : container_id} + + pbar.set_description("Parsing log of node %s" %node_id) - if 'published' in log_line: - msg_publishTime = re.search(r'publishTime=([\d]+)', log_line).group(1) - node_logs[node_id]['published'].append([msg_publishTime, msg_topics, msg_topic, msg_hash]) - - if msg_hash not in msgs_dict: - msgs_dict[msg_hash] = {'published' : [{'ts' : msg_publishTime, 'node_id' : node_id}], 'received' : []} - else: - 
msgs_dict[msg_hash]['published'].append({'ts' : msg_publishTime, 'node_id' : node_id}) + with open('%s/output.log' %log_path, mode='r') as f: + + # Process log line by line as a text string + for log_line in f: + + # At this stage we only care about Waku Relay protocol + if 'waku.relay' in log_line: - # G_LOGGER.debug('Published by %s: %s %s %s %s' %(node_id, msg_publishTime, msg_hash, msg_topic, msg_topics)) + msg_topics = re.search(r'topics="([^"]+)"', log_line).group(1) + msg_topic = re.search(r'pubsubTopic=([^ ]+)', log_line).group(1) + msg_hash = re.search(r'hash=([^ ]+)', log_line).group(1) - elif 'received' in log_line: - msg_receivedTime = re.search(r'receivedTime=([\d]+)', log_line).group(1) - node_logs[node_id]['received'].append([msg_receivedTime, msg_topics, msg_topic, msg_hash]) - - if msg_hash not in msgs_dict: - msgs_dict[msg_hash] = {'published' : [], 'received' : [{'ts' : msg_receivedTime, 'node_id' : node_id}]} - else: - msgs_dict[msg_hash]['received'].append({'ts' : msg_receivedTime, 'node_id' : node_id}) - - # G_LOGGER.debug('Received in node %s: %s %s %s %s' %(node_id, msg_receivedTime, msg_hash, msg_topic, msg_topics)) - - - G_LOGGER.info('Parsed node \"%s\" log in %s/output.log' %(node_id, log_path)) - # except Exception as e: - # G_LOGGER.error('%s: %s' % (e.__doc__, e)) - # sys.exit() - - # G_LOGGER.debug(node_logs.keys()) - # G_LOGGER.debug(node_logs) - # for item in node_logs.items(): - # print(item[0], len(item[1]['published']), len(item[1]['received'])) - - # Calculate tota propagation times, ie time for a messafe to reach all the nodes - # Easiest way is likely to sort the reception time stamps and get the oldest for a specific message within the node + if 'published' in log_line: + msg_publishTime = int(re.search(r'publishTime=([\d]+)', log_line).group(1)) + tss.append(msg_publishTime) + + node_logs[node_id]['published'].append([msg_publishTime, msg_topics, msg_topic, msg_hash]) + + if msg_hash not in msgs_dict: + msgs_dict[msg_hash] 
= {'published' : [{'ts' : msg_publishTime, 'node_id' : node_id}], 'received' : []} + else: + msgs_dict[msg_hash]['published'].append({'ts' : msg_publishTime, 'node_id' : node_id}) + + elif 'received' in log_line: + msg_receivedTime = int(re.search(r'receivedTime=([\d]+)', log_line).group(1)) + tss.append(msg_receivedTime) + + node_logs[node_id]['received'].append([msg_receivedTime, msg_topics, msg_topic, msg_hash]) + + if msg_hash not in msgs_dict: + msgs_dict[msg_hash] = {'published' : [], 'received' : [{'ts' : msg_receivedTime, 'node_id' : node_id}]} + else: + msgs_dict[msg_hash]['received'].append({'ts' : msg_receivedTime, 'node_id' : node_id}) + + G_LOGGER.debug('Parsed node \"%s\" log in %s/output.log' %(node_id, log_path)) + except Exception as e: + G_LOGGER.error('%s: %s' % (e.__doc__, e)) + sys.exit() + + # Compute simulation time window + simulation_start_ts = min(tss) + simulation_end_ts = max(tss) + simulation_time_ms = round((simulation_end_ts - simulation_start_ts) / 1000000) + G_LOGGER.info('Simulation started at %d, ended at %d Effective simulation time was %d ms. ' %(simulation_start_ts, simulation_end_ts, simulation_time_ms)) + + # Compute message delivery + total_messages = len(injected_msgs_dict) + delivered_messages = len(msgs_dict) + lost_messages = total_messages - delivered_messages + delivery_rate = delivered_messages * 100 / total_messages - for _, msg_data in msgs_dict.items(): + G_LOGGER.info('%d of %d messages delivered. Lost: %d Delivery rate %.2f%%' %(delivered_messages, total_messages, lost_messages, delivery_rate)) + + # Compute message latencies and propagation times througout the network + pbar = tqdm(msgs_dict.items()) + for msg_id, msg_data in pbar: # NOTE: Carefull here as I am assuming that every message is published once ... 
+        if len(msg_data['published']) > 1:
+            G_LOGGER.warning('Several publishers of message %s' %msg_id)
+
         published_ts = int(msg_data['published'][0]['ts'])
         node_id = msg_data['published'][0]['node_id']
+        pbar.set_description('Computing latencies of message %s' %msg_id)
+
         # Compute latencies
         latencies = []
         for received_data in msg_data['received']:
@@ -225,67 +355,31 @@ def main():
             node_id = msg_data['published'][0]['node_id']
             latencies.append(latency)
 
-        msgs_dict[_]['latencies'] = latencies
+        msgs_dict[msg_id]['latencies'] = latencies
 
     msg_propagation_times = []
-    for msg_hash, msg_data in msgs_dict.items():
+    pbar = tqdm(msgs_dict.items())
+    for msg_id, msg_data in pbar:
+        pbar.set_description('Computing propagation time of message %s' %msg_id)
         msg_propagation_times.append(round(max(msg_data['latencies'])/1000000))
 
-    print(stats.describe(msg_propagation_times))
-
-    # fig, ax = plt.subplots()
-    # ax.violinplot(msg_propagation_times, showmedians=True)
-    # ax.set_title('Message propagation times (sample size: %d messages)' %len(msg_propagation_times))
-    # ax.set_ylabel('Milliseconds (ms)')
-    # ax.spines[['right', 'top']].set_visible(False)
-    # ax.axes.xaxis.set_visible(False)
-    # plt.tight_layout()
-    # plt.savefig("propagation.pdf", format="pdf", bbox_inches="tight")
-    # plt.show()
-
-    ### Statistics we want to compute:
-    # 1 - Make sure that all messages have been delivered to their respective peers (x topic)
-    # 2 - Calculate the latency of every message at every peer wrt injection time
-    # 3 - Summarise/Visualise latencies per node / per topic / per message size?
- # 4 - Reconstruct the path of the messages throughout the network where edge weight is latency - # 5 - Calculate propagation times per message (time for injection to last node) - # 6 - Pull statistics from cCadvisor using API (memory, CPU, badnwitdh per node) - # Pull networking info from prometheus/grafana - # Fetch Hardware metrics from Node containers cpu_usage = [] memory_usage = [] - for node in node_logs.items(): - container_stats = fetch_cadvisor_stats_from_container(node[1]['container_id']) + pbar = tqdm(node_logs.items()) + for node in pbar: + pbar.set_description('Fetching hardware stats from container %s' %node[1]['container_id']) + container_stats = fetch_cadvisor_stats_from_container(node[1]['container_id'], simulation_start_ts, simulation_end_ts) # NOTE: Here we could also chose a different statistic such as mean or average instead of max cpu_usage.append(max(container_stats['cpu_usage'])) memory_usage.append(max(container_stats['memory_usage'])) - # Generate plots - fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 10)) - - ax1.violinplot(msg_propagation_times, showmedians=True) - ax1.set_title('Message propagation times \n(sample size: %d messages)' %len(msg_propagation_times)) - ax1.set_ylabel('Milliseconds (ms)') - ax1.spines[['right', 'top']].set_visible(False) - ax1.axes.xaxis.set_visible(False) + # Generate Figure + plot_figure(msg_propagation_times, cpu_usage, memory_usage) - ax2.violinplot(cpu_usage, showmedians=True) - ax2.set_title('Maximum CPU usage per Waku node \n(sample size: %d nodes)' %len(cpu_usage)) - ax2.set_ylabel('CPU Cycles') - ax2.spines[['right', 'top']].set_visible(False) - ax2.axes.xaxis.set_visible(False) - - ax3.violinplot(memory_usage, showmedians=True) - ax3.set_title('Maximum memory usage per Waku node \n(sample size: %d nodes)' %len(memory_usage)) - ax3.set_ylabel('Bytes') - ax3.spines[['right', 'top']].set_visible(False) - ax3.axes.xaxis.set_visible(False) + # Generate summary + # generate_summary() - 
plt.tight_layout() - plt.savefig("analysis.pdf", format="pdf", bbox_inches="tight") - # plt.show() - """ We are done """ G_LOGGER.info('Ended') From 035fbc9379ee8571d53935a68369ad6866b711f1 Mon Sep 17 00:00:00 2001 From: Alberto Date: Mon, 27 Feb 2023 13:40:59 +0100 Subject: [PATCH 028/112] Modified runner and wakurtosis to add cadvisor to the enclave. Also updated kurtosis version --- README.md | 2 +- build.sh | 4 +++- monitoring/prometheus.yml | 6 ++++- run.sh | 48 ++++++++++++++++++++++++++------------- 4 files changed, 41 insertions(+), 19 deletions(-) diff --git a/README.md b/README.md index 9a90897..f976807 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ More info about Kurtosis: https://docs.kurtosis.com/ #### Before using this repository note that: -- **You are using Kurtosis version 0.66.2**. This is important, as they are working on it and changes can be huge depending on different versions. You can find all Kurtosis versions [here](https://github.com/kurtosis-tech/kurtosis-cli-release-artifacts/releases). +- **You are using Kurtosis version 0.67.1**. This is important, as they are working on it and changes can be huge depending on different versions. You can find all Kurtosis versions [here](https://github.com/kurtosis-tech/kurtosis-cli-release-artifacts/releases). - The topology files that will be used by default are defined in `config/topology_generated/`. This topology is created with the [gennet](gennet-module/Readme.md) module. - Kurtosis can set up services in a parallel manner, defined in the `config.json` file (see below). - Only `kurtosis` and `docker` are needed to run this. 
diff --git a/build.sh b/build.sh index 943a439..9e5d06d 100644 --- a/build.sh +++ b/build.sh @@ -2,8 +2,10 @@ sudo apt-get update sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin +apt-get install -y jq + # Install the suitable kurtosis-cli -kurtosis_version=0.66.2 +kurtosis_version=0.67.1 echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list sudo apt update sudo apt-mark unhold kurtosis-cli diff --git a/monitoring/prometheus.yml b/monitoring/prometheus.yml index 0ffb898..4b03340 100644 --- a/monitoring/prometheus.yml +++ b/monitoring/prometheus.yml @@ -10,4 +10,8 @@ scrape_configs: scrape_interval: 1s file_sd_configs: - files: - - '/tmp/targets.json' \ No newline at end of file + - '/tmp/targets.json' + - job_name: 'cadvisor' + scrape_interval: 5s + static_configs: + - targets: ['cadvisor:8080'] diff --git a/run.sh b/run.sh index 2ec82d9..bb5185a 100644 --- a/run.sh +++ b/run.sh @@ -2,9 +2,6 @@ dir=$(pwd) -# Set up Cadvisor -# docker run --volume=/:/rootfs:ro --volume=/var/run:/var/run:rw --volume=/var/lib/docker/:/var/lib/docker:ro --volume=/dev/disk/:/dev/disk:ro --volume=/sys:/sys:ro --volume=/etc/machine-id:/etc/machine-id:ro --publish=8080:8080 --detach=true --name=cadvisor --privileged --device=/dev/kmsg gcr.io/cadvisor/cadvisor -docker run --volume=/:/rootfs:ro --volume=/var/run:/var/run:rw --volume=/var/lib/docker/:/var/lib/docker:ro --volume=/dev/disk/:/dev/disk:ro --volume=/sys:/sys:ro --volume=/etc/machine-id:/etc/machine-id:ro --publish=8080:8080 --detach=true --name=cadvisor --privileged --device=/dev/kmsg gcr.io/cadvisor/cadvisor:v0.47.0 # Parse arg if any ARGS1=${1:-"wakurtosis"} ARGS2=${2:-"config.json"} @@ -16,9 +13,38 @@ wakurtosis_config_file=$ARGS2 echo "- Enclave name: " $enclave_name echo "- Configuration file: " $wakurtosis_config_file +# Delete the enclave just in case +echo -e "\nCleaning up Kurtosis environment "$enclave_name +docker container stop 
cadvisor > /dev/null 2>&1 +docker container rm cadvisor > /dev/null 2>&1 +kurtosis enclave rm -f $enclave_name > /dev/null 2>&1 +# kurtosis clean -a > /dev/null 2>&1 we do not want to delete all enclaves, just the one we will execute + +# Delete previous logs +echo -e "\Deleting previous logs in ${enclave_name}_logs" +rm -rf ./${enclave_name}_logs > /dev/null 2>&1 +rm ./kurtosisrun_log.txt > /dev/null 2>&1 + +# Preparing enclave +echo "Preparing enclave..." +kurtosis enclave add --name ${enclave_name} +enclave_preffix="$(kurtosis enclave inspect --full-uuids $enclave_name | grep UUID: | awk '{print $2}')" +echo "Enclave network: "$enclave_preffix + +# Get enclave last IP +subnet="$(docker network inspect $enclave_preffix | jq -r '.[].IPAM.Config[0].Subnet')" +echo "Enclave subnetork: $subnet" +last_ip="$(ipcalc $subnet | grep HostMax | awk '{print $2}')" +echo "cAdvisor IP: $last_ip" + + +# Set up Cadvisor +# docker run --volume=/:/rootfs:ro --volume=/var/run:/var/run:rw --volume=/var/lib/docker/:/var/lib/docker:ro --volume=/dev/disk/:/dev/disk:ro --volume=/sys:/sys:ro --volume=/etc/machine-id:/etc/machine-id:ro --publish=8080:8080 --detach=true --name=cadvisor --privileged --device=/dev/kmsg gcr.io/cadvisor/cadvisor +docker run --volume=/:/rootfs:ro --volume=/var/run:/var/run:rw --volume=/var/lib/docker/:/var/lib/docker:ro --volume=/dev/disk/:/dev/disk:ro --volume=/sys:/sys:ro --volume=/etc/machine-id:/etc/machine-id:ro --publish=8080:8080 --detach=true --name=cadvisor --privileged --device=/dev/kmsg --network $enclave_preffix --ip=$last_ip gcr.io/cadvisor/cadvisor:v0.47.0 + + # Delete topology rm -rf ./config/topology_generated > /dev/null 2>&1 - # Remove previous logs rm -rf ./$enclave_name_logs > /dev/null 2>&1 @@ -38,16 +64,6 @@ fi docker rm gennet-container > /dev/null 2>&1 -# Delete the enclave just in case -echo -e "\nCleaning up Kurtosis environment "$enclave_name -kurtosis enclave rm -f $enclave_name > /dev/null 2>&1 -kurtosis clean -a > /dev/null 2>&1 - 
-# Delete previous logs -echo -e "\Deleting previous logs in ${enclave_name}_logs" -rm -rf ./${enclave_name}_logs > /dev/null 2>&1 -rm ./kurtosisrun_log.txt > /dev/null 2>&1 - # Create the new enclave and run the simulation jobs=$(cat config/${wakurtosis_config_file} | jq -r ".kurtosis.jobs") @@ -62,7 +78,7 @@ wsl_service_name=$(kurtosis enclave inspect $enclave_name 2>/dev/null | grep wsl echo -e "\n--> To see simulation logs run: kurtosis service logs $enclave_name $wsl_service_name <--" # Fetch the Grafana address & port -grafana_host=$(kurtosis enclave inspect $enclave_name 2>/dev/null | grep grafana- | awk '{print $6}') +grafana_host=$(kurtosis enclave inspect $enclave_name | grep grafana | awk '{print $6}') echo -e "\n--> Statistics in Grafana server at http://$grafana_host/ <--" echo "Output of kurtosis run command written in kurtosisrun_log.txt" @@ -76,7 +92,7 @@ cid_suffix="$(kurtosis enclave inspect --full-uuids $enclave_name | grep $wsl_se # Construct the fully qualified container name that kurtosis created cid="$enclave_preffix--user-service--$cid_suffix" -# Wait for the container to halt; this will block +# Wait for the container to halt; this will block echo -e "Waiting for simulation to finish ..." 
status_code="$(docker container wait $cid)" From 9022c6dc3b564ee3a8061aa24c0da519748b8a48 Mon Sep 17 00:00:00 2001 From: 0xFugue <119708655+0xFugue@users.noreply.github.com> Date: Wed, 22 Feb 2023 20:20:17 +0530 Subject: [PATCH 029/112] multiple nodes per containers: done (#89) --- config/config.json | 1 + gennet-module/gennet.py | 91 +++++++++++++++++++++++++++++------------ 2 files changed, 66 insertions(+), 26 deletions(-) diff --git a/config/config.json b/config/config.json index 23273f5..3a0a62a 100644 --- a/config/config.json +++ b/config/config.json @@ -13,6 +13,7 @@ "num_topics": 1, "num_partitions": 1, "num_subnets": 1, + "container_size": "1", "node_type_distribution": { "nwaku":100, "gowaku":0 }, "node_type": "desktop", "network_type": "newmanwattsstrogatz", diff --git a/gennet-module/gennet.py b/gennet-module/gennet.py index 9c71c34..11dbabd 100755 --- a/gennet-module/gennet.py +++ b/gennet-module/gennet.py @@ -45,8 +45,7 @@ class networkType(Enum): NW_DATA_FNAME = "network_data.json" -NODE_PREFIX = "node" -SUBNET_PREFIX = "subnetwork" +NODE_PREFIX, SUBNET_PREFIX, CONTAINER_PREFIX = "node", "subnetwork", "container" ### I/O related fns ############################################################## @@ -182,16 +181,15 @@ def generate_subnets(G, num_subnets): offsets = sorted(random.sample(range(0, n), num_subnets - 1)) offsets.append(n - 1) - start, subnet_id, subnets = 0, 0, {} - #start = 0 - #subnets = {} - #subnet_id = 0 + start, subnet_id, node2subnet = 0, 0, {} for end in offsets: + l = [] for i in range(start, end + 1): - subnets[f"{NODE_PREFIX}_{lst[i]}"] = f"{SUBNET_PREFIX}_{subnet_id}" + node2subnet[f"{NODE_PREFIX}_{lst[i]}"] = f"{SUBNET_PREFIX}_{subnet_id}" + #node2subnet[lst[i]] = subnet_id start = end subnet_id += 1 - return subnets + return node2subnet ### file format related fns ########################################################### @@ -223,33 +221,71 @@ def generate_node_types(node_type_distribution, G): node_types_enum = [nodeType(s) 
for s in node_types_str] return node_types_enum -port_shift = 0 +# Inverts a dictionary of lists +def invert_dict_of_list(d): + inv = {} + for key, val in d.items(): + if val not in inv: + inv[val] = [key] + else: + inv[val].append(key) + return inv + + +# Packs the nodes into container in a subnet aware manner : optimal +# Number of containers = +# $$ O(\sum_{i=0}^{num_subnets} log_{container_size}(#Nodes_{numsubnets}) + num_subnets) +def pack_nodes(container_size, node2subnet, G): + subnet2node = invert_dict_of_list(node2subnet) + port_shift, cid, node2container = 0, 0, {} + for subnet in subnet2node: + for node in subnet2node[subnet]: + if port_shift >= container_size : + port_shift, cid = 0, cid+1 + node2container[node] = (port_shift, f"{CONTAINER_PREFIX}_{cid}") + port_shift += 1 + return node2container -# Generates network-wide json and per-node toml and writes them -def generate_and_write_files(dirname, num_topics, num_subnets, node_type_distribution, G): - topics = generate_topics(num_topics) - subnets = generate_subnets(G, num_subnets) - node_types_enum = generate_node_types(node_type_distribution, G) - global port_shift - i, json_dump = 0, {} +# Generates network-wide json and per-node toml and writes them +def generate_and_write_files(ctx: typer, G): + topics = generate_topics(ctx.params["num_topics"]) + node2subnet = generate_subnets(G, ctx.params["num_subnets"]) + node_types_enum = generate_node_types(ctx.params["node_type_distribution"], G) + node2container = pack_nodes(ctx.params["container_size"], node2subnet, G) + + json_dump = {} + json_dump[CONTAINER_PREFIX] = {} + inv = {} + for key, val in node2container.items(): + if val[1] not in inv: + inv[val[1]] = [key] + else: + inv[val[1]].append(key) + for container, nodes in inv.items(): + json_dump[CONTAINER_PREFIX][container] = nodes + + i = 0 for node in G.nodes: - # write the per node toml for the ith node of appropriate type + # package container_size nodes per container + + # write the per node 
toml for the i^ith node of appropriate type node_type, i = node_types_enum[i], i+1 - write_toml(dirname, node, generate_toml(topics, node_type)) + write_toml(ctx.params["output_dir"], node, generate_toml(topics, node_type)) json_dump[node] = {} json_dump[node]["static_nodes"] = [] for edge in G.edges(node): json_dump[node]["static_nodes"].append(edge[1]) - json_dump[node][SUBNET_PREFIX] = subnets[node] + json_dump[node][SUBNET_PREFIX] = node2subnet[node] json_dump[node]["image"] = nodeTypeToDocker.get(node_type) # the per node tomls will continue for now as they include topics json_dump[node]["node_config"] = f"{node}.toml" # logs ought to continue as they need to be unique json_dump[node]["node_log"] = f"{node}.log" + port_shift, cid = node2container[node] json_dump[node]["port_shift"] = port_shift - port_shift += 1 - write_json(dirname, json_dump) # network wide json + json_dump[node]["container_id"] = cid + write_json(ctx.params["output_dir"], json_dump) # network wide json # sanity check : valid json with "gennet" config @@ -292,8 +328,9 @@ def _num_subnets_callback(ctx: typer, Context, num_subnets: int): return num_subnets -def main(benchmark: bool = False, - container_size: int = 1, # TODO: subnet aware container packing +def main(ctx: typer.Context, + benchmark: bool = False, + container_size: int = 1, # TODO: reduce container packer memory consumption output_dir: str = "network_data", prng_seed: int = 3, num_nodes: int = 4, @@ -337,16 +374,18 @@ def main(benchmark: bool = False, os.makedirs(output_dir, exist_ok=True) # Generate file format specific data structs and write the files - generate_and_write_files(output_dir, num_topics, num_subnets, node_type_distribution, G) + generate_and_write_files(ctx, G) + #generate_and_write_files(output_dir, num_topics, num_subnets, node_type_distribution, G) #draw(G, outpur_dir) end = time.time() - print(f"Network generation took {end/10**9} secs.\nThe generated network is under ./{output_dir}") + time_took = end - start 
+ print(f"For {num_nodes} nodes, network generation took {time_took} secs.\nThe generated network is under ./{output_dir}") # Benchmarking. Record finish time and stop the malloc tracing if benchmark : mem_curr, mem_max = tracemalloc.get_traced_memory() tracemalloc.stop() - print(f"STATS: For {num_nodes} nodes, time took is {(end-start)} secs, peak memory usage is {mem_max/(1024*1024)} MBs\n") + print(f"STATS: For {num_nodes} nodes, time took is {time_took} secs, peak memory usage is {mem_max/(1024*1024)} MBs\n") if __name__ == "__main__": From 974d5f5adbffce657b3ee314fb8b8707ccb20f1f Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Thu, 16 Feb 2023 16:43:11 +0100 Subject: [PATCH 030/112] WIP: Starting to refactor code so different nodes can be set up per container --- config/config.json | 3 ++- main.star | 3 ++- src/node_builders.star | 14 ++++++++++++-- src/system_variables.star | 1 - 4 files changed, 16 insertions(+), 5 deletions(-) diff --git a/config/config.json b/config/config.json index 3a0a62a..f78903d 100644 --- a/config/config.json +++ b/config/config.json @@ -6,7 +6,8 @@ "enclave_name": "wakurtosis", "topology_path": "./config/topology_generated/", "jobs": 4, - "interconnection_batch": 10 + "interconnection_batch": 10, + "nodes_per_container": 5 }, "gennet": { "num_nodes": 9, diff --git a/main.star b/main.star index 86383bb..b1a7cc1 100644 --- a/main.star +++ b/main.star @@ -20,13 +20,14 @@ def run(plan, args): kurtosis_config = config['kurtosis'] wsl_config = config['wsl'] interconnection_batch = kurtosis_config['interconnection_batch'] + nodes_per_container = kurtosis_config['nodes_per_container'] # Load network topology waku_topology_json = read_file(src=vars.TOPOLOGIES_LOCATION + vars.DEFAULT_TOPOLOGY_FILE) waku_topology = json.decode(waku_topology_json) # Set up nodes - services = nodes.instantiate_services(plan, waku_topology, False) + services = nodes.instantiate_services(plan, waku_topology, nodes_per_container, False) # Set up prometheus + 
graphana prometheus_service = prometheus.set_up_prometheus(plan, services) diff --git a/src/node_builders.star b/src/node_builders.star index 8d5f57d..77516c1 100644 --- a/src/node_builders.star +++ b/src/node_builders.star @@ -62,7 +62,7 @@ def prepare_nomos_service(plan, test, test2): plan.print("nomos") -def instantiate_services(plan, network_topology, testing): +def instantiate_services(plan, network_topology, nodes_per_container, testing): """ As we will need to access for the service information later, the structure is the following: @@ -91,7 +91,17 @@ def instantiate_services(plan, network_topology, testing): all_services = {} # Get up all nodes - for service_name in network_topology.keys(): + filterByImage = lambda keys: {x: network_topology[x] for x in keys} + services_by_image = [] + for image in vars.NODE_IMAGES_FROM_GENNET: + services_by_image.append(filterByImage(image)) + + # set up dicts by batch by grouped images + + for i in range(0, len(service_names), nodes_per_container): + services_in_container = service_names[i:i+nodes_per_container] + + image = network_topology[service_name]["image"] config_file = network_topology[service_name]["node_config"] diff --git a/src/system_variables.star b/src/system_variables.star index 4ca3c7f..2ae721a 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -1,5 +1,4 @@ # Waku Configuration -# NWAKU_IMAGE = "statusteam/nim-waku:019f357d" NWAKU_IMAGE = "statusteam/nim-waku:nwaku-trace" GOWAKU_IMAGE = "gowaku" From b31f087863847e82acfbf9e15f44ef4a4d661190 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Thu, 16 Feb 2023 17:00:06 +0100 Subject: [PATCH 031/112] Pass all necessary information to builder --- src/node_builders.star | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/src/node_builders.star b/src/node_builders.star index 77516c1..c959611 100644 --- a/src/node_builders.star +++ b/src/node_builders.star @@ -6,7 +6,9 @@ waku = 
import_module(vars.WAKU_MODULE) files = import_module(vars.FILE_HELPERS_MODULE) -def prepare_nwaku_service(nwakunode_name, all_services, config_file, artifact_id): +def prepare_nwaku_service(nwakunode_names, all_services, config_files, artifact_ids): + + add_service_config = ServiceConfig( image=vars.NWAKU_IMAGE, ports={ @@ -20,16 +22,16 @@ def prepare_nwaku_service(nwakunode_name, all_services, config_file, artifact_id transport_protocol="TCP"), }, files={ - vars.CONTAINER_NODE_CONFIG_FILE_LOCATION: artifact_id + vars.CONTAINER_NODE_CONFIG_FILE_LOCATION: artifact_ids }, entrypoint=vars.NWAKU_ENTRYPOINT, cmd=[ vars.NODE_CONFIGURATION_FILE_FLAG + - vars.CONTAINER_NODE_CONFIG_FILE_LOCATION + config_file + vars.CONTAINER_NODE_CONFIG_FILE_LOCATION + config_files ] ) - all_services[nwakunode_name] = add_service_config + all_services[nwakunode_names] = add_service_config def prepare_gowaku_service(gowakunode_name, all_services, config_file, artifact_id): @@ -97,20 +99,27 @@ def instantiate_services(plan, network_topology, nodes_per_container, testing): services_by_image.append(filterByImage(image)) # set up dicts by batch by grouped images + for services in services_by_image: + service_names = services.keys() + image = services[service_names[0]]["image"] + service_builder = service_dispatcher[image] - for i in range(0, len(service_names), nodes_per_container): - services_in_container = service_names[i:i+nodes_per_container] - + for i in range(0, len(service_names), nodes_per_container): + # We have a batch of nodes + services_in_container = service_names[i:i+nodes_per_container] - image = network_topology[service_name]["image"] - config_file = network_topology[service_name]["node_config"] + # Get all config file names needed + config_file_names = [services[service_config_file["node_config"]] + for service_config_file in services_in_container] - service_builder = service_dispatcher[image] + config_files_artifact_ids = [files.get_toml_configuration_artifact(plan, 
config_file_name, + service_name, testing) + for config_file_name, service_name in zip(config_file_names, services_in_container)] - configuration_artifact_id = files.get_toml_configuration_artifact(plan, config_file, - service_name, testing) - service_builder(service_name, all_services, config_file, configuration_artifact_id) + # All them in ServiceConfig + service_builder(services_in_container, all_services, config_file_names, + config_files_artifact_ids) all_services_information = plan.add_services( configs = all_services From 5100214440ac8348a8b7ad900c2637731fda90b6 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Thu, 16 Feb 2023 17:31:13 +0100 Subject: [PATCH 032/112] Changed builder to add several nodes in ServiceConfig --- src/node_builders.star | 50 +++++++++++++++++++++++++++--------------- 1 file changed, 32 insertions(+), 18 deletions(-) diff --git a/src/node_builders.star b/src/node_builders.star index c959611..3486ec6 100644 --- a/src/node_builders.star +++ b/src/node_builders.star @@ -8,27 +8,41 @@ files = import_module(vars.FILE_HELPERS_MODULE) def prepare_nwaku_service(nwakunode_names, all_services, config_files, artifact_ids): + # TODO MAKE SURE THEY MATCH + + prepared_ports = {} + for i in range(len(nwakunode_names)): + prepared_ports[vars.WAKU_RPC_PORT_ID+"_"+nwakunode_names[i]] = PortSpec(number=vars.WAKU_TCP_PORT + i, + transport_protocol="TCP"), + prepared_ports[vars.PROMETHEUS_PORT_ID+"_"+nwakunode_names[i]] = PortSpec( + number=vars.PROMETHEUS_TCP_PORT + i, + transport_protocol="TCP") + prepared_ports[vars.WAKU_LIBP2P_PORT_ID+"_"+nwakunode_names[i]] = PortSpec( + number=vars.WAKU_LIBP2P_PORT + i, + transport_protocol="TCP") + + + prepared_files = {} + for i in range(len(nwakunode_names)): + prepared_files[vars.CONTAINER_NODE_CONFIG_FILE_LOCATION+nwakunode_names[i]] = artifact_ids[i] + + prepared_cmd = [] + for i in range(len(nwakunode_names)): + prepared_cmd.extend(vars.NWAKU_ENTRYPOINT) + 
prepared_cmd.append(vars.NODE_CONFIGURATION_FILE_FLAG+ + vars.CONTAINER_NODE_CONFIG_FILE_LOCATION+nwakunode_names[i]+ + config_files[i]) + if i != len(nwakunode_names) - 1: + prepared_cmd.append(" && ") + + add_service_config = ServiceConfig( image=vars.NWAKU_IMAGE, - ports={ - vars.WAKU_RPC_PORT_ID: PortSpec(number=vars.WAKU_TCP_PORT, - transport_protocol="TCP"), - vars.PROMETHEUS_PORT_ID: PortSpec( - number=vars.PROMETHEUS_TCP_PORT, - transport_protocol="TCP"), - vars.WAKU_LIBP2P_PORT_ID: PortSpec( - number=vars.WAKU_LIBP2P_PORT, - transport_protocol="TCP"), - }, - files={ - vars.CONTAINER_NODE_CONFIG_FILE_LOCATION: artifact_ids - }, - entrypoint=vars.NWAKU_ENTRYPOINT, - cmd=[ - vars.NODE_CONFIGURATION_FILE_FLAG + - vars.CONTAINER_NODE_CONFIG_FILE_LOCATION + config_files - ] + ports=prepared_ports, + files=prepared_files, + entrypoint="", + cmd=prepared_cmd ) all_services[nwakunode_names] = add_service_config From 315722a99e4d4948cd034eadd30e2f4047fb513b Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Thu, 16 Feb 2023 19:30:47 +0100 Subject: [PATCH 033/112] Several nodes starting in same container working --- src/node_builders.star | 39 ++++++++++++++++++++------------------- src/system_variables.star | 5 +++-- 2 files changed, 23 insertions(+), 21 deletions(-) diff --git a/src/node_builders.star b/src/node_builders.star index 3486ec6..db6c0b3 100644 --- a/src/node_builders.star +++ b/src/node_builders.star @@ -6,14 +6,14 @@ waku = import_module(vars.WAKU_MODULE) files = import_module(vars.FILE_HELPERS_MODULE) -def prepare_nwaku_service(nwakunode_names, all_services, config_files, artifact_ids): +def prepare_nwaku_service(plan, nwakunode_names, all_services, config_files, artifact_ids): # TODO MAKE SURE THEY MATCH prepared_ports = {} for i in range(len(nwakunode_names)): prepared_ports[vars.WAKU_RPC_PORT_ID+"_"+nwakunode_names[i]] = PortSpec(number=vars.WAKU_TCP_PORT + i, - transport_protocol="TCP"), + transport_protocol="TCP") 
prepared_ports[vars.PROMETHEUS_PORT_ID+"_"+nwakunode_names[i]] = PortSpec( number=vars.PROMETHEUS_TCP_PORT + i, transport_protocol="TCP") @@ -26,26 +26,24 @@ def prepare_nwaku_service(nwakunode_names, all_services, config_files, artifact_ for i in range(len(nwakunode_names)): prepared_files[vars.CONTAINER_NODE_CONFIG_FILE_LOCATION+nwakunode_names[i]] = artifact_ids[i] - prepared_cmd = [] + prepared_cmd = "" for i in range(len(nwakunode_names)): - prepared_cmd.extend(vars.NWAKU_ENTRYPOINT) - prepared_cmd.append(vars.NODE_CONFIGURATION_FILE_FLAG+ - vars.CONTAINER_NODE_CONFIG_FILE_LOCATION+nwakunode_names[i]+ - config_files[i]) + prepared_cmd += vars.NWAKU_ENTRYPOINT + " " + prepared_cmd += vars.NODE_CONFIGURATION_FILE_FLAG + vars.CONTAINER_NODE_CONFIG_FILE_LOCATION +\ + nwakunode_names[i] + "/" + config_files[i] + " " + prepared_cmd += "--ports-shift="+str(i) if i != len(nwakunode_names) - 1: - prepared_cmd.append(" && ") - - + prepared_cmd += " & " add_service_config = ServiceConfig( image=vars.NWAKU_IMAGE, ports=prepared_ports, files=prepared_files, - entrypoint="", - cmd=prepared_cmd + entrypoint=["/bin/sh", "-c"], + cmd=[prepared_cmd] ) - all_services[nwakunode_names] = add_service_config + all_services["0"] = add_service_config def prepare_gowaku_service(gowakunode_name, all_services, config_file, artifact_id): @@ -107,13 +105,15 @@ def instantiate_services(plan, network_topology, nodes_per_container, testing): all_services = {} # Get up all nodes - filterByImage = lambda keys: {x: network_topology[x] for x in keys} services_by_image = [] for image in vars.NODE_IMAGES_FROM_GENNET: - services_by_image.append(filterByImage(image)) + services_by_image.append({k: v for (k, v) in network_topology.items() if v["image"] == image}) # set up dicts by batch by grouped images for services in services_by_image: + if len(services) == 0: + continue + service_names = services.keys() image = services[service_names[0]]["image"] service_builder = service_dispatcher[image] @@ 
-123,7 +123,7 @@ def instantiate_services(plan, network_topology, nodes_per_container, testing): services_in_container = service_names[i:i+nodes_per_container] # Get all config file names needed - config_file_names = [services[service_config_file["node_config"]] + config_file_names = [services[service_config_file]["node_config"] for service_config_file in services_in_container] config_files_artifact_ids = [files.get_toml_configuration_artifact(plan, config_file_name, @@ -132,15 +132,16 @@ def instantiate_services(plan, network_topology, nodes_per_container, testing): # All them in ServiceConfig - service_builder(services_in_container, all_services, config_file_names, + service_builder(plan, services_in_container, all_services, config_file_names, config_files_artifact_ids) + all_services_information = plan.add_services( configs = all_services ) - services_information = _add_waku_service_information(plan, all_services_information) + # services_information = _add_waku_service_information(plan, all_services_information) - return services_information + return {} # services_information def _add_waku_service_information(plan, all_services_information): diff --git a/src/system_variables.star b/src/system_variables.star index 2ae721a..8809323 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -11,8 +11,9 @@ NODE_CONFIG_FILE_LOCATION = "github.com/logos-co/wakurtosis/config/topology_gene CONTAINER_NODE_CONFIG_FILE_LOCATION = "/node/configuration_file/" NODE_CONFIGURATION_FILE_EXTENSION = ".toml" NODE_CONFIGURATION_FILE_FLAG = "--config-file=" -NWAKU_ENTRYPOINT = ["/usr/bin/wakunode", "--rpc-address=0.0.0.0", "--metrics-server-address=0.0.0.0", "--log-level=TRACE"] # todo: check, "--store=true", "--storenode=/dns4/node_0"] -GOWAKU_ENTRYPOINT = ["/usr/bin/waku", "--rpc-address=0.0.0.0", "--metrics-server-address=0.0.0.0"] # todo: check, "--store=true", "--storenode=/dns4/node_0"] + +NWAKU_ENTRYPOINT = "/usr/bin/wakunode", "--rpc-address=0.0.0.0", 
"--metrics-server-address=0.0.0.0", "--log-level=TRACE" +GOWAKU_ENTRYPOINT = "/usr/bin/waku", "--rpc-address=0.0.0.0", "--metrics-server-address=0.0.0.0" # Prometheus Configuration PROMETHEUS_IMAGE = "prom/prometheus:latest" From 154c4f1319f1605c2e974d5f863dc023b8e94ee5 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Fri, 17 Feb 2023 11:26:26 +0100 Subject: [PATCH 034/112] Doing packaging inside starlar is wrong. Will start to refactor towards assuming gennet provides this information --- src/node_builders.star | 41 ++++++++++++++++++++++------------------- 1 file changed, 22 insertions(+), 19 deletions(-) diff --git a/src/node_builders.star b/src/node_builders.star index db6c0b3..6174495 100644 --- a/src/node_builders.star +++ b/src/node_builders.star @@ -6,9 +6,14 @@ waku = import_module(vars.WAKU_MODULE) files = import_module(vars.FILE_HELPERS_MODULE) -def prepare_nwaku_service(plan, nwakunode_names, all_services, config_files, artifact_ids): +def prepare_nwaku_service(nwakunode_names, all_services, config_files, artifact_ids, + services_information, containers_counter): # TODO MAKE SURE THEY MATCH + group = "group_"+str(containers_counter) + + for nwaku_name in nwakunode_names: + services_information[nwaku_name]["service_id"] = group prepared_ports = {} for i in range(len(nwakunode_names)): @@ -21,7 +26,6 @@ def prepare_nwaku_service(plan, nwakunode_names, all_services, config_files, art number=vars.WAKU_LIBP2P_PORT + i, transport_protocol="TCP") - prepared_files = {} for i in range(len(nwakunode_names)): prepared_files[vars.CONTAINER_NODE_CONFIG_FILE_LOCATION+nwakunode_names[i]] = artifact_ids[i] @@ -43,7 +47,7 @@ def prepare_nwaku_service(plan, nwakunode_names, all_services, config_files, art cmd=[prepared_cmd] ) - all_services["0"] = add_service_config + all_services[] = add_service_config def prepare_gowaku_service(gowakunode_name, all_services, config_file, artifact_id): @@ -96,13 +100,16 @@ def instantiate_services(plan, network_topology, 
nodes_per_container, testing): Example: service_peer_id = services["nwaku_0"]["peer_id"] - service_ip = services["nwaku_0"]["service_info"].hostname + service_id = services["nwaku_0"]["service_id"] + service_hostname = services["nwaku_0"]["service_info"].hostname service_ip = services["nwaku_0"]["service_info"].ip_address rpc_node_number = services["nwaku_0"]["service_info"].ports["your_rpc_identifier"].number - rpc_node_protocol = services["nwaku_0"]["service_info"].ports["your_rpc_identifier"].protocol + rpc_node_protocol = services["nwaku_0"]["service_info"].ports["your_rpc_identifier"].transport_protocol """ - all_services = {} + services_information = {} + all_services_configuration = {} + containers_counter = 0 # Get up all nodes services_by_image = [] @@ -132,30 +139,26 @@ def instantiate_services(plan, network_topology, nodes_per_container, testing): # All them in ServiceConfig - service_builder(plan, services_in_container, all_services, config_file_names, - config_files_artifact_ids) + service_builder(services_in_container, all_services_configuration, config_file_names, + config_files_artifact_ids, services_information, containers_counter) + containers_counter += 1 all_services_information = plan.add_services( - configs = all_services + configs = all_services_configuration ) - # services_information = _add_waku_service_information(plan, all_services_information) + _add_waku_service_information(plan, all_services_information, services_information) - return {} # services_information + return services_information -def _add_waku_service_information(plan, all_services_information): - - new_services_information = {} +def _add_waku_service_information(plan, all_services_information, services_information): for service_name in all_services_information: node_peer_id = waku.get_wakunode_peer_id(plan, service_name, vars.WAKU_RPC_PORT_ID) - new_services_information[service_name] = {} - new_services_information[service_name]["peer_id"] = node_peer_id - 
new_services_information[service_name]["service_info"] = all_services_information[service_name] - - return new_services_information + services_information[service_name]["peer_id"] = node_peer_id + services_information[service_name]["service_info"] = all_services_information[service_name] service_dispatcher = { From 9bf23faa8262bd4937b88cbec35a05e27eaca6ab Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Fri, 17 Feb 2023 20:30:42 +0100 Subject: [PATCH 035/112] Refactor structure so now several nodes can be in same service if gennet provides propper information in network_data.json --- main.star | 2 +- src/node_builders.star | 84 ++++++++++++++++++------------------------ 2 files changed, 36 insertions(+), 50 deletions(-) diff --git a/main.star b/main.star index b1a7cc1..a457d3d 100644 --- a/main.star +++ b/main.star @@ -27,7 +27,7 @@ def run(plan, args): waku_topology = json.decode(waku_topology_json) # Set up nodes - services = nodes.instantiate_services(plan, waku_topology, nodes_per_container, False) + services = nodes.instantiate_services(plan, waku_topology, False) # Set up prometheus + graphana prometheus_service = prometheus.set_up_prometheus(plan, services) diff --git a/src/node_builders.star b/src/node_builders.star index 6174495..d4cd835 100644 --- a/src/node_builders.star +++ b/src/node_builders.star @@ -6,15 +6,9 @@ waku = import_module(vars.WAKU_MODULE) files = import_module(vars.FILE_HELPERS_MODULE) -def prepare_nwaku_service(nwakunode_names, all_services, config_files, artifact_ids, - services_information, containers_counter): +def prepare_nwaku_service(nwakunode_names, all_services, config_files, artifact_ids, container): # TODO MAKE SURE THEY MATCH - group = "group_"+str(containers_counter) - - for nwaku_name in nwakunode_names: - services_information[nwaku_name]["service_id"] = group - prepared_ports = {} for i in range(len(nwakunode_names)): prepared_ports[vars.WAKU_RPC_PORT_ID+"_"+nwakunode_names[i]] = PortSpec(number=vars.WAKU_TCP_PORT + i, 
@@ -47,7 +41,7 @@ def prepare_nwaku_service(nwakunode_names, all_services, config_files, artifact_ cmd=[prepared_cmd] ) - all_services[] = add_service_config + all_services[container] = add_service_config def prepare_gowaku_service(gowakunode_name, all_services, config_file, artifact_id): @@ -80,8 +74,9 @@ def prepare_nomos_service(plan, test, test2): plan.print("nomos") -def instantiate_services(plan, network_topology, nodes_per_container, testing): +def instantiate_services(plan, network_topology, testing): """ + todo refactor this As we will need to access for the service information later, the structure is the following: services = { @@ -94,71 +89,62 @@ def instantiate_services(plan, network_topology, nodes_per_container, testing): }, "nwaku_1": {...}, "gowaku_": {...} - } Example: service_peer_id = services["nwaku_0"]["peer_id"] - service_id = services["nwaku_0"]["service_id"] service_hostname = services["nwaku_0"]["service_info"].hostname service_ip = services["nwaku_0"]["service_info"].ip_address rpc_node_number = services["nwaku_0"]["service_info"].ports["your_rpc_identifier"].number rpc_node_protocol = services["nwaku_0"]["service_info"].ports["your_rpc_identifier"].transport_protocol """ - services_information = {} all_services_configuration = {} - containers_counter = 0 - # Get up all nodes - services_by_image = [] - for image in vars.NODE_IMAGES_FROM_GENNET: - services_by_image.append({k: v for (k, v) in network_topology.items() if v["image"] == image}) - - # set up dicts by batch by grouped images - for services in services_by_image: - if len(services) == 0: - continue - - service_names = services.keys() - image = services[service_names[0]]["image"] + for service_id, nodes_in_service in network_topology["containers"].items(): + image = network_topology["nodes"][nodes_in_service[0]]["image"] service_builder = service_dispatcher[image] - for i in range(0, len(service_names), nodes_per_container): - # We have a batch of nodes - services_in_container = 
service_names[i:i+nodes_per_container] - - # Get all config file names needed - config_file_names = [services[service_config_file]["node_config"] - for service_config_file in services_in_container] - - config_files_artifact_ids = [files.get_toml_configuration_artifact(plan, config_file_name, - service_name, testing) - for config_file_name, service_name in zip(config_file_names, services_in_container)] + # Get all config file names needed + config_file_names = [network_topology["nodes"][node]["node_config"] for node in nodes_in_service] + config_files_artifact_ids = [ + files.get_toml_configuration_artifact(plan, config_file_name,service_name, testing) + for config_file_name, service_name + in zip(config_file_names, nodes_in_service) + ] - # All them in ServiceConfig - service_builder(services_in_container, all_services_configuration, config_file_names, - config_files_artifact_ids, services_information, containers_counter) - containers_counter += 1 - + service_builder(nodes_in_service, all_services_configuration, config_file_names, + config_files_artifact_ids, service_id) all_services_information = plan.add_services( - configs = all_services_configuration + configs=all_services_configuration ) - _add_waku_service_information(plan, all_services_information, services_information) + nodes_information = _add_waku_service_information(plan, all_services_information, network_topology) + + return nodes_information + + +def _add_waku_service_information(plan, all_services_information, network_topology): - return services_information + new_information = {} + for node_id, node_info in network_topology["nodes"].items(): + new_information[node_id] = {} + rpc_identifier = vars.WAKU_RPC_PORT_ID + "_" + node_id -def _add_waku_service_information(plan, all_services_information, services_information): + node_peer_id = waku.get_wakunode_peer_id(plan, node_info["container_id"], rpc_identifier) - for service_name in all_services_information: - node_peer_id = 
waku.get_wakunode_peer_id(plan, service_name, vars.WAKU_RPC_PORT_ID) + new_information[node_id]["peer_id"] = node_peer_id + new_information[node_id]["hostname"] = all_services_information[node_info["container_id"]].hostname + new_information[node_id]["ip_address"] = all_services_information[node_info["container_id"]].ip_address + new_information[node_id]["rpc_node_number"] = \ + all_services_information[node_info["container_id"]].ports[rpc_identifier].number + new_information[node_id]["rpc_node_protocol"] = \ + all_services_information[node_info["container_id"]].ports[rpc_identifier].transport_protocol - services_information[service_name]["peer_id"] = node_peer_id - services_information[service_name]["service_info"] = all_services_information[service_name] + return new_information service_dispatcher = { From 51ea8aa4ff94b53720ae5a82ba3eb4b37d1a4779 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Mon, 20 Feb 2023 16:40:01 +0100 Subject: [PATCH 036/112] Added new information to topology dict instead of creating a new data structure --- src/node_builders.star | 45 ++++++++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 19 deletions(-) diff --git a/src/node_builders.star b/src/node_builders.star index d4cd835..7701a75 100644 --- a/src/node_builders.star +++ b/src/node_builders.star @@ -121,30 +121,37 @@ def instantiate_services(plan, network_topology, testing): all_services_information = plan.add_services( configs=all_services_configuration ) - nodes_information = _add_waku_service_information(plan, all_services_information, network_topology) - return nodes_information + _add_service_info_to_topology(plan, all_services_information, network_topology) -def _add_waku_service_information(plan, all_services_information, network_topology): - - new_information = {} +def _add_service_info_to_topology(plan, all_services_information, network_topology): for node_id, node_info in network_topology["nodes"].items(): - new_information[node_id] = {} - 
rpc_identifier = vars.WAKU_RPC_PORT_ID + "_" + node_id - - node_peer_id = waku.get_wakunode_peer_id(plan, node_info["container_id"], rpc_identifier) - - new_information[node_id]["peer_id"] = node_peer_id - new_information[node_id]["hostname"] = all_services_information[node_info["container_id"]].hostname - new_information[node_id]["ip_address"] = all_services_information[node_info["container_id"]].ip_address - new_information[node_id]["rpc_node_number"] = \ - all_services_information[node_info["container_id"]].ports[rpc_identifier].number - new_information[node_id]["rpc_node_protocol"] = \ - all_services_information[node_info["container_id"]].ports[rpc_identifier].transport_protocol - - return new_information + waku_port_id = vars.WAKU_RPC_PORT_ID + "_" + node_id + libp2p_port_id = vars.WAKU_LIBP2P_PORT_ID + "_" + node_id + prometheus_port_id = vars.PROMETHEUS_PORT_ID + "_" + node_id + + node_peer_id = waku.get_wakunode_peer_id(plan, node_info["container_id"], waku_port_id) + + network_topology["nodes"][node_id]["peer_id"] = node_peer_id + network_topology["nodes"][node_id]["hostname"] = \ + all_services_information[node_info["container_id"]].hostname + network_topology["nodes"][node_id]["ip_address"] = \ + all_services_information[node_info["container_id"]].ip_address + + network_topology["nodes"][node_id]["ports"] = {} + network_topology["nodes"][node_id]["ports"][waku_port_id] = \ + (all_services_information[node_info["container_id"]].ports[waku_port_id].number, + all_services_information[node_info["container_id"]].ports[waku_port_id].transport_protocol) + + network_topology["nodes"][node_id]["ports"][libp2p_port_id] = \ + (all_services_information[node_info["container_id"]].ports[libp2p_port_id].number, + all_services_information[node_info["container_id"]].ports[libp2p_port_id].transport_protocol) + + network_topology["nodes"][node_id]["ports"][prometheus_port_id] = \ + (all_services_information[node_info["container_id"]].ports[prometheus_port_id].number, + 
all_services_information[node_info["container_id"]].ports[prometheus_port_id].transport_protocol) service_dispatcher = { From 9ce433f5ef4b809438661e6baea35561540fd0c4 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Mon, 20 Feb 2023 18:11:15 +0100 Subject: [PATCH 037/112] Entire starlark workflow now works with several nodes per container --- main.star | 15 ++++++++------- src/file_helpers.star | 6 +++--- src/monitoring/prometheus.star | 9 +++++---- src/node_builders.star | 2 -- src/waku.star | 26 ++++++++++++++------------ 5 files changed, 30 insertions(+), 28 deletions(-) diff --git a/main.star b/main.star index a457d3d..577c544 100644 --- a/main.star +++ b/main.star @@ -23,17 +23,18 @@ def run(plan, args): nodes_per_container = kurtosis_config['nodes_per_container'] # Load network topology - waku_topology_json = read_file(src=vars.TOPOLOGIES_LOCATION + vars.DEFAULT_TOPOLOGY_FILE) - waku_topology = json.decode(waku_topology_json) + network_topology = read_file(src=vars.TOPOLOGIES_LOCATION + vars.DEFAULT_TOPOLOGY_FILE) + network_topology = json.decode(network_topology) # Set up nodes - services = nodes.instantiate_services(plan, waku_topology, False) + nodes.instantiate_services(plan, network_topology, False) + + # Set up prometheus + grafana + prometheus_service = prometheus.set_up_prometheus(plan, network_topology) - # Set up prometheus + graphana - prometheus_service = prometheus.set_up_prometheus(plan, services) grafana_service = grafana.set_up_grafana(plan, prometheus_service) - waku.interconnect_waku_nodes(plan, waku_topology, services, interconnection_batch) + waku.interconnect_waku_nodes(plan, network_topology, interconnection_batch) # Setup WSL & Start the Simulation - wsl_service = wsl.init(plan, services, wsl_config) + wsl_service = wsl.init(plan, network_topology, wsl_config) diff --git a/src/file_helpers.star b/src/file_helpers.star index b08e582..f390b84 100644 --- a/src/file_helpers.star +++ b/src/file_helpers.star @@ -22,9 +22,9 @@ def 
get_toml_configuration_artifact(plan, config_file, name, testing): def generate_template_node_targets(services, port_id): template_data = {} targets_data = [] - for service_name in services.keys(): - service_ip = services[service_name]["service_info"].ip_address - service_port_number = str(services[service_name]["service_info"].ports[port_id].number) + for service_name in services["nodes"].keys(): + service_ip = services["nodes"][service_name]["ip_address"] + service_port_number = str(services["nodes"][service_name]["ports"][port_id+"_"+service_name][0]) targets_data.append('"' + service_ip + ":" + service_port_number + '"') data_as_string = ",".join(targets_data) diff --git a/src/monitoring/prometheus.star b/src/monitoring/prometheus.star index 4ff8149..9de5343 100644 --- a/src/monitoring/prometheus.star +++ b/src/monitoring/prometheus.star @@ -6,9 +6,9 @@ files = import_module(vars.FILE_HELPERS_MODULE) templates = import_module(vars.TEMPLATES_MODULE) -def set_up_prometheus(plan, services): +def set_up_prometheus(plan, network_topology): # Create targets.json - targets_artifact_id = create_prometheus_targets(plan, services) + targets_artifact_id = create_prometheus_targets(plan, network_topology) # Set up prometheus artifact_id = plan.upload_files( @@ -40,9 +40,10 @@ def set_up_prometheus(plan, services): return prometheus_service -def create_prometheus_targets(plan, services): +def create_prometheus_targets(plan, network_topology): # get ip and ports of all nodes - template_data = files.generate_template_node_targets(services, + + template_data = files.generate_template_node_targets(network_topology, vars.PROMETHEUS_PORT_ID) template = templates.get_prometheus_template() diff --git a/src/node_builders.star b/src/node_builders.star index 7701a75..d4f302c 100644 --- a/src/node_builders.star +++ b/src/node_builders.star @@ -135,8 +135,6 @@ def _add_service_info_to_topology(plan, all_services_information, network_topolo node_peer_id = waku.get_wakunode_peer_id(plan, 
node_info["container_id"], waku_port_id) network_topology["nodes"][node_id]["peer_id"] = node_peer_id - network_topology["nodes"][node_id]["hostname"] = \ - all_services_information[node_info["container_id"]].hostname network_topology["nodes"][node_id]["ip_address"] = \ all_services_information[node_info["container_id"]].ip_address diff --git a/src/waku.star b/src/waku.star index 6544e23..c6345a4 100644 --- a/src/waku.star +++ b/src/waku.star @@ -34,12 +34,11 @@ def get_wakunode_peer_id(plan, service_name, port_id): return response["extract.peer_id"] -def create_waku_id(waku_service_information): - waku_service = waku_service_information["service_info"] - - ip = waku_service.ip_address - port = waku_service.ports[vars.WAKU_LIBP2P_PORT_ID].number - waku_node_id = waku_service_information["peer_id"] +# todo better name for this function +def create_waku_id(node_id, node_information): + ip = node_information["ip_address"] + port = node_information["ports"][vars.WAKU_LIBP2P_PORT_ID + "_" + node_id][0] + waku_node_id = node_information["peer_id"] return '"/ip4/' + str(ip) + '/tcp/' + str(port) + '/p2p/' + waku_node_id + '"' @@ -48,9 +47,10 @@ def _merge_peer_ids(peer_ids): return "[" + ",".join(peer_ids) + "]" -def connect_wakunode_to_peers(plan, service_name, port_id, peer_ids): +def connect_wakunode_to_peers(plan, service_name, node_id, port_id, peer_ids): method = vars.CONNECT_TO_PEER_METHOD params = _merge_peer_ids(peer_ids) + port_id = port_id + "_" + node_id response = send_json_rpc(plan, service_name, port_id, method, params) @@ -88,15 +88,17 @@ def get_waku_peers(plan, waku_service_name): return response["extract.peers"] -def interconnect_waku_nodes(plan, topology_information, services, interconnection_batch): +def interconnect_waku_nodes(plan, topology_information, interconnection_batch): # Interconnect them - for waku_service_name in services.keys(): - peers = topology_information[waku_service_name]["static_nodes"] + for node_id in 
topology_information["nodes"].keys(): + peers = topology_information["nodes"][node_id]["static_nodes"] for i in range(0, len(peers), interconnection_batch): x = i - peer_ids = [create_waku_id(services[peer]) for peer in peers[x:x + interconnection_batch]] + peer_ids = [create_waku_id(peer, topology_information["nodes"][peer]) + for peer in peers[x:x + interconnection_batch]] - connect_wakunode_to_peers(plan, waku_service_name, vars.WAKU_RPC_PORT_ID, peer_ids) + connect_wakunode_to_peers(plan, topology_information["nodes"][node_id]["container_id"], + node_id, vars.WAKU_RPC_PORT_ID, peer_ids) From 4f7b257656d3c4a17e98dca06554952424ea97c7 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Tue, 21 Feb 2023 13:05:46 +0100 Subject: [PATCH 038/112] Refactored builders structure --- src/node_builders.star | 159 -------------------------- src/node_builders/gowaku_builder.star | 35 ++++++ src/node_builders/node_builders.star | 95 +++++++++++++++ src/node_builders/nwaku_builder.star | 35 ++++++ src/node_builders/waku_builder.star | 39 +++++++ src/system_variables.star | 44 +++++-- src/tests/test_file_helpers.star | 2 +- src/tests/test_node_builders.star | 8 +- src/tests/test_waku_methods.star | 6 +- src/waku.star | 6 +- src/wsl.star | 2 +- 11 files changed, 248 insertions(+), 183 deletions(-) delete mode 100644 src/node_builders.star create mode 100644 src/node_builders/gowaku_builder.star create mode 100644 src/node_builders/node_builders.star create mode 100644 src/node_builders/nwaku_builder.star create mode 100644 src/node_builders/waku_builder.star diff --git a/src/node_builders.star b/src/node_builders.star deleted file mode 100644 index d4f302c..0000000 --- a/src/node_builders.star +++ /dev/null @@ -1,159 +0,0 @@ -# System Imports -vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") - -# Module Imports -waku = import_module(vars.WAKU_MODULE) -files = import_module(vars.FILE_HELPERS_MODULE) - - -def prepare_nwaku_service(nwakunode_names, 
all_services, config_files, artifact_ids, container): - - # TODO MAKE SURE THEY MATCH - prepared_ports = {} - for i in range(len(nwakunode_names)): - prepared_ports[vars.WAKU_RPC_PORT_ID+"_"+nwakunode_names[i]] = PortSpec(number=vars.WAKU_TCP_PORT + i, - transport_protocol="TCP") - prepared_ports[vars.PROMETHEUS_PORT_ID+"_"+nwakunode_names[i]] = PortSpec( - number=vars.PROMETHEUS_TCP_PORT + i, - transport_protocol="TCP") - prepared_ports[vars.WAKU_LIBP2P_PORT_ID+"_"+nwakunode_names[i]] = PortSpec( - number=vars.WAKU_LIBP2P_PORT + i, - transport_protocol="TCP") - - prepared_files = {} - for i in range(len(nwakunode_names)): - prepared_files[vars.CONTAINER_NODE_CONFIG_FILE_LOCATION+nwakunode_names[i]] = artifact_ids[i] - - prepared_cmd = "" - for i in range(len(nwakunode_names)): - prepared_cmd += vars.NWAKU_ENTRYPOINT + " " - prepared_cmd += vars.NODE_CONFIGURATION_FILE_FLAG + vars.CONTAINER_NODE_CONFIG_FILE_LOCATION +\ - nwakunode_names[i] + "/" + config_files[i] + " " - prepared_cmd += "--ports-shift="+str(i) - if i != len(nwakunode_names) - 1: - prepared_cmd += " & " - - add_service_config = ServiceConfig( - image=vars.NWAKU_IMAGE, - ports=prepared_ports, - files=prepared_files, - entrypoint=["/bin/sh", "-c"], - cmd=[prepared_cmd] - ) - - all_services[container] = add_service_config - - -def prepare_gowaku_service(gowakunode_name, all_services, config_file, artifact_id): - add_service_config = ServiceConfig( - image=vars.GOWAKU_IMAGE, - ports={ - vars.WAKU_RPC_PORT_ID: PortSpec(number=vars.WAKU_TCP_PORT, - transport_protocol="TCP"), - vars.PROMETHEUS_PORT_ID: PortSpec( - number=vars.PROMETHEUS_TCP_PORT, - transport_protocol="TCP"), - vars.WAKU_LIBP2P_PORT_ID: PortSpec( - number=vars.WAKU_LIBP2P_PORT, - transport_protocol="TCP"), - }, - files={ - vars.CONTAINER_NODE_CONFIG_FILE_LOCATION: artifact_id - }, - entrypoint=vars.GOWAKU_ENTRYPOINT, - cmd=[ - vars.NODE_CONFIGURATION_FILE_FLAG + - vars.CONTAINER_NODE_CONFIG_FILE_LOCATION + config_file - ] - ) - - 
all_services[gowakunode_name] = add_service_config - - -def prepare_nomos_service(plan, test, test2): - plan.print("nomos") - - -def instantiate_services(plan, network_topology, testing): - """ - todo refactor this - As we will need to access for the service information later, the structure is the following: - - services = { - "nwaku_0": { - "hostname": service hostname - "peer_id" : peer id of the node, as string, - "service_info": Kurtosis service struct, that has - "ip": ip of the service that is running the node, - "ports": Kurtosis PortSpec, that you can access with their respective identifier - }, - "nwaku_1": {...}, - "gowaku_": {...} - } - - Example: - - service_peer_id = services["nwaku_0"]["peer_id"] - service_hostname = services["nwaku_0"]["service_info"].hostname - service_ip = services["nwaku_0"]["service_info"].ip_address - rpc_node_number = services["nwaku_0"]["service_info"].ports["your_rpc_identifier"].number - rpc_node_protocol = services["nwaku_0"]["service_info"].ports["your_rpc_identifier"].transport_protocol - """ - - all_services_configuration = {} - - for service_id, nodes_in_service in network_topology["containers"].items(): - image = network_topology["nodes"][nodes_in_service[0]]["image"] - service_builder = service_dispatcher[image] - - # Get all config file names needed - config_file_names = [network_topology["nodes"][node]["node_config"] for node in nodes_in_service] - - config_files_artifact_ids = [ - files.get_toml_configuration_artifact(plan, config_file_name,service_name, testing) - for config_file_name, service_name - in zip(config_file_names, nodes_in_service) - ] - - service_builder(nodes_in_service, all_services_configuration, config_file_names, - config_files_artifact_ids, service_id) - - all_services_information = plan.add_services( - configs=all_services_configuration - ) - - _add_service_info_to_topology(plan, all_services_information, network_topology) - - -def _add_service_info_to_topology(plan, all_services_information, 
network_topology): - - for node_id, node_info in network_topology["nodes"].items(): - waku_port_id = vars.WAKU_RPC_PORT_ID + "_" + node_id - libp2p_port_id = vars.WAKU_LIBP2P_PORT_ID + "_" + node_id - prometheus_port_id = vars.PROMETHEUS_PORT_ID + "_" + node_id - - node_peer_id = waku.get_wakunode_peer_id(plan, node_info["container_id"], waku_port_id) - - network_topology["nodes"][node_id]["peer_id"] = node_peer_id - network_topology["nodes"][node_id]["ip_address"] = \ - all_services_information[node_info["container_id"]].ip_address - - network_topology["nodes"][node_id]["ports"] = {} - network_topology["nodes"][node_id]["ports"][waku_port_id] = \ - (all_services_information[node_info["container_id"]].ports[waku_port_id].number, - all_services_information[node_info["container_id"]].ports[waku_port_id].transport_protocol) - - network_topology["nodes"][node_id]["ports"][libp2p_port_id] = \ - (all_services_information[node_info["container_id"]].ports[libp2p_port_id].number, - all_services_information[node_info["container_id"]].ports[libp2p_port_id].transport_protocol) - - network_topology["nodes"][node_id]["ports"][prometheus_port_id] = \ - (all_services_information[node_info["container_id"]].ports[prometheus_port_id].number, - all_services_information[node_info["container_id"]].ports[prometheus_port_id].transport_protocol) - - -service_dispatcher = { - "go-waku": prepare_gowaku_service, - "nim-waku": prepare_nwaku_service, - "nomos": prepare_nomos_service -} diff --git a/src/node_builders/gowaku_builder.star b/src/node_builders/gowaku_builder.star new file mode 100644 index 0000000..a00fcad --- /dev/null +++ b/src/node_builders/gowaku_builder.star @@ -0,0 +1,35 @@ +# System Imports +vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") + +# Module Imports +builder = import_module("github.com/logos-co/wakurtosis/src/node_builders/node_builders.star") + + +def prepare_gowaku_service(gowakunode_name, all_services, config_files, artifact_ids, 
service_id): + prepared_ports = builder.prepare_waku_ports_in_service(gowakunode_name) + prepared_files = builder.prepare_config_files_in_service(gowakunode_name, artifact_ids) + prepared_cmd = _prepare_gowaku_cmd_in_service(gowakunode_name, config_files) + + add_service_config = ServiceConfig( + image=vars.GOWAKU_IMAGE, + ports=prepared_ports, + files=prepared_files, + entrypoint=vars.GENERAL_ENTRYPOINT, + cmd=prepared_cmd + ) + + all_services[service_id] = add_service_config + + +def _prepare_gowaku_cmd_in_service(gowakunode_names, config_files): + prepared_cmd = "" + for i in range(len(gowakunode_names)): + prepared_cmd += vars.GOWAKU_ENTRYPOINT + " " + prepared_cmd += vars.WAKUNODE_CONFIGURATION_FILE_FLAG + \ + vars.CONTAINER_NODE_CONFIG_FILE_LOCATION + \ + gowakunode_names[i] + "/" + config_files[i] + " " + prepared_cmd += vars.WAKUNODE_PORT_SHIFT_FLAG + str(i) + if i != len(gowakunode_names) - 1: + prepared_cmd += " & " + + return [prepared_cmd] \ No newline at end of file diff --git a/src/node_builders/node_builders.star b/src/node_builders/node_builders.star new file mode 100644 index 0000000..dad8223 --- /dev/null +++ b/src/node_builders/node_builders.star @@ -0,0 +1,95 @@ +# System Imports +vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") + +# Module Imports +waku = import_module(vars.WAKU_MODULE) +files = import_module(vars.FILE_HELPERS_MODULE) +waku_builder = import_module(vars.WAKU_BUILDER_MODULE) +nwaku_builder = import_module(vars.NWAKU_BUILDER_MODULE) +gowaku_builder = import_module(vars.GOWAKU_BUILDER_MODULE) + + +service_builder_dispatcher = { + "go-waku": gowaku_builder.prepare_gowaku_service, + "nim-waku": nwaku_builder.prepare_nwaku_service + # nomos: nomos_builder.prepare_nomos_service +} + +ports_dispatcher = { + "go-waku": waku_builder._add_waku_ports_info_to_topology, + "nim-waku": waku_builder._add_waku_ports_info_to_topology + # nomos: nomos_builder._add_nomos_ports_info_to_topology +} + +def 
instantiate_services(plan, network_topology, testing): + """ + As we will need to access for the service information later, we are adding Starlark info into + the network topology.: + + network_topology = { + "containers": [...], + "nodes": { + "node_0": { + standard_gennet_info..., + "peer_id" : peer id of the node, as string, + "ip_address": ip of the container that has the node, as string + "ports": { + "waku_rpc_node_0": (port_number, port_protocol) + "libp2p_node_0": (port_number, port_protocol), + "prometheus_node_0": (port_number, port_protocol) + } + }, + "node_1": {...} + } + + } + """ + all_services_configuration = {} + + for service_id, nodes_in_service in network_topology[vars.GENNET_ALL_CONTAINERS_KEY].items(): + image = network_topology[vars.GENNET_NODES_KEY][nodes_in_service[0]][vars.GENNET_IMAGE_KEY] + service_builder = service_builder_dispatcher[image] + + # Get all config file names needed + config_file_names = [network_topology[vars.GENNET_NODES_KEY][node][vars.GENNET_CONFIG_KEY] + for node in nodes_in_service] + + config_files_artifact_ids = [ + files.get_toml_configuration_artifact(plan, config_file_name, service_name, testing) + for config_file_name, service_name + in zip(config_file_names, nodes_in_service) + ] + + service_builder(nodes_in_service, all_services_configuration, config_file_names, + config_files_artifact_ids, service_id) + + all_services_information = plan.add_services( + configs=all_services_configuration + ) + + _add_service_info_to_topology(plan, all_services_information, network_topology) + + +def _add_service_info_to_topology(plan, all_services_information, network_topology): + for node_id, node_info in network_topology[vars.GENNET_NODES_KEY].items(): + node_rpc_port_id = vars.RPC_PORT_ID + "_" + node_id + + node_peer_id = waku.get_wakunode_peer_id(plan, node_info[vars.GENNET_NODE_CONTAINER_KEY], + node_rpc_port_id) + + network_topology[vars.GENNET_NODES_KEY][node_id][vars.GENNET_PEER_ID_KEY] = node_peer_id + + 
network_topology[vars.GENNET_NODES_KEY][node_id][vars.GENNET_IP_KEY] = \ + all_services_information[node_info[vars.GENNET_NODE_CONTAINER_KEY]].ip_address + + ports_adder = ports_dispatcher[node_info[vars.GENNET_IMAGE_KEY]] + ports_adder(network_topology, all_services_information, node_info, node_id) + + +def prepare_config_files_in_service(node_names, artifact_ids): + prepared_files = {} + + for i in range(len(node_names)): + prepared_files[vars.CONTAINER_NODE_CONFIG_FILE_LOCATION + node_names[i]] = artifact_ids[i] + + return prepared_files diff --git a/src/node_builders/nwaku_builder.star b/src/node_builders/nwaku_builder.star new file mode 100644 index 0000000..b6eb9a9 --- /dev/null +++ b/src/node_builders/nwaku_builder.star @@ -0,0 +1,35 @@ +# System Imports +vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") + +# Module Imports +builder = import_module("github.com/logos-co/wakurtosis/src/node_builders/node_builders.star") + + +def prepare_nwaku_service(nwakunode_names, all_services, config_files, artifact_ids, service_id): + prepared_ports = builder.prepare_waku_ports_in_service(nwakunode_names) + prepared_files = builder.prepare_config_files_in_service(nwakunode_names, artifact_ids) + prepared_cmd = _prepare_nwaku_cmd_in_service(nwakunode_names, config_files) + + add_service_config = ServiceConfig( + image=vars.NWAKU_IMAGE, + ports=prepared_ports, + files=prepared_files, + entrypoint=vars.GENERAL_ENTRYPOINT, + cmd=prepared_cmd + ) + + all_services[service_id] = add_service_config + + +def _prepare_nwaku_cmd_in_service(nwakunode_names, config_files): + prepared_cmd = "" + for i in range(len(nwakunode_names)): + prepared_cmd += vars.NWAKU_ENTRYPOINT + " " + prepared_cmd += vars.WAKUNODE_CONFIGURATION_FILE_FLAG + \ + vars.CONTAINER_NODE_CONFIG_FILE_LOCATION + \ + nwakunode_names[i] + "/" + config_files[i] + " " + prepared_cmd += vars.WAKUNODE_PORT_SHIFT_FLAG + str(i) + if i != len(nwakunode_names) - 1: + prepared_cmd += " & " + + 
return [prepared_cmd] \ No newline at end of file diff --git a/src/node_builders/waku_builder.star b/src/node_builders/waku_builder.star new file mode 100644 index 0000000..96506ce --- /dev/null +++ b/src/node_builders/waku_builder.star @@ -0,0 +1,39 @@ +# System Imports +vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") + + +def prepare_waku_ports_in_service(wakunode_names): + prepared_ports = {} + for i in range(len(wakunode_names)): + prepared_ports[vars.RPC_PORT_ID + "_" + wakunode_names[i]] = \ + PortSpec(number=vars.WAKU_RPC_PORT_NUMBER + i, + transport_protocol=vars.WAKU_RPC_PORT_PROTOCOL) + + prepared_ports[vars.PROMETHEUS_PORT_ID + "_" + wakunode_names[i]] = \ + PortSpec(number=vars.PROMETHEUS_PORT_NUMBER + i, + transport_protocol=vars.PROMETHEUS_PORT_PROTOCOL) + + prepared_ports[vars.WAKU_LIBP2P_PORT_ID + "_" + wakunode_names[i]] = \ + PortSpec(number=vars.WAKU_LIBP2P_PORT + i, + transport_protocol=vars.WAKU_LIBP2P_PORT_PROTOCOL) + + return prepared_ports + + +def _add_waku_ports_info_to_topology(network_topology, all_services_information, node_info, node_id): + waku_rpc_port_id = vars.RPC_PORT_ID + "_" + node_id + libp2p_port_id = vars.WAKU_LIBP2P_PORT_ID + "_" + node_id + prometheus_port_id = vars.PROMETHEUS_PORT_ID + "_" + node_id + + _add_waku_port(network_topology, all_services_information, node_id, node_info, waku_rpc_port_id) + _add_waku_port(network_topology, all_services_information, node_id, node_info, libp2p_port_id) + _add_waku_port(network_topology, all_services_information, node_id, node_info, prometheus_port_id) + + +def _add_waku_port(network_topology, all_services_information, node_id, node_info, port_id): + network_topology[vars.GENNET_NODES_KEY][node_id][vars.TOPOLOGY_PORTS_KEY] = {} + network_topology[vars.GENNET_NODES_KEY][node_id][vars.TOPOLOGY_PORTS_KEY][port_id] = \ + (all_services_information[node_info[vars.GENNET_NODE_CONTAINER_KEY]].ports[ + port_id].number, + 
all_services_information[node_info[vars.GENNET_NODE_CONTAINER_KEY]].ports[ + port_id].transport_protocol) \ No newline at end of file diff --git a/src/system_variables.star b/src/system_variables.star index 8809323..118e9f6 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -2,24 +2,31 @@ NWAKU_IMAGE = "statusteam/nim-waku:nwaku-trace" GOWAKU_IMAGE = "gowaku" -WAKU_RPC_PORT_ID = "rpc" -WAKU_TCP_PORT = 8545 -WAKU_LIBP2P_PORT_ID = "libp2p" -WAKU_LIBP2P_PORT = 60000 - +NODE_IMAGES_FROM_GENNET = ["go-waku", "nim-waku"] +RPC_PORT_ID = "rpc" NODE_CONFIG_FILE_LOCATION = "github.com/logos-co/wakurtosis/config/topology_generated/" CONTAINER_NODE_CONFIG_FILE_LOCATION = "/node/configuration_file/" -NODE_CONFIGURATION_FILE_EXTENSION = ".toml" -NODE_CONFIGURATION_FILE_FLAG = "--config-file=" +GENERAL_ENTRYPOINT = ["/bin/sh", "-c"] + +# Waku Configuration +WAKU_RPC_PORT_PROTOCOL = "TCP" +WAKU_RPC_PORT_NUMBER = 8545 +WAKU_LIBP2P_PORT_ID = "libp2p" +WAKU_LIBP2P_PORT_PROTOCOL = "TCP" +WAKU_LIBP2P_PORT = 60000 -NWAKU_ENTRYPOINT = "/usr/bin/wakunode", "--rpc-address=0.0.0.0", "--metrics-server-address=0.0.0.0", "--log-level=TRACE" -GOWAKU_ENTRYPOINT = "/usr/bin/waku", "--rpc-address=0.0.0.0", "--metrics-server-address=0.0.0.0" +WAKUNODE_CONFIGURATION_FILE_EXTENSION = ".toml" +WAKUNODE_CONFIGURATION_FILE_FLAG = "--config-file=" +WAKUNODE_PORT_SHIFT_FLAG = "--ports-shift=" +NWAKU_ENTRYPOINT = "/usr/bin/wakunode --rpc-address=0.0.0.0 --metrics-server-address=0.0.0.0 --log-level=TRACE" +GOWAKU_ENTRYPOINT = "/usr/bin/waku --rpc-address=0.0.0.0 --metrics-server-address=0.0.0.0" # Prometheus Configuration PROMETHEUS_IMAGE = "prom/prometheus:latest" PROMETHEUS_SERVICE_NAME = "prometheus" -PROMETHEUS_PORT_ID = "prometheus_tcp" -PROMETHEUS_TCP_PORT = 8008 +PROMETHEUS_PORT_ID = "prometheus" +PROMETHEUS_PORT_PROTOCOL = "TCP" +PROMETHEUS_PORT_NUMBER = 8008 PROMETHEUS_CONFIGURATION_PATH = "github.com/logos-co/wakurtosis/monitoring/prometheus.yml" 
CONTAINER_CONFIGURATION_LOCATION_PROMETHEUS = "/test/" @@ -44,6 +51,16 @@ CONTAINER_CUSTOMIZATION_GRAFANA = "/usr/share/grafana/" CONTAINER_DATASOURCES_GRAFANA = "/etc/grafana/provisioning/datasources/" CONTAINER_DATASOURCES_FILE_NAME_GRAFANA = "datasources.yaml" +# Gennet topology Keys +GENNET_NODES_KEY = "nodes" +GENNET_ALL_CONTAINERS_KEY = "containers" +GENNET_IMAGE_KEY = "image" +GENNET_CONFIG_KEY = "node_config" +GENNET_NODE_CONTAINER_KEY = "container_id" +GENNET_PEER_ID_KEY = "peer_id" +GENNET_IP_KEY = "ip_address" +TOPOLOGY_PORTS_KEY = "ports" + # WSL Configuration WSL_IMAGE = "wsl:0.0.1" WSL_SERVICE_NAME = "wsl" @@ -61,7 +78,10 @@ GET_PEERS_METHOD = "get_waku_v2_admin_v1_peers" # Import locations WAKU_MODULE = "github.com/logos-co/wakurtosis/src/waku.star" -NODE_BUILDERS_MODULE = "github.com/logos-co/wakurtosis/src/node_builders.star" +NODE_BUILDERS_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/node_builders.star" +WAKU_BUILDER_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/waku_builder.star" +NWAKU_BUILDER_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/nwaku_builder.star" +GOWAKU_BUILDER_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/gowaku_builder.star" PROMETHEUS_MODULE = "github.com/logos-co/wakurtosis/src/monitoring/prometheus.star" GRAFANA_MODULE = "github.com/logos-co/wakurtosis/src/monitoring/grafana.star" ARGUMENT_PARSER_MODULE = "github.com/logos-co/wakurtosis/src/arguments_parser.star" diff --git a/src/tests/test_file_helpers.star b/src/tests/test_file_helpers.star index 675b0f4..898cf88 100644 --- a/src/tests/test_file_helpers.star +++ b/src/tests/test_file_helpers.star @@ -41,7 +41,7 @@ def test_generate_template_node_targets_multiple(plan): def test_generate_template_prometheus_url(plan): prometheus_service_struct = struct(ip_address="1.2.3.4", ports={vars.PROMETHEUS_PORT_ID: - PortSpec(number=vars.PROMETHEUS_TCP_PORT)}) + PortSpec(number=vars.PROMETHEUS_PORT_NUMBER)}) result = 
files.generate_template_prometheus_url(prometheus_service_struct) plan.assert(value=result["prometheus_url"], assertion="==", target_value="1.2.3.4:8008") diff --git a/src/tests/test_node_builders.star b/src/tests/test_node_builders.star index 2bac0ac..be3ec5a 100644 --- a/src/tests/test_node_builders.star +++ b/src/tests/test_node_builders.star @@ -16,9 +16,9 @@ def test_prepare_nwaku_service(plan): plan.assert(value=test_dict["test"].image, assertion="==", target_value=vars.NWAKU_IMAGE) plan.assert(value=str(test_dict["test"].ports[vars.WAKU_RPC_PORT_ID].number), - assertion="==", target_value=str(vars.WAKU_TCP_PORT)) + assertion="==", target_value=str(vars.WAKU_RPC_PORT_NUMBER)) plan.assert(value=str(test_dict["test"].ports[vars.PROMETHEUS_PORT_ID].number), - assertion="==", target_value=str(vars.PROMETHEUS_TCP_PORT)) + assertion="==", target_value=str(vars.PROMETHEUS_PORT_NUMBER)) plan.assert(value=str(test_dict["test"].ports[vars.WAKU_LIBP2P_PORT_ID].number), assertion="==", target_value=str(vars.WAKU_LIBP2P_PORT)) plan.assert(value=test_dict["test"].files[vars.CONTAINER_NODE_CONFIG_FILE_LOCATION], @@ -44,9 +44,9 @@ def test_prepare_gowaku_service(plan): plan.assert(value=test_dict["test"].image, assertion="==", target_value=vars.GOWAKU_IMAGE) plan.assert(value=str(test_dict["test"].ports[vars.WAKU_RPC_PORT_ID].number), - assertion="==", target_value=str(vars.WAKU_TCP_PORT)) + assertion="==", target_value=str(vars.WAKU_RPC_PORT_NUMBER)) plan.assert(value=str(test_dict["test"].ports[vars.PROMETHEUS_PORT_ID].number), - assertion="==", target_value=str(vars.PROMETHEUS_TCP_PORT)) + assertion="==", target_value=str(vars.PROMETHEUS_PORT_NUMBER)) plan.assert(value=str(test_dict["test"].ports[vars.WAKU_LIBP2P_PORT_ID].number), assertion="==", target_value=str(vars.WAKU_LIBP2P_PORT)) plan.assert(value=test_dict["test"].files[vars.CONTAINER_NODE_CONFIG_FILE_LOCATION], diff --git a/src/tests/test_waku_methods.star b/src/tests/test_waku_methods.star index 523115c..6040b76 
100644 --- a/src/tests/test_waku_methods.star +++ b/src/tests/test_waku_methods.star @@ -43,12 +43,12 @@ def test_send_json_rpc(plan, service_name): params = "test, " + waku_message # Automatically waits for 200 - waku.send_json_rpc(plan, service_name, vars.WAKU_RPC_PORT_ID, + waku.send_json_rpc(plan, service_name, vars.RPC_PORT_ID, vars.POST_RELAY_MESSAGE_METHOD, params) def test_get_wakunode_peer_id(plan, service_name, expected_ids): - peer_id = waku.get_wakunode_peer_id(plan, service_name, vars.WAKU_RPC_PORT_ID) + peer_id = waku.get_wakunode_peer_id(plan, service_name, vars.RPC_PORT_ID) plan.assert(value=peer_id, assertion="==", target_value=expected_ids[service_name]) @@ -76,7 +76,7 @@ def test__merge_peer_ids(plan): def test_connect_wakunode_to_peers(plan, service_name): # It will print an error but 200 code - waku.connect_wakunode_to_peers(plan, service_name, vars.WAKU_RPC_PORT_ID, ["asd"]) + waku.connect_wakunode_to_peers(plan, service_name, vars.RPC_PORT_ID, ["asd"]) def test_post_waku_v2_relay_v1_message(plan, service_name): waku.post_waku_v2_relay_v1_message_test(plan, service_name, "test") diff --git a/src/waku.star b/src/waku.star index c6345a4..2b37479 100644 --- a/src/waku.star +++ b/src/waku.star @@ -63,7 +63,7 @@ def post_waku_v2_relay_v1_message_test(plan, service_name, topic): waku_message = '{"payload": "0x1a2b3c4d5e6f", "timestamp": 1626813243}' params = '"' + topic + '"' + ", " + waku_message - response = send_json_rpc(plan, service_name, vars.WAKU_RPC_PORT_ID, + response = send_json_rpc(plan, service_name, vars.RPC_PORT_ID, vars.POST_RELAY_MESSAGE_METHOD, params) plan.assert(value=response["code"], assertion="==", target_value = 200) @@ -80,7 +80,7 @@ def make_service_wait(plan, service_name, time): def get_waku_peers(plan, waku_service_name): extract = {"peers": '.result | length'} - response = send_json_rpc(plan, waku_service_name, vars.WAKU_RPC_PORT_ID, + response = send_json_rpc(plan, waku_service_name, vars.RPC_PORT_ID, 
vars.GET_PEERS_METHOD, "", extract) plan.assert(value=response["code"], assertion="==", target_value=200) @@ -99,6 +99,6 @@ def interconnect_waku_nodes(plan, topology_information, interconnection_batch): for peer in peers[x:x + interconnection_batch]] connect_wakunode_to_peers(plan, topology_information["nodes"][node_id]["container_id"], - node_id, vars.WAKU_RPC_PORT_ID, peer_ids) + node_id, vars.RPC_PORT_ID, peer_ids) diff --git a/src/wsl.star b/src/wsl.star index 9ccdbbe..20019fd 100644 --- a/src/wsl.star +++ b/src/wsl.star @@ -25,7 +25,7 @@ def create_config(plan, wls_config): def create_targets(plan, services): # Get private ip and ports of all nodes - template_data = files.generate_template_node_targets(services, vars.WAKU_RPC_PORT_ID) + template_data = files.generate_template_node_targets(services, vars.RPC_PORT_ID) # Template template = """ From 5ecf1d7632a597f2a334d668c42c93ab7a266ad6 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Tue, 21 Feb 2023 13:07:26 +0100 Subject: [PATCH 039/112] Moved node builder star files to types folder --- src/node_builders/{ => types}/gowaku_builder.star | 0 src/node_builders/{ => types}/nwaku_builder.star | 0 src/node_builders/{ => types}/waku_builder.star | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename src/node_builders/{ => types}/gowaku_builder.star (100%) rename src/node_builders/{ => types}/nwaku_builder.star (100%) rename src/node_builders/{ => types}/waku_builder.star (100%) diff --git a/src/node_builders/gowaku_builder.star b/src/node_builders/types/gowaku_builder.star similarity index 100% rename from src/node_builders/gowaku_builder.star rename to src/node_builders/types/gowaku_builder.star diff --git a/src/node_builders/nwaku_builder.star b/src/node_builders/types/nwaku_builder.star similarity index 100% rename from src/node_builders/nwaku_builder.star rename to src/node_builders/types/nwaku_builder.star diff --git a/src/node_builders/waku_builder.star b/src/node_builders/types/waku_builder.star 
similarity index 100% rename from src/node_builders/waku_builder.star rename to src/node_builders/types/waku_builder.star From 76275b7746f7f9bc318da1f8d2a5aac8a627bff7 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Tue, 21 Feb 2023 16:30:37 +0100 Subject: [PATCH 040/112] Fixed bug when adding ports --- src/node_builders/types/gowaku_builder.star | 6 +++--- src/node_builders/types/nwaku_builder.star | 8 ++++---- src/node_builders/types/waku_builder.star | 9 +++++++++ 3 files changed, 16 insertions(+), 7 deletions(-) diff --git a/src/node_builders/types/gowaku_builder.star b/src/node_builders/types/gowaku_builder.star index a00fcad..c5f3799 100644 --- a/src/node_builders/types/gowaku_builder.star +++ b/src/node_builders/types/gowaku_builder.star @@ -2,12 +2,12 @@ vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") # Module Imports -builder = import_module("github.com/logos-co/wakurtosis/src/node_builders/node_builders.star") +waku_builder = import_module(vars.WAKU_BUILDER_MODULE) def prepare_gowaku_service(gowakunode_name, all_services, config_files, artifact_ids, service_id): - prepared_ports = builder.prepare_waku_ports_in_service(gowakunode_name) - prepared_files = builder.prepare_config_files_in_service(gowakunode_name, artifact_ids) + prepared_ports = waku_builder.prepare_waku_ports_in_service(gowakunode_name) + prepared_files = waku_builder.prepare_waku_config_files_in_service(gowakunode_name, artifact_ids) prepared_cmd = _prepare_gowaku_cmd_in_service(gowakunode_name, config_files) add_service_config = ServiceConfig( diff --git a/src/node_builders/types/nwaku_builder.star b/src/node_builders/types/nwaku_builder.star index b6eb9a9..e0d622a 100644 --- a/src/node_builders/types/nwaku_builder.star +++ b/src/node_builders/types/nwaku_builder.star @@ -2,12 +2,12 @@ vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") # Module Imports -builder = 
import_module("github.com/logos-co/wakurtosis/src/node_builders/node_builders.star") +waku_builder = import_module(vars.WAKU_BUILDER_MODULE) def prepare_nwaku_service(nwakunode_names, all_services, config_files, artifact_ids, service_id): - prepared_ports = builder.prepare_waku_ports_in_service(nwakunode_names) - prepared_files = builder.prepare_config_files_in_service(nwakunode_names, artifact_ids) + prepared_ports = waku_builder.prepare_waku_ports_in_service(nwakunode_names) + prepared_files = waku_builder.prepare_waku_config_files_in_service(nwakunode_names, artifact_ids) prepared_cmd = _prepare_nwaku_cmd_in_service(nwakunode_names, config_files) add_service_config = ServiceConfig( @@ -32,4 +32,4 @@ def _prepare_nwaku_cmd_in_service(nwakunode_names, config_files): if i != len(nwakunode_names) - 1: prepared_cmd += " & " - return [prepared_cmd] \ No newline at end of file + return [prepared_cmd] diff --git a/src/node_builders/types/waku_builder.star b/src/node_builders/types/waku_builder.star index 96506ce..4e25849 100644 --- a/src/node_builders/types/waku_builder.star +++ b/src/node_builders/types/waku_builder.star @@ -19,12 +19,21 @@ def prepare_waku_ports_in_service(wakunode_names): return prepared_ports +def prepare_waku_config_files_in_service(node_names, artifact_ids): + prepared_files = {} + + for i in range(len(node_names)): + prepared_files[vars.CONTAINER_NODE_CONFIG_FILE_LOCATION + node_names[i]] = artifact_ids[i] + + return prepared_files + def _add_waku_ports_info_to_topology(network_topology, all_services_information, node_info, node_id): waku_rpc_port_id = vars.RPC_PORT_ID + "_" + node_id libp2p_port_id = vars.WAKU_LIBP2P_PORT_ID + "_" + node_id prometheus_port_id = vars.PROMETHEUS_PORT_ID + "_" + node_id + network_topology[vars.GENNET_NODES_KEY][node_id][vars.PORTS_KEY] = {} _add_waku_port(network_topology, all_services_information, node_id, node_info, waku_rpc_port_id) _add_waku_port(network_topology, all_services_information, node_id, node_info, 
libp2p_port_id) _add_waku_port(network_topology, all_services_information, node_id, node_info, prometheus_port_id) From 01b398513bc29ac767f0b3d025881a15356717ee Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Tue, 21 Feb 2023 16:31:16 +0100 Subject: [PATCH 041/112] Refactored more system variables --- main.star | 7 +++-- src/arguments_parser.star | 4 +-- src/file_helpers.star | 12 +++++---- src/monitoring/prometheus.star | 4 +-- src/node_builders/node_builders.star | 21 +++++---------- src/node_builders/types/waku_builder.star | 3 +-- src/system_variables.star | 27 +++++++++++++++----- src/tests/test_waku_methods.star | 2 +- src/waku.star | 31 ++++++++--------------- src/wsl.star | 4 +-- 10 files changed, 55 insertions(+), 60 deletions(-) diff --git a/main.star b/main.star index 577c544..a44da48 100644 --- a/main.star +++ b/main.star @@ -17,10 +17,9 @@ def run(plan, args): config_json = read_file(src=config_file) config = json.decode(config_json) - kurtosis_config = config['kurtosis'] - wsl_config = config['wsl'] - interconnection_batch = kurtosis_config['interconnection_batch'] - nodes_per_container = kurtosis_config['nodes_per_container'] + kurtosis_config = config[vars.KURTOSIS_KEY] + wsl_config = config[vars.WLS_KEY] + interconnection_batch = kurtosis_config[vars.INTERCONNECTION_BATCH_KEY] # Load network topology network_topology = read_file(src=vars.TOPOLOGIES_LOCATION + vars.DEFAULT_TOPOLOGY_FILE) diff --git a/src/arguments_parser.star b/src/arguments_parser.star index 6ee0106..faefdc2 100644 --- a/src/arguments_parser.star +++ b/src/arguments_parser.star @@ -1,14 +1,14 @@ # System Imports vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") + def get_configuration_file_name(plan, input_args): # Parse command line argument (config file) config_file = vars.DEFAULT_CONFIG_FILE - if hasattr(input_args, "config_file"): + if hasattr(input_args, vars.CONFIG_FILE_STARLARK_PARAMETER): config_file = input_args.config_file 
plan.print("Got config file: %s" %config_file) else: plan.print("Got default config file: %s" %config_file) return config_file - diff --git a/src/file_helpers.star b/src/file_helpers.star index f390b84..de0d01f 100644 --- a/src/file_helpers.star +++ b/src/file_helpers.star @@ -19,17 +19,19 @@ def get_toml_configuration_artifact(plan, config_file, name, testing): return artifact_id -def generate_template_node_targets(services, port_id): +def generate_template_node_targets(services, port_id, key_value): template_data = {} targets_data = [] - for service_name in services["nodes"].keys(): - service_ip = services["nodes"][service_name]["ip_address"] - service_port_number = str(services["nodes"][service_name]["ports"][port_id+"_"+service_name][0]) + for service_name in services[vars.GENNET_NODES_KEY].keys(): + service_info = services[vars.GENNET_NODES_KEY][service_name] + + service_ip = service_info[vars.IP_KEY] + service_port_number = str(service_info[vars.PORTS_KEY][port_id+"_"+service_name][0]) targets_data.append('"' + service_ip + ":" + service_port_number + '"') data_as_string = ",".join(targets_data) targets_payload = "[" + data_as_string + "]" - template_data["targets"] = targets_payload + template_data[key_value] = targets_payload return template_data diff --git a/src/monitoring/prometheus.star b/src/monitoring/prometheus.star index 9de5343..e96e0b8 100644 --- a/src/monitoring/prometheus.star +++ b/src/monitoring/prometheus.star @@ -44,7 +44,7 @@ def create_prometheus_targets(plan, network_topology): # get ip and ports of all nodes template_data = files.generate_template_node_targets(network_topology, - vars.PROMETHEUS_PORT_ID) + vars.PROMETHEUS_PORT_ID, "targets") template = templates.get_prometheus_template() @@ -55,7 +55,7 @@ def create_prometheus_targets(plan, network_topology): data=template_data, ) }, - name="prometheus_targets" + name=vars.PROMETHEUS_TEMPLATE_NAME ) return artifact_id diff --git a/src/node_builders/node_builders.star 
b/src/node_builders/node_builders.star index dad8223..52b2034 100644 --- a/src/node_builders/node_builders.star +++ b/src/node_builders/node_builders.star @@ -10,14 +10,14 @@ gowaku_builder = import_module(vars.GOWAKU_BUILDER_MODULE) service_builder_dispatcher = { - "go-waku": gowaku_builder.prepare_gowaku_service, - "nim-waku": nwaku_builder.prepare_nwaku_service + vars.GENNET_GOWAKU_IMAGE_VALUE: gowaku_builder.prepare_gowaku_service, + vars.GENNET_NWAKU_IMAGE_VALUE: nwaku_builder.prepare_nwaku_service # nomos: nomos_builder.prepare_nomos_service } ports_dispatcher = { - "go-waku": waku_builder._add_waku_ports_info_to_topology, - "nim-waku": waku_builder._add_waku_ports_info_to_topology + vars.GENNET_GOWAKU_IMAGE_VALUE: waku_builder._add_waku_ports_info_to_topology, + vars.GENNET_NWAKU_IMAGE_VALUE: waku_builder._add_waku_ports_info_to_topology # nomos: nomos_builder._add_nomos_ports_info_to_topology } @@ -77,19 +77,10 @@ def _add_service_info_to_topology(plan, all_services_information, network_topolo node_peer_id = waku.get_wakunode_peer_id(plan, node_info[vars.GENNET_NODE_CONTAINER_KEY], node_rpc_port_id) - network_topology[vars.GENNET_NODES_KEY][node_id][vars.GENNET_PEER_ID_KEY] = node_peer_id + network_topology[vars.GENNET_NODES_KEY][node_id][vars.PEER_ID_KEY] = node_peer_id - network_topology[vars.GENNET_NODES_KEY][node_id][vars.GENNET_IP_KEY] = \ + network_topology[vars.GENNET_NODES_KEY][node_id][vars.IP_KEY] = \ all_services_information[node_info[vars.GENNET_NODE_CONTAINER_KEY]].ip_address ports_adder = ports_dispatcher[node_info[vars.GENNET_IMAGE_KEY]] ports_adder(network_topology, all_services_information, node_info, node_id) - - -def prepare_config_files_in_service(node_names, artifact_ids): - prepared_files = {} - - for i in range(len(node_names)): - prepared_files[vars.CONTAINER_NODE_CONFIG_FILE_LOCATION + node_names[i]] = artifact_ids[i] - - return prepared_files diff --git a/src/node_builders/types/waku_builder.star 
b/src/node_builders/types/waku_builder.star index 4e25849..6e256c4 100644 --- a/src/node_builders/types/waku_builder.star +++ b/src/node_builders/types/waku_builder.star @@ -40,8 +40,7 @@ def _add_waku_ports_info_to_topology(network_topology, all_services_information, def _add_waku_port(network_topology, all_services_information, node_id, node_info, port_id): - network_topology[vars.GENNET_NODES_KEY][node_id][vars.TOPOLOGY_PORTS_KEY] = {} - network_topology[vars.GENNET_NODES_KEY][node_id][vars.TOPOLOGY_PORTS_KEY][port_id] = \ + network_topology[vars.GENNET_NODES_KEY][node_id][vars.PORTS_KEY][port_id] = \ (all_services_information[node_info[vars.GENNET_NODE_CONTAINER_KEY]].ports[ port_id].number, all_services_information[node_info[vars.GENNET_NODE_CONTAINER_KEY]].ports[ diff --git a/src/system_variables.star b/src/system_variables.star index 118e9f6..89b5eba 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -2,11 +2,16 @@ NWAKU_IMAGE = "statusteam/nim-waku:nwaku-trace" GOWAKU_IMAGE = "gowaku" -NODE_IMAGES_FROM_GENNET = ["go-waku", "nim-waku"] RPC_PORT_ID = "rpc" NODE_CONFIG_FILE_LOCATION = "github.com/logos-co/wakurtosis/config/topology_generated/" CONTAINER_NODE_CONFIG_FILE_LOCATION = "/node/configuration_file/" GENERAL_ENTRYPOINT = ["/bin/sh", "-c"] +CONFIG_FILE_STARLARK_PARAMETER = "config_file" + +# Config file keys +KURTOSIS_KEY = "kurtosis" +WLS_KEY = "wls" +INTERCONNECTION_BATCH_KEY = "interconnection_batch" # Waku Configuration WAKU_RPC_PORT_PROTOCOL = "TCP" @@ -28,6 +33,8 @@ PROMETHEUS_PORT_ID = "prometheus" PROMETHEUS_PORT_PROTOCOL = "TCP" PROMETHEUS_PORT_NUMBER = 8008 PROMETHEUS_CONFIGURATION_PATH = "github.com/logos-co/wakurtosis/monitoring/prometheus.yml" +PROMETHEUS_TEMPLATE_NAME = "prometheus_targets" + CONTAINER_CONFIGURATION_LOCATION_PROMETHEUS = "/test/" CONTAINER_CONFIGURATION_LOCATION_PROMETHEUS_2 = "/tmp/" @@ -57,9 +64,13 @@ GENNET_ALL_CONTAINERS_KEY = "containers" GENNET_IMAGE_KEY = "image" GENNET_CONFIG_KEY = 
"node_config" GENNET_NODE_CONTAINER_KEY = "container_id" -GENNET_PEER_ID_KEY = "peer_id" -GENNET_IP_KEY = "ip_address" -TOPOLOGY_PORTS_KEY = "ports" +GENNET_STATIC_NODES_KEY = "static_nodes" +GENNET_GOWAKU_IMAGE_VALUE = "go-waku" +GENNET_NWAKU_IMAGE_VALUE = "nim-waku" + +PEER_ID_KEY = "peer_id" +IP_KEY = "ip_address" +PORTS_KEY = "ports" # WSL Configuration WSL_IMAGE = "wsl:0.0.1" @@ -67,6 +78,8 @@ WSL_SERVICE_NAME = "wsl" WSL_CONFIG_PATH = "/wsl/config" WSL_TARGETS_PATH = "/wsl/targets" WSL_TOMLS_PATH = "/wsl/tomls" +WLS_CMD = ["python3", "wsl.py"] + CONTAINER_WSL_CONFIGURATION_FILE_NAME = "wsl.yml" CONTAINER_TARGETS_FILE_NAME_WSL = "targets.json" @@ -79,9 +92,9 @@ GET_PEERS_METHOD = "get_waku_v2_admin_v1_peers" # Import locations WAKU_MODULE = "github.com/logos-co/wakurtosis/src/waku.star" NODE_BUILDERS_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/node_builders.star" -WAKU_BUILDER_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/waku_builder.star" -NWAKU_BUILDER_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/nwaku_builder.star" -GOWAKU_BUILDER_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/gowaku_builder.star" +WAKU_BUILDER_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/types/waku_builder.star" +NWAKU_BUILDER_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/types/nwaku_builder.star" +GOWAKU_BUILDER_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/types/gowaku_builder.star" PROMETHEUS_MODULE = "github.com/logos-co/wakurtosis/src/monitoring/prometheus.star" GRAFANA_MODULE = "github.com/logos-co/wakurtosis/src/monitoring/grafana.star" ARGUMENT_PARSER_MODULE = "github.com/logos-co/wakurtosis/src/arguments_parser.star" diff --git a/src/tests/test_waku_methods.star b/src/tests/test_waku_methods.star index 6040b76..e4f6138 100644 --- a/src/tests/test_waku_methods.star +++ b/src/tests/test_waku_methods.star @@ -59,7 +59,7 @@ def test_create_waku_id(plan): ports={vars.WAKU_LIBP2P_PORT_ID: 
PortSpec(number=1234)}) services_example = {"service_info": service_struct, "peer_id": "ASDFGHJKL"} - waku_id = waku.create_waku_id(services_example) + waku_id = waku.create_node_multiaddress(services_example) plan.assert(value=waku_id, assertion="==", target_value='"/ip4/1.1.1.1/tcp/1234/p2p/ASDFGHJKL"') diff --git a/src/waku.star b/src/waku.star index 2b37479..8a75cd9 100644 --- a/src/waku.star +++ b/src/waku.star @@ -35,10 +35,10 @@ def get_wakunode_peer_id(plan, service_name, port_id): # todo better name for this function -def create_waku_id(node_id, node_information): - ip = node_information["ip_address"] - port = node_information["ports"][vars.WAKU_LIBP2P_PORT_ID + "_" + node_id][0] - waku_node_id = node_information["peer_id"] +def create_node_multiaddress(node_id, node_information): + ip = node_information[vars.IP_KEY] + port = node_information[vars.PORTS_KEY][vars.WAKU_LIBP2P_PORT_ID + "_" + node_id][0] + waku_node_id = node_information[vars.PEER_ID_KEY] return '"/ip4/' + str(ip) + '/tcp/' + str(port) + '/p2p/' + waku_node_id + '"' @@ -59,16 +59,6 @@ def connect_wakunode_to_peers(plan, service_name, node_id, port_id, peer_ids): plan.print(response) -def post_waku_v2_relay_v1_message_test(plan, service_name, topic): - waku_message = '{"payload": "0x1a2b3c4d5e6f", "timestamp": 1626813243}' - params = '"' + topic + '"' + ", " + waku_message - - response = send_json_rpc(plan, service_name, vars.RPC_PORT_ID, - vars.POST_RELAY_MESSAGE_METHOD, params) - - plan.assert(value=response["code"], assertion="==", target_value = 200) - - def make_service_wait(plan, service_name, time): exec_recipe = struct( service_name=service_name, @@ -90,15 +80,16 @@ def get_waku_peers(plan, waku_service_name): def interconnect_waku_nodes(plan, topology_information, interconnection_batch): # Interconnect them - for node_id in topology_information["nodes"].keys(): - peers = topology_information["nodes"][node_id]["static_nodes"] + nodes_in_topology = 
topology_information[vars.GENNET_NODES_KEY] + + for node_id in nodes_in_topology.keys(): + peers = nodes_in_topology[node_id][vars.GENNET_STATIC_NODES_KEY] for i in range(0, len(peers), interconnection_batch): - x = i - peer_ids = [create_waku_id(peer, topology_information["nodes"][peer]) - for peer in peers[x:x + interconnection_batch]] + peer_ids = [create_node_multiaddress(peer, nodes_in_topology[peer]) + for peer in peers[i:i + interconnection_batch]] - connect_wakunode_to_peers(plan, topology_information["nodes"][node_id]["container_id"], + connect_wakunode_to_peers(plan, nodes_in_topology[node_id][vars.GENNET_NODE_CONTAINER_KEY], node_id, vars.RPC_PORT_ID, peer_ids) diff --git a/src/wsl.star b/src/wsl.star index 20019fd..8aa638b 100644 --- a/src/wsl.star +++ b/src/wsl.star @@ -25,7 +25,7 @@ def create_config(plan, wls_config): def create_targets(plan, services): # Get private ip and ports of all nodes - template_data = files.generate_template_node_targets(services, vars.RPC_PORT_ID) + template_data = files.generate_template_node_targets(services, vars.RPC_PORT_ID, "targets") # Template template = """ @@ -66,7 +66,7 @@ def init(plan, services, wsl_config): vars.WSL_TARGETS_PATH: wsl_targets, vars.WSL_TOMLS_PATH: tomls_artifact }, - cmd=["python3", "wsl.py"] + cmd=vars.WLS_CMD ) wsl_service = plan.add_service( service_name=vars.WSL_SERVICE_NAME, From 13ebdea8535bc2ba2a34da64841e8792d3873b25 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Tue, 21 Feb 2023 16:38:49 +0100 Subject: [PATCH 042/112] Renamed propperly WSL to WLS to avoid confussions --- .gitignore | 4 +-- README.md | 4 +-- build.sh | 6 ++-- main.star | 8 +++--- run.sh | 18 ++++++------ src/system_variables.star | 20 ++++++------- src/templates.star | 8 +++--- src/{wsl.star => wls.star} | 32 ++++++++++----------- {wsl-module => wls-module}/README.md | 4 +-- wls-module/build.sh | 6 ++++ {wsl-module => wls-module}/config/wsl.yml | 0 {wsl-module => wls-module}/dockerfile | 4 +-- {wsl-module => 
wls-module}/requirements.txt | 0 {wsl-module => wls-module}/rtnorm.py | 0 {wsl-module => wls-module}/wsl.py | 2 +- wsl-module/build.sh | 6 ---- 16 files changed, 62 insertions(+), 60 deletions(-) rename src/{wsl.star => wls.star} (64%) rename {wsl-module => wls-module}/README.md (95%) create mode 100644 wls-module/build.sh rename {wsl-module => wls-module}/config/wsl.yml (100%) rename {wsl-module => wls-module}/dockerfile (82%) rename {wsl-module => wls-module}/requirements.txt (100%) rename {wsl-module => wls-module}/rtnorm.py (100%) rename {wsl-module => wls-module}/wsl.py (99%) delete mode 100644 wsl-module/build.sh diff --git a/.gitignore b/.gitignore index fc8c79d..e958722 100644 --- a/.gitignore +++ b/.gitignore @@ -16,8 +16,8 @@ node_modules/ targets.json data/ enclave.dump/ -wsl-module/requirements.txt -wsl-module/__pycache__ +wls-module/requirements.txt +wls-module/__pycache__ config/network_topology_auto/ config/config.json gennet-module/topology/ diff --git a/README.md b/README.md index f976807..d9d1107 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ These are arguments that can be modified: - _jobs_: int. Defines how many services will be instantiated at the same time. - _interconnection_batch_: int. If nodes are being connected by a given topology, this tells kurtosis how many connections will try to set up in the same node at a time. Used to avoid timeouts if a node has a lot of connections. 
-- [WLS](wsl-module/README.md) module configuration +- [WLS](wls-module/README.md) module configuration - [Gennet](gennet-module/Readme.md) module configuration #### What will happen @@ -50,7 +50,7 @@ Once all nodes have been interconnected the simulation starts and will inject tr - Simulation log: -'kurtosis service logs wakurtosis $(kurtosis enclave inspect | grep wsl- | awk '{print $1}')' +'kurtosis service logs wakurtosis $(kurtosis enclave inspect | grep wls- | awk '{print $1}')' - Grafana server: diff --git a/build.sh b/build.sh index 9e5d06d..3f2c1c5 100644 --- a/build.sh +++ b/build.sh @@ -13,9 +13,9 @@ sudo apt install kurtosis-cli=$kurtosis_version sudo apt-mark hold kurtosis-cli sudo rm /etc/apt/sources.list.d/kurtosis.list -# Build WSL and Gennet docker image -cd wsl-module -docker build -t wsl:0.0.1 . +# Build WLS and Gennet docker image +cd wls-module +docker build -t wls:0.0.1 . cd .. cd gennet-module diff --git a/main.star b/main.star index a44da48..49380ad 100644 --- a/main.star +++ b/main.star @@ -6,7 +6,7 @@ waku = import_module(vars.WAKU_MODULE) prometheus = import_module(vars.PROMETHEUS_MODULE) grafana = import_module(vars.GRAFANA_MODULE) args_parser = import_module(vars.ARGUMENT_PARSER_MODULE) -wsl = import_module(vars.WSL_MODULE) +wls = import_module(vars.WLS_MODULE) nodes = import_module(vars.NODE_BUILDERS_MODULE) @@ -18,7 +18,7 @@ def run(plan, args): config = json.decode(config_json) kurtosis_config = config[vars.KURTOSIS_KEY] - wsl_config = config[vars.WLS_KEY] + wls_config = config[vars.WLS_KEY] interconnection_batch = kurtosis_config[vars.INTERCONNECTION_BATCH_KEY] # Load network topology @@ -35,5 +35,5 @@ def run(plan, args): waku.interconnect_waku_nodes(plan, network_topology, interconnection_batch) - # Setup WSL & Start the Simulation - wsl_service = wsl.init(plan, network_topology, wsl_config) + # Setup WLS & Start the Simulation + wls_service = wls.init(plan, network_topology, wls_config) diff --git a/run.sh b/run.sh index 
bb5185a..9d2aa46 100644 --- a/run.sh +++ b/run.sh @@ -72,10 +72,11 @@ kurtosis_cmd="kurtosis run --enclave-id ${enclave_name} . '{\"wakurtosis_config_ eval $kurtosis_cmd echo -e "Enclave " $enclave_name " is up and running" -# Fetch the WSL service id and display the log of the simulation -wsl_service_name=$(kurtosis enclave inspect $enclave_name 2>/dev/null | grep wsl | awk '{print $1}') -# kurtosis service logs wakurtosis $wsl_service_id -echo -e "\n--> To see simulation logs run: kurtosis service logs $enclave_name $wsl_service_name <--" +# Fetch the WLS service id and display the log of the simulation +wls_service_name=$(kurtosis enclave inspect wakurtosis | grep wls | awk '{print $1}') +# kurtosis service logs wakurtosis $wls_service_name +echo -e "\n--> To see simulation logs run: kurtosis service logs wakurtosis $wls_service_name <--" + # Fetch the Grafana address & port grafana_host=$(kurtosis enclave inspect $enclave_name | grep grafana | awk '{print $6}') @@ -83,11 +84,11 @@ echo -e "\n--> Statistics in Grafana server at http://$grafana_host/ <--" echo "Output of kurtosis run command written in kurtosisrun_log.txt" -### Wait for WSL to finish +### Wait for WLS to finish # Get the container suffix for the running service enclave_preffix="$(kurtosis enclave inspect --full-uuids $enclave_name | grep UUID: | awk '{print $2}')" -cid_suffix="$(kurtosis enclave inspect --full-uuids $enclave_name | grep $wsl_service_name | cut -f 1 -d ' ')" +cid_suffix="$(kurtosis enclave inspect --full-uuids $enclave_name | grep $wls_service_name | cut -f 1 -d ' ')" # Construct the fully qualified container name that kurtosis created cid="$enclave_preffix--user-service--$cid_suffix" @@ -101,8 +102,9 @@ kurtosis enclave dump ${enclave_name} ${enclave_name}_logs > /dev/null 2>&1 echo -e "Simulation ended with code $status_code Results in ./${enclave_name}_logs" # Copy simulation results -# docker cp "$cid:/wsl/summary.json" "./${enclave_name}_logs" > /dev/null 2>&1 -docker cp 
"$cid:/wsl/messages.json" "./${enclave_name}_logs" +# docker cp "$cid:/wls/summary.json" "./${enclave_name}_logs" > /dev/null 2>&1 +docker cp "$cid:/wls/messages.json" "./${enclave_name}_logs" + # Stop and delete the enclave # kurtosis enclave stop $enclave_name > /dev/null 2>&1 diff --git a/src/system_variables.star b/src/system_variables.star index 89b5eba..8a1cac4 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -72,16 +72,16 @@ PEER_ID_KEY = "peer_id" IP_KEY = "ip_address" PORTS_KEY = "ports" -# WSL Configuration -WSL_IMAGE = "wsl:0.0.1" -WSL_SERVICE_NAME = "wsl" -WSL_CONFIG_PATH = "/wsl/config" -WSL_TARGETS_PATH = "/wsl/targets" -WSL_TOMLS_PATH = "/wsl/tomls" -WLS_CMD = ["python3", "wsl.py"] +# WLS Configuration +WLS_IMAGE = "wls:0.0.1" +WLS_SERVICE_NAME = "wls" +WLS_CONFIG_PATH = "/wls/config" +WLS_TARGETS_PATH = "/wls/targets" +WLS_TOMLS_PATH = "/wls/tomls" +WLS_CMD = ["python3", "wls.py"] -CONTAINER_WSL_CONFIGURATION_FILE_NAME = "wsl.yml" -CONTAINER_TARGETS_FILE_NAME_WSL = "targets.json" +CONTAINER_WLS_CONFIGURATION_FILE_NAME = "wls.yml" +CONTAINER_TARGETS_FILE_NAME_WLS = "targets.json" # Waku RPC methods POST_RELAY_MESSAGE_METHOD = "post_waku_v2_relay_v1_message" @@ -100,7 +100,7 @@ GRAFANA_MODULE = "github.com/logos-co/wakurtosis/src/monitoring/grafana.star" ARGUMENT_PARSER_MODULE = "github.com/logos-co/wakurtosis/src/arguments_parser.star" FILE_HELPERS_MODULE = "github.com/logos-co/wakurtosis/src/file_helpers.star" TEMPLATES_MODULE = "github.com/logos-co/wakurtosis/src/templates.star" -WSL_MODULE = "github.com/logos-co/wakurtosis/src/wsl.star" +WLS_MODULE = "github.com/logos-co/wakurtosis/src/wls.star" TEST_ARGUMENTS_MODULE = "github.com/logos-co/wakurtosis/src/tests/test_arguments_parser.star" TEST_FILES_MODULE = "github.com/logos-co/wakurtosis/src/tests/test_file_helpers.star" diff --git a/src/templates.star b/src/templates.star index a7031b7..cfc099e 100644 --- a/src/templates.star +++ b/src/templates.star @@ -33,10 +33,10 @@ 
def get_prometheus_template(): return template -# WSL -def get_wsl_template(): +# WLS +def get_wls_template(): # Traffic simulation parameters - wsl_yml_template = """ + wls_yml_template = """ general: debug_level : "DEBUG" @@ -68,4 +68,4 @@ def get_wsl_template(): inter_msg_type : {{.inter_msg_type}} """ - return wsl_yml_template + return wls_yml_template diff --git a/src/wsl.star b/src/wls.star similarity index 64% rename from src/wsl.star rename to src/wls.star index 8aa638b..e6a8434 100644 --- a/src/wsl.star +++ b/src/wls.star @@ -8,16 +8,16 @@ templates = import_module(vars.TEMPLATES_MODULE) def create_config(plan, wls_config): # Traffic simulation parameters - wsl_yml_template = templates.get_wsl_template() + wls_yml_template = templates.get_wls_template() artifact_id = plan.render_templates( config={ - vars.CONTAINER_WSL_CONFIGURATION_FILE_NAME: struct( - template=wsl_yml_template, + vars.CONTAINER_WLS_CONFIGURATION_FILE_NAME: struct( + template=wls_yml_template, data=wls_config, ) }, - name="wsl_config" + name="wls_config" ) return artifact_id @@ -34,20 +34,20 @@ def create_targets(plan, services): artifact_id = plan.render_templates( config={ - vars.CONTAINER_TARGETS_FILE_NAME_WSL: struct( + vars.CONTAINER_TARGETS_FILE_NAME_WLS: struct( template=template, data=template_data, ) }, - name="wsl_targets" + name="wls_targets" ) return artifact_id -def init(plan, services, wsl_config): +def init(plan, services, wls_config): # Generate simulation config - wsl_config = create_config(plan, wsl_config) + wls_config = create_config(plan, wls_config) tomls_artifact = plan.upload_files( src = vars.NODE_CONFIG_FILE_LOCATION, @@ -55,23 +55,23 @@ def init(plan, services, wsl_config): ) # Create targets.json - wsl_targets = create_targets(plan, services) + wls_targets = create_targets(plan, services) add_service_config = ServiceConfig( - image=vars.WSL_IMAGE, + image=vars.WLS_IMAGE, ports={}, files={ - vars.WSL_CONFIG_PATH: wsl_config, - vars.WSL_TARGETS_PATH: wsl_targets, 
- vars.WSL_TOMLS_PATH: tomls_artifact + vars.WLS_CONFIG_PATH: wls_config, + vars.WLS_TARGETS_PATH: wls_targets, + vars.WLS_TOMLS_PATH: tomls_artifact }, cmd=vars.WLS_CMD ) - wsl_service = plan.add_service( - service_name=vars.WSL_SERVICE_NAME, + wls_service = plan.add_service( + service_name=vars.WLS_SERVICE_NAME, config=add_service_config ) - return wsl_service + return wls_service diff --git a/wsl-module/README.md b/wls-module/README.md similarity index 95% rename from wsl-module/README.md rename to wls-module/README.md index db06f15..cf00cac 100644 --- a/wsl-module/README.md +++ b/wls-module/README.md @@ -1,4 +1,4 @@ -Wakurtosis Load Simualtor (WSL) +Wakurtosis Load Simualtor (WLS) =============================== Kurtosis: https://docs.kurtosis.com/ @@ -8,7 +8,7 @@ Kurtosis: https://docs.kurtosis.com/ To build docker image: `sh ./build.sh` -Name of the image is wsl:0.0.1 +Name of the image is wls:0.0.1 ### Parameters diff --git a/wls-module/build.sh b/wls-module/build.sh new file mode 100644 index 0000000..efdcccc --- /dev/null +++ b/wls-module/build.sh @@ -0,0 +1,6 @@ +#!/bin/sh +# pip freeze > requirements.txt +image_id=$(docker images -q wls:0.0.1) +echo $image_id +docker image rm -f $image_id +docker image build --progress=plain -t wls:0.0.1 ./ diff --git a/wsl-module/config/wsl.yml b/wls-module/config/wsl.yml similarity index 100% rename from wsl-module/config/wsl.yml rename to wls-module/config/wsl.yml diff --git a/wsl-module/dockerfile b/wls-module/dockerfile similarity index 82% rename from wsl-module/dockerfile rename to wls-module/dockerfile index 4ccee00..37ee5ee 100644 --- a/wsl-module/dockerfile +++ b/wls-module/dockerfile @@ -1,7 +1,7 @@ FROM python:3.11.0 LABEL Maintainer="Daimakaimura" -WORKDIR /wsl -COPY wsl.py . +WORKDIR /wls +COPY wls.py . COPY rtnorm.py . 
COPY requirements.txt ./ RUN pip install -r requirements.txt \ No newline at end of file diff --git a/wsl-module/requirements.txt b/wls-module/requirements.txt similarity index 100% rename from wsl-module/requirements.txt rename to wls-module/requirements.txt diff --git a/wsl-module/rtnorm.py b/wls-module/rtnorm.py similarity index 100% rename from wsl-module/rtnorm.py rename to wls-module/rtnorm.py diff --git a/wsl-module/wsl.py b/wls-module/wsl.py similarity index 99% rename from wsl-module/wsl.py rename to wls-module/wsl.py index d1fc996..05ce35f 100644 --- a/wsl-module/wsl.py +++ b/wls-module/wsl.py @@ -17,7 +17,7 @@ """ Globals """ G_APP_NAME = 'WLS' G_LOG_LEVEL = 'DEBUG' -G_DEFAULT_CONFIG_FILE = './config/wsl.yml' +G_DEFAULT_CONFIG_FILE = './config/wls.yml' G_LOGGER = None """ Custom logging formatter """ diff --git a/wsl-module/build.sh b/wsl-module/build.sh deleted file mode 100644 index c662990..0000000 --- a/wsl-module/build.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh -# pip freeze > requirements.txt -image_id=$(docker images -q wsl:0.0.1) -echo $image_id -docker image rm -f $image_id -docker image build --progress=plain -t wsl:0.0.1 ./ From 777f67b1b47bfad518693deab9dd2f6467f60649 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Tue, 21 Feb 2023 16:57:57 +0100 Subject: [PATCH 043/112] Last wsl to wls changes --- wls-module/config/{wsl.yml => wls.yml} | 0 wls-module/{wsl.py => wls.py} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename wls-module/config/{wsl.yml => wls.yml} (100%) rename wls-module/{wsl.py => wls.py} (100%) diff --git a/wls-module/config/wsl.yml b/wls-module/config/wls.yml similarity index 100% rename from wls-module/config/wsl.yml rename to wls-module/config/wls.yml diff --git a/wls-module/wsl.py b/wls-module/wls.py similarity index 100% rename from wls-module/wsl.py rename to wls-module/wls.py From d5b433290cb76b2d66354e7a2c41518ccaf57a23 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Tue, 21 Feb 2023 17:01:01 +0100 
Subject: [PATCH 044/112] Removed unused system variable --- src/system_variables.star | 1 - 1 file changed, 1 deletion(-) diff --git a/src/system_variables.star b/src/system_variables.star index 8a1cac4..f5c977f 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -20,7 +20,6 @@ WAKU_LIBP2P_PORT_ID = "libp2p" WAKU_LIBP2P_PORT_PROTOCOL = "TCP" WAKU_LIBP2P_PORT = 60000 -WAKUNODE_CONFIGURATION_FILE_EXTENSION = ".toml" WAKUNODE_CONFIGURATION_FILE_FLAG = "--config-file=" WAKUNODE_PORT_SHIFT_FLAG = "--ports-shift=" NWAKU_ENTRYPOINT = "/usr/bin/wakunode --rpc-address=0.0.0.0 --metrics-server-address=0.0.0.0 --log-level=TRACE" From d8c3429bb60f630aad782f127a257f5ea6226640 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Tue, 21 Feb 2023 20:46:23 +0100 Subject: [PATCH 045/112] Moved send json rpc to a new file --- src/call_protocols.star | 18 ++++++++++++++++++ src/system_variables.star | 1 + src/waku.star | 26 ++++---------------------- 3 files changed, 23 insertions(+), 22 deletions(-) create mode 100644 src/call_protocols.star diff --git a/src/call_protocols.star b/src/call_protocols.star new file mode 100644 index 0000000..71462c0 --- /dev/null +++ b/src/call_protocols.star @@ -0,0 +1,18 @@ + + +def send_json_rpc(plan, service_name, port_id, method, params, extract={}): + recipe = PostHttpRequestRecipe( + service_name=service_name, + port_id=port_id, + endpoint="", + content_type="application/json", + body='{ "jsonrpc": "2.0", "method": "' + method + '", "params": [' + params + '], "id": 1}', + extract=extract + ) + + response = plan.wait(recipe=recipe, + field="code", + assertion="==", + target_value=200) + + return response \ No newline at end of file diff --git a/src/system_variables.star b/src/system_variables.star index f5c977f..0b779fd 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -100,6 +100,7 @@ ARGUMENT_PARSER_MODULE = "github.com/logos-co/wakurtosis/src/arguments_parser.st FILE_HELPERS_MODULE = 
"github.com/logos-co/wakurtosis/src/file_helpers.star" TEMPLATES_MODULE = "github.com/logos-co/wakurtosis/src/templates.star" WLS_MODULE = "github.com/logos-co/wakurtosis/src/wls.star" +CALL_PROTOCOLS = "github.com/logos-co/wakurtosis/src/call_protocols.star" TEST_ARGUMENTS_MODULE = "github.com/logos-co/wakurtosis/src/tests/test_arguments_parser.star" TEST_FILES_MODULE = "github.com/logos-co/wakurtosis/src/tests/test_file_helpers.star" diff --git a/src/waku.star b/src/waku.star index 8a75cd9..79f598e 100644 --- a/src/waku.star +++ b/src/waku.star @@ -3,30 +3,13 @@ vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") # Module Imports files = import_module(vars.FILE_HELPERS_MODULE) - - -def send_json_rpc(plan, service_name, port_id, method, params, extract={}): - recipe = PostHttpRequestRecipe( - service_name=service_name, - port_id=port_id, - endpoint="", - content_type="application/json", - body='{ "jsonrpc": "2.0", "method": "' + method + '", "params": [' + params + '], "id": 1}', - extract=extract - ) - - response = plan.wait(recipe=recipe, - field="code", - assertion="==", - target_value=200) - - return response +call_protocols = import_module(vars.CALL_PROTOCOLS) def get_wakunode_peer_id(plan, service_name, port_id): extract = {"peer_id": '.result.listenAddresses | .[0] | split("/") | .[-1]'} - response = send_json_rpc(plan, service_name, port_id, + response = call_protocols.send_json_rpc(plan, service_name, port_id, vars.GET_WAKU_INFO_METHOD, "", extract) plan.assert(value=response["code"], assertion="==", target_value = 200) @@ -34,7 +17,6 @@ def get_wakunode_peer_id(plan, service_name, port_id): return response["extract.peer_id"] -# todo better name for this function def create_node_multiaddress(node_id, node_information): ip = node_information[vars.IP_KEY] port = node_information[vars.PORTS_KEY][vars.WAKU_LIBP2P_PORT_ID + "_" + node_id][0] @@ -52,7 +34,7 @@ def connect_wakunode_to_peers(plan, service_name, node_id, port_id, 
peer_ids): params = _merge_peer_ids(peer_ids) port_id = port_id + "_" + node_id - response = send_json_rpc(plan, service_name, port_id, method, params) + response = call_protocols.send_json_rpc(plan, service_name, port_id, method, params) plan.assert(value=response["code"], assertion="==", target_value = 200) @@ -70,7 +52,7 @@ def make_service_wait(plan, service_name, time): def get_waku_peers(plan, waku_service_name): extract = {"peers": '.result | length'} - response = send_json_rpc(plan, waku_service_name, vars.RPC_PORT_ID, + response = call_protocols.send_json_rpc(plan, waku_service_name, vars.RPC_PORT_ID, vars.GET_PEERS_METHOD, "", extract) plan.assert(value=response["code"], assertion="==", target_value=200) From e58a7aaf6b5f3809baca6a083e2452747075d81a Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Tue, 21 Feb 2023 21:08:53 +0100 Subject: [PATCH 046/112] Not having a gennet config should not be considered an error. --- gennet-module/gennet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gennet-module/gennet.py b/gennet-module/gennet.py index 11dbabd..1aeaabb 100755 --- a/gennet-module/gennet.py +++ b/gennet-module/gennet.py @@ -298,7 +298,7 @@ def conf_callback(ctx: typer.Context, param: typer.CallbackParam, cfile: str): conf = json.load(f) if "gennet" not in conf: print(f"Gennet configuration not found in {cfile}. 
Skipping topology generation.") - sys.exit(1) + sys.exit(0) if "general" in conf and "prng_seed" in conf["general"]: conf["gennet"]["prng_seed"] = conf["general"]["prng_seed"] # TODO : type-check and sanity-check the config.json From 605eb7fefd6ce03d1f5f0d0d53bf6bebf6c469c8 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Wed, 22 Feb 2023 16:47:40 +0100 Subject: [PATCH 047/112] Fixed typo en config.json and changed gennet output to match what is expected in starlark --- config/config.json | 2 +- gennet-module/gennet.py | 22 ++++++++++++---------- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/config/config.json b/config/config.json index f78903d..cfbaca8 100644 --- a/config/config.json +++ b/config/config.json @@ -21,7 +21,7 @@ "output_dir": "network_data", "benchmark": "False" }, - "wsl": { + "wls": { "simulation_time": 60, "message_rate": 10, "min_packet_size": 2, diff --git a/gennet-module/gennet.py b/gennet-module/gennet.py index 1aeaabb..3f5a4b2 100755 --- a/gennet-module/gennet.py +++ b/gennet-module/gennet.py @@ -45,7 +45,8 @@ class networkType(Enum): NW_DATA_FNAME = "network_data.json" -NODE_PREFIX, SUBNET_PREFIX, CONTAINER_PREFIX = "node", "subnetwork", "container" +EXTERNAL_NODES_PREFIX, NODE_PREFIX, SUBNET_PREFIX, CONTAINER_PREFIX = \ + "nodes", "node", "subnetwork", "containers" ### I/O related fns ############################################################## @@ -256,6 +257,7 @@ def generate_and_write_files(ctx: typer, G): json_dump = {} json_dump[CONTAINER_PREFIX] = {} + json_dump[EXTERNAL_NODES_PREFIX] = {} inv = {} for key, val in node2container.items(): if val[1] not in inv: @@ -272,19 +274,19 @@ def generate_and_write_files(ctx: typer, G): # write the per node toml for the i^ith node of appropriate type node_type, i = node_types_enum[i], i+1 write_toml(ctx.params["output_dir"], node, generate_toml(topics, node_type)) - json_dump[node] = {} - json_dump[node]["static_nodes"] = [] + json_dump[EXTERNAL_NODES_PREFIX][node] = {} + 
json_dump[EXTERNAL_NODES_PREFIX][node]["static_nodes"] = [] for edge in G.edges(node): - json_dump[node]["static_nodes"].append(edge[1]) - json_dump[node][SUBNET_PREFIX] = node2subnet[node] - json_dump[node]["image"] = nodeTypeToDocker.get(node_type) + json_dump[EXTERNAL_NODES_PREFIX][node]["static_nodes"].append(edge[1]) + json_dump[EXTERNAL_NODES_PREFIX][node][SUBNET_PREFIX] = node2subnet[node] + json_dump[EXTERNAL_NODES_PREFIX][node]["image"] = nodeTypeToDocker.get(node_type) # the per node tomls will continue for now as they include topics - json_dump[node]["node_config"] = f"{node}.toml" + json_dump[EXTERNAL_NODES_PREFIX][node]["node_config"] = f"{node}.toml" # logs ought to continue as they need to be unique - json_dump[node]["node_log"] = f"{node}.log" + json_dump[EXTERNAL_NODES_PREFIX][node]["node_log"] = f"{node}.log" port_shift, cid = node2container[node] - json_dump[node]["port_shift"] = port_shift - json_dump[node]["container_id"] = cid + json_dump[EXTERNAL_NODES_PREFIX][node]["port_shift"] = port_shift + json_dump[EXTERNAL_NODES_PREFIX][node]["container_id"] = cid write_json(ctx.params["output_dir"], json_dump) # network wide json From 2fe5771e7a65c60ae9d3a24f79b1f2fe575c3963 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Wed, 22 Feb 2023 16:49:58 +0100 Subject: [PATCH 048/112] Deleted unnecessary variable in config.json --- config/config.json | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/config/config.json b/config/config.json index cfbaca8..6d5909f 100644 --- a/config/config.json +++ b/config/config.json @@ -6,11 +6,10 @@ "enclave_name": "wakurtosis", "topology_path": "./config/topology_generated/", "jobs": 4, - "interconnection_batch": 10, - "nodes_per_container": 5 + "interconnection_batch": 10 }, "gennet": { - "num_nodes": 9, + "num_nodes": 3, "num_topics": 1, "num_partitions": 1, "num_subnets": 1, From 76fc0c34d2dad83ee7674550fd8f5a4a20da5672 Mon Sep 17 00:00:00 2001 From: Gusto Bacvinka Date: Tue, 7 Feb 2023 
16:04:25 +0200 Subject: [PATCH 049/112] Methods for nomos node interconnection --- src/nomos.star | 82 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 src/nomos.star diff --git a/src/nomos.star b/src/nomos.star new file mode 100644 index 0000000..7074c84 --- /dev/null +++ b/src/nomos.star @@ -0,0 +1,82 @@ +# System Imports +system_variables = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") + +# Module Imports +files = import_module(system_variables.FILE_HELPERS_MODULE) + + +def send_req(plan, service_id, port_id, endpoint, method, body, extract={}): + recipe = struct( + service_id=service_id, + port_id=port_id, + endpoint=endpoint, + method=method, + content_type="application/json", + body=body, + extract=extract + ) + + response = plan.wait(recipe=recipe, + field="code", + assertion="==", + target_value=200) + + return response + + +def get_nomos_peer_id(plan, service_id, port_id): + extract = {"peer_id": '.peer_id'} + + response = send_req(plan, service_id, port_id, system_variables.NOMOS_NET_INFO_URL + system_variables.NOMOS_NET_INFO_METHOD, "", extract) + + plan.assert(value=response["code"], assertion="==", target_value = 200) + + return response["extract.peer_id"] + + +def create_nomos_id(nomos_service_information): + nomos_service = nomos_service_information["service_info"] + + ip = nomos_service.ip_address + port = nomos_service.ports[system_variables.NOMOS_LIBP2P_PORT_ID].number + nomos_node_id = nomos_service_information["peer_id"] + + return '"/ip4/' + str(ip) + '/tcp/' + str(port) + '/p2p/' + nomos_node_id + '"' + + +def _merge_peer_ids(peer_ids): + return "[" + ",".join(peer_ids) + "]" + + +def connect_nomos_to_peers(plan, service_id, port_id, peer_ids): + method = system_variables.CONNECT_TO_PEER_METHOD + body = _merge_peer_ids(peer_ids) + + response = send_req(plan, service_id, port_id, system_variables.NOMOS_NET_CONN_URL + system_variables.NOMOS_NET_CONN_METHOD, body) + 
+ plan.assert(value=response["code"], assertion="==", target_value = 200) + + plan.print(response) + + +def make_service_wait(plan,service_id, time): + exec_recipe = struct( + service_id=service_id, + command=["sleep", time] + ) + plan.exec(exec_recipe) + + + +def interconnect_nomos_nodes(plan, topology_information, services): + # Interconnect them + for nomos_service_id in services.keys(): + peers = topology_information[nomos_service_id]["static_nodes"] + + peer_ids = [create_nomos_id(services[peer]) for peer in peers] + + connect_nomos_to_peers(plan, nomos_service_id, system_variables.NOMOS_HTTP_PORT_ID, peer_ids) + + From 552f824d871805579aa390cc9b35a8347ec20937 Mon Sep 17 00:00:00 2001 From: Gusto Bacvinka Date: Sat, 25 Feb 2023 14:10:11 +0200 Subject: [PATCH 050/112] Variable definitions for nomos node --- src/nomos.star | 5 ++--- src/system_variables.star | 12 ++++++++++++ 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/src/nomos.star b/src/nomos.star index 7074c84..be2cc90 100644 --- a/src/nomos.star +++ b/src/nomos.star @@ -28,7 +28,7 @@ def get_nomos_peer_id(plan, service_id, port_id): extract = {"peer_id": '.peer_id'} response = send_req(plan, service_id, port_id, system_variables.NOMOS_NET_INFO_URL - system_variables.NOMOS_NET_INFO_METHOD, "", extract) + "GET", "", extract) plan.assert(value=response["code"], assertion="==", target_value = 200) @@ -50,11 +50,10 @@ def _merge_peer_ids(peer_ids): def connect_nomos_to_peers(plan, service_id, port_id, peer_ids): - method = system_variables.CONNECT_TO_PEER_METHOD body = _merge_peer_ids(peer_ids) response = send_req(plan, service_id, port_id, system_variables.NOMOS_NET_CONN_URL - system_variables.NOMOS_NET_CONN_METHOD, body) + "POST", body) plan.assert(value=response["code"], assertion="==", target_value = 200) diff --git a/src/system_variables.star b/src/system_variables.star index 0b779fd..4de74ba 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -3,6 +3,7 @@ 
NWAKU_IMAGE = "statusteam/nim-waku:nwaku-trace" GOWAKU_IMAGE = "gowaku" RPC_PORT_ID = "rpc" + NODE_CONFIG_FILE_LOCATION = "github.com/logos-co/wakurtosis/config/topology_generated/" CONTAINER_NODE_CONFIG_FILE_LOCATION = "/node/configuration_file/" GENERAL_ENTRYPOINT = ["/bin/sh", "-c"] @@ -25,6 +26,16 @@ WAKUNODE_PORT_SHIFT_FLAG = "--ports-shift=" NWAKU_ENTRYPOINT = "/usr/bin/wakunode --rpc-address=0.0.0.0 --metrics-server-address=0.0.0.0 --log-level=TRACE" GOWAKU_ENTRYPOINT = "/usr/bin/waku --rpc-address=0.0.0.0 --metrics-server-address=0.0.0.0" +# Nomos Configuration +NOMOS_IMAGE = "nomos" +NOMOS_HTTP_PORT_ID = "http" +NOMOS_HTTP_PORT = 8080 +NOMOS_LIBP2P_PORT_ID = "libp2p" +NOMOS_LIBP2P_PORT = 3000 +NOMOS_SETUP_WAIT_TIME = "5" +NOMOS_NET_INFO_URL = "/network/info" +NOMOS_NET_CONN_URL = "/network/conn" + # Prometheus Configuration PROMETHEUS_IMAGE = "prom/prometheus:latest" PROMETHEUS_SERVICE_NAME = "prometheus" @@ -101,6 +112,7 @@ FILE_HELPERS_MODULE = "github.com/logos-co/wakurtosis/src/file_helpers.star" TEMPLATES_MODULE = "github.com/logos-co/wakurtosis/src/templates.star" WLS_MODULE = "github.com/logos-co/wakurtosis/src/wls.star" CALL_PROTOCOLS = "github.com/logos-co/wakurtosis/src/call_protocols.star" +NOMOS_MODULE = "github.com/logos-co/wakurtosis/src/nomos.star" TEST_ARGUMENTS_MODULE = "github.com/logos-co/wakurtosis/src/tests/test_arguments_parser.star" TEST_FILES_MODULE = "github.com/logos-co/wakurtosis/src/tests/test_file_helpers.star" From e93504f523f806d6110a2dbd610693560d6bb1f2 Mon Sep 17 00:00:00 2001 From: Gusto Bacvinka Date: Sat, 25 Feb 2023 14:43:50 +0200 Subject: [PATCH 051/112] Nomos service initialization and information extraction --- src/node_builders.star | 179 ++++++++++++++++++++++++++++++++++++++ src/nomos.star | 4 +- src/system_variables.star | 2 + 3 files changed, 183 insertions(+), 2 deletions(-) create mode 100644 src/node_builders.star diff --git a/src/node_builders.star b/src/node_builders.star new file mode 100644 index 
0000000..a6f345b --- /dev/null +++ b/src/node_builders.star @@ -0,0 +1,179 @@ +# System Imports +system_variables = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") + +# Module Imports +waku = import_module(system_variables.WAKU_MODULE) +nomos = import_module(system_variables.NOMOS_MODULE) +files = import_module(system_variables.FILE_HELPERS_MODULE) + + +def prepare_nwaku_service(plan, nwakunode_name, all_services, use_general_configuration): + artifact_id, configuration_file = files.get_toml_configuration_artifact(plan, nwakunode_name, + use_general_configuration, + nwakunode_name) + + plan.print("Configuration being used file is " + configuration_file) + + add_service_config = ServiceConfig( + image=system_variables.NWAKU_IMAGE, + ports={ + system_variables.WAKU_RPC_PORT_ID: PortSpec(number=system_variables.WAKU_TCP_PORT, + transport_protocol="TCP"), + system_variables.PROMETHEUS_PORT_ID: PortSpec( + number=system_variables.PROMETHEUS_TCP_PORT, + transport_protocol="TCP"), + system_variables.WAKU_LIBP2P_PORT_ID: PortSpec( + number=system_variables.WAKU_LIBP2P_PORT, + transport_protocol="TCP"), + }, + files={ + system_variables.CONTAINER_NODE_CONFIG_FILE_LOCATION: artifact_id + }, + entrypoint=system_variables.NWAKU_ENTRYPOINT, + cmd=[ + "--config-file=" + system_variables.CONTAINER_NODE_CONFIG_FILE_LOCATION + "/" + configuration_file + ] + ) + + all_services[nwakunode_name] = add_service_config + + + +def prepare_gowaku_service(plan, gowakunode_name, all_services, use_general_configuration): + artifact_id, configuration_file = files.get_toml_configuration_artifact(plan, gowakunode_name, + use_general_configuration, + gowakunode_name) + + plan.print("Configuration being used file is " + configuration_file) + plan.print("Entrypoint is "+ str(system_variables.GOWAKU_ENTRYPOINT)) + + add_service_config = ServiceConfig( + image=system_variables.GOWAKU_IMAGE, + ports={ + system_variables.WAKU_RPC_PORT_ID: 
PortSpec(number=system_variables.WAKU_TCP_PORT, + transport_protocol="TCP"), + system_variables.PROMETHEUS_PORT_ID: PortSpec( + number=system_variables.PROMETHEUS_TCP_PORT, + transport_protocol="TCP"), + system_variables.WAKU_LIBP2P_PORT_ID: PortSpec( + number=system_variables.WAKU_LIBP2P_PORT, + transport_protocol="TCP"), + }, + files={ + system_variables.CONTAINER_NODE_CONFIG_FILE_LOCATION: artifact_id + }, + entrypoint=system_variables.GOWAKU_ENTRYPOINT, + cmd=[ + "--config-file=" + system_variables.CONTAINER_NODE_CONFIG_FILE_LOCATION + "/" + configuration_file + ] + ) + + all_services[gowakunode_name] = add_service_config + + +def prepare_nomos_service(plan, node_name, all_services, use_general_configuration): + plan.print("nomos") + artifact_id, configuration_file = files.get_toml_configuration_artifact(plan, node_name, + use_general_configuration, + node_name) + + plan.print("Configuration being used file is " + configuration_file) + plan.print("Entrypoint is "+ str(system_variables.NOMOS_ENTRYPOINT)) + + nomos_service_config = ServiceConfig( + image=system_variables.NOMOS_IMAGE, + ports={ + system_variables.NOMOS_HTTP_PORT_ID: PortSpec(number=system_variables.NOMOS_HTTP_PORT, + transport_protocol="TCP"), + system_variables.PROMETHEUS_PORT_ID: PortSpec( + number=system_variables.PROMETHEUS_TCP_PORT, + transport_protocol="TCP"), + system_variables.NOMOS_LIBP2P_PORT_ID: PortSpec( + number=system_variables.NOMOS_LIBP2P_PORT, + transport_protocol="TCP"), + }, + files={ + system_variables.CONTAINER_NODE_CONFIG_FILE_LOCATION: artifact_id + }, + entrypoint=system_variables.NOMOS_ENTRYPOINT, + cmd=[ + system_variables.NOMOS_CONTAINER_CONFIG_FILE_LOCATION + ] + ) + + all_services[node_name] + + +def instantiate_services(plan, network_topology, use_general_configuration): + """ + As we will need to access for the service information later, the structure is the following: + + services = { + "nwaku_0": { + "peer_id" : peer id of the node, as string, + "service_info": 
Kurtosis service struct, that has + "ip": ip of the service that is running the node, + "ports": Kurtosis PortSpec, that you can access with their respective identifier + }, + "nwaku_1": {...}, + "gowaku_": {...} + + } + + Example: + + service_peer_id = services["nwaku_0"]["peer_id"] + service_ip = services["nwaku_0"]["service_info"].ip_address + rpc_node_number = services["nwaku_0"]["service_info"].ports["your_rpc_identifier"].number + rpc_node_protocol = services["nwaku_0"]["service_info"].ports["your_rpc_identifier"].protocol + """ + + all_services = {} + + # Get up all nodes + for service_name in network_topology.keys(): + image = network_topology[service_name]["image"] + + service_builder = service_dispatcher[image] + + service_builder(plan, service_name, all_services, use_general_configuration) + + all_services_information = plan.add_services( + configs = all_services + ) + #services_information = _add_waku_service_information(plan, all_services_information) + services_information = _add_nomos_service_information(plan, all_services_information) + + return services_information + + +def _add_waku_service_information(plan, all_services_information): + + new_services_information = {} + + for service_name in all_services_information: + node_peer_id = waku.get_wakunode_peer_id(plan, service_name, system_variables.WAKU_RPC_PORT_ID) + + new_services_information[service_name] = {} + new_services_information[service_name]["peer_id"] = node_peer_id + new_services_information[service_name]["service_info"] = all_services_information[service_name] + + return new_services_information + + +def _add_nomos_service_information(plan, services_information, new_service_id, service_information): + new_service_information = {} + + nomos_peer_id = nomos.get_nomos_peer_id(plan, new_service_id, system_variables.NOMOS_HTTP_PORT_ID) + + new_service_information["peer_id"] = nomos_peer_id + new_service_information["service_info"] = service_information + + 
services_information[new_service_id] = new_service_information + + +service_dispatcher = { + "go-waku": prepare_gowaku_service, + "nim-waku": prepare_nwaku_service, + "nomos": prepare_nomos_service +} diff --git a/src/nomos.star b/src/nomos.star index be2cc90..04f8c27 100644 --- a/src/nomos.star +++ b/src/nomos.star @@ -27,7 +27,7 @@ def send_req(plan, service_id, port_id, endpoint, method, body, extract={}): def get_nomos_peer_id(plan, service_id, port_id): extract = {"peer_id": '.peer_id'} - response = send_req(plan, service_id, port_id, system_variables.NOMOS_NET_INFO_URL + response = send_req(plan, service_id, port_id, system_variables.NOMOS_NET_INFO_URL, "GET", "", extract) plan.assert(value=response["code"], assertion="==", target_value = 200) @@ -52,7 +52,7 @@ def _merge_peer_ids(peer_ids): def connect_nomos_to_peers(plan, service_id, port_id, peer_ids): body = _merge_peer_ids(peer_ids) - response = send_req(plan, service_id, port_id, system_variables.NOMOS_NET_CONN_URL + response = send_req(plan, service_id, port_id, system_variables.NOMOS_NET_CONN_URL, "POST", body) plan.assert(value=response["code"], assertion="==", target_value = 200) diff --git a/src/system_variables.star b/src/system_variables.star index 4de74ba..3672129 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -25,6 +25,8 @@ WAKUNODE_CONFIGURATION_FILE_FLAG = "--config-file=" WAKUNODE_PORT_SHIFT_FLAG = "--ports-shift=" NWAKU_ENTRYPOINT = "/usr/bin/wakunode --rpc-address=0.0.0.0 --metrics-server-address=0.0.0.0 --log-level=TRACE" GOWAKU_ENTRYPOINT = "/usr/bin/waku --rpc-address=0.0.0.0 --metrics-server-address=0.0.0.0" +NOMOS_ENTRYPOINT = ["/usr/bin/nomos-node"] +NOMOS_CONTAINER_CONFIG_FILE_LOCATION = '/etc/nomos/config.yml' # Nomos Configuration NOMOS_IMAGE = "nomos" From cb81c8676bbaa59fc54611c6d0edc84f3e092e83 Mon Sep 17 00:00:00 2001 From: Gusto Bacvinka Date: Sat, 25 Feb 2023 14:45:39 +0200 Subject: [PATCH 052/112] Gennet and WSL changes for nomos node --- 
gennet-module/gennet.py | 14 +- src/wsl.star | 77 +++++++++++ wls-module/wsl_nomos.py | 279 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 367 insertions(+), 3 deletions(-) create mode 100644 src/wsl.star create mode 100644 wls-module/wsl_nomos.py diff --git a/gennet-module/gennet.py b/gennet-module/gennet.py index 3f5a4b2..a1e8385 100755 --- a/gennet-module/gennet.py +++ b/gennet-module/gennet.py @@ -19,8 +19,9 @@ # To add a new node type, add appropriate entries to the nodeType and nodeTypeSwitch class nodeType(Enum): - NWAKU = "nwaku" # waku desktop config - GOWAKU = "gowaku" # waku mobile config + NWAKU = "nwaku" # waku desktop config + GOWAKU = "gowaku" # waku mobile config + NOMOS = "nomos" # incompatible with waku nodes nodeTypeToToml = { @@ -30,9 +31,16 @@ class nodeType(Enum): nodeTypeToDocker = { nodeType.NWAKU: "nim-waku", - nodeType.GOWAKU: "go-waku" + nodeType.GOWAKU: "go-waku", + nodeType.NOMOS: "nomos" } +<<<<<<< HEAD +======= +#NODES = [nodeType.NWAKU, nodeType.GOWAKU, nodeType.NOMOS] +#NODE_PROBABILITIES = (0, 0, 100) + +>>>>>>> b6a76d5 (Gennet and WSL changes for nomos node) # To add a new network type, add appropriate entries to the networkType and networkTypeSwitch # the networkTypeSwitch is placed before generate_network(): fwd declaration mismatch with typer/python :/ class networkType(Enum): diff --git a/src/wsl.star b/src/wsl.star new file mode 100644 index 0000000..3fd4394 --- /dev/null +++ b/src/wsl.star @@ -0,0 +1,77 @@ +# System Imports +system_variables = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") + +# Module Imports +files = import_module(system_variables.FILE_HELPERS_MODULE) +templates = import_module(system_variables.TEMPLATES_MODULE) + +def create_config(plan, wls_config): + + # Traffic simulation parameters + wsl_yml_template = templates.get_wsl_template() + + artifact_id = plan.render_templates( + config={ + system_variables.CONTAINER_WSL_CONFIGURATION_FILE_NAME: struct( + 
template=wsl_yml_template, + data=wls_config, + ) + }, + name="wsl_config" + ) + + return artifact_id + +def create_targets(plan, services): + + # Get private ip and ports of all nodes + template_data = files.generate_template_node_targets(services, system_variables.NOMOS_HTTP_PORT_ID) + + # Template + template = """ + {{.targets}} + """ + + artifact_id = plan.render_templates( + config={ + system_variables.CONTAINER_TARGETS_FILE_NAME_WSL: struct( + template=template, + data=template_data, + ) + }, + name="wsl_targets" + ) + + return artifact_id + +def init(plan, services, wsl_config): + + # Generate simulation config + wsl_config = create_config(plan, wsl_config) + + tomls_artifact = plan.upload_files( + src = system_variables.NODE_CONFIG_FILE_LOCATION, + name = "tomls_artifact", + ) + + # Create targets.json + wsl_targets = create_targets(plan, services) + + + add_service_config = ServiceConfig( + image=system_variables.WSL_IMAGE, + ports={}, + files={ + system_variables.WSL_CONFIG_PATH: wsl_config, + system_variables.WSL_TARGETS_PATH: wsl_targets, + system_variables.WSL_TOMLS_PATH: tomls_artifact + }, + cmd=["python3", "wsl.py"] + ) + wsl_service = plan.add_service( + service_name=system_variables.WSL_SERVICE_NAME, + config=add_service_config + ) + + return wsl_service + diff --git a/wls-module/wsl_nomos.py b/wls-module/wsl_nomos.py new file mode 100644 index 0000000..cc5eab7 --- /dev/null +++ b/wls-module/wsl_nomos.py @@ -0,0 +1,279 @@ +#!/usr/bin/env python3 +""" +Description: Wakurtosis load simulator + +""" + +""" Dependencies """ +import sys, logging, yaml, json, time, random, os, argparse, tomllib, glob +import requests +import rtnorm +# from pathlib import Path +# import numpy as np +# import pandas as pd +# import matplotlib.pyplot as plt +# import cloudpickle as pickle + +""" Globals """ +G_APP_NAME = 'WLS' +G_LOG_LEVEL = 'DEBUG' +G_DEFAULT_CONFIG_FILE = './config/wsl.yml' +G_LOGGER = None + +""" Custom logging formatter """ +class 
CustomFormatter(logging.Formatter): + + # Set different formats for every logging level + time_name_stamp = "[%(asctime)s.%(msecs)03d] [" + G_APP_NAME + "]" + FORMATS = { + logging.ERROR: time_name_stamp + " ERROR in %(module)s.py %(funcName)s() %(lineno)d - %(msg)s", + logging.WARNING: time_name_stamp + " WARNING - %(msg)s", + logging.CRITICAL: time_name_stamp + " CRITICAL in %(module)s.py %(funcName)s() %(lineno)d - %(msg)s", + logging.INFO: time_name_stamp + " %(msg)s", + logging.DEBUG: time_name_stamp + " %(funcName)s() %(msg)s", + 'DEFAULT': time_name_stamp + " %(msg)s", + } + + def format(self, record): + log_fmt = self.FORMATS.get(record.levelno, self.FORMATS['DEFAULT']) + formatter = logging.Formatter(log_fmt, '%d-%m-%Y %H:%M:%S') + return formatter.format(record) + +def check_nomos_node(node_address): + url = node_address + "network/info" + + try: + response = requests.get(url) + except Exception as e: + G_LOGGER.debug('%s: %s' % (e.__doc__, e)) + return False + + try: + response_obj = response.json() + except Exception as e: + G_LOGGER.debug('%s: %s' % (e.__doc__, e)) + return False + + G_LOGGER.debug('Response from %s: %s' %(node_address, response_obj)) + + return True + +def add_nomos_tx(node_address, tx): + url = node_address + "mempool/addtx" + + try: + response = requests.post(url, data=json.dumps(tx), headers={'content-type': 'application/json'}) + except Exception as e: + G_LOGGER.debug('%s: %s' % (e.__doc__, e)) + return False + + G_LOGGER.debug('Response from %s: %s' %(url, response.text)) + + return True + +def get_nomos_mempool_metrics(node_address): + url = node_address + "mempool/metrics" + + try: + response = requests.get(url) + except Exception as e: + G_LOGGER.debug('%s: %s' % (e.__doc__, e)) + return False + + try: + response_obj = response.json() + except Exception as e: + G_LOGGER.debug('%s: %s' % (e.__doc__, e)) + return False + + G_LOGGER.debug('Response from %s: %s' %(node_address, response_obj)) + + return True + + +# Generate a 
random interval using a Poisson distribution +def poisson_interval(rate): + return random.expovariate(rate) + +def make_payload(size): + payload = hex(random.getrandbits(4*size)) + G_LOGGER.debug('Payload of size %d bytes: %s' %(size, payload)) + return payload + +def make_payload_dist(dist_type, min_size, max_size): + + # Check if min and max packet sizes are the same + if min_size == max_size: + G_LOGGER.warning('Packet size is constant: min_size=max_size=%d' %min_size) + return make_payload(min_size) + + # Payload sizes are even integers uniformly distributed in [min_size, max_size] + if dist_type == 'uniform': + size = int(random.uniform(min_size, max_size)) + + # Reject non even sizes + while(size % 2) != 0: + size = int(random.uniform(min_size, max_size)) + + return make_payload(size) + + # Payload sizes are even integers ~"normally" distributed in [min_size, max_size] + if dist_type == 'gaussian': + σ = (max_size - min_size) / 5. + μ = (max_size - min_size) / 2. + size = int(rtnorm.rtnorm(min_size, max_size, sigma=σ, mu=μ, size=1)) + + # Reject non even sizes + while(size % 2) != 0: + size = int(rtnorm.rtnorm(min_size, max_size, sigma=σ, mu=μ, size=1)) + + return make_payload(size) + + G_LOGGER.error('Unknown distribution type %s') + + return '0x00' + +def parse_targets(enclave_dump_path, waku_port=8545): + + targets = [] + + G_LOGGER.info('Extracting Waku node addresses from Kurtosus enclance dump in %s' %enclave_dump_path) + + for path_obj in os.walk(enclave_dump_path): + if 'waku_' in path_obj[0]: + with open(path_obj[0] + '/spec.json', "r") as read_file: + spec_obj = json.load(read_file) + network_settings = spec_obj['NetworkSettings'] + waku_address = network_settings['Ports']['%d/tcp' %waku_port] + targets.append('%s:%s' %(waku_address[0]['HostIp'], waku_address[0]['HostPort'])) + + G_LOGGER.info('Parsed %d Waku nodes' %len(targets)) + + return targets + +def get_next_time_to_msg(inter_msg_type, msg_rate, simulation_time): + + if inter_msg_type == 
'poisson': + return poisson_interval(msg_rate) + + if inter_msg_type == 'uniform': + return simulation_time / msg_rate + + G_LOGGER.error('%s is not a valid inter_msg_type. Aborting.' %inter_msg_type) + sys.exit() + +def get_all_messages_from_node_from_topic(node_address, topic): + + page_cnt = 0 + msg_cnt = 0 + + # Retrieve the first page + response, elapsed = get_waku_msgs(node_address, topic) + if 'error' in response: + G_LOGGER.error(response['error']) + return 0 + + messages = response['result']['messages'] + msg_cnt += len(messages) + G_LOGGER.debug('Got page %d with %d messages from node %s and topic: %s' %(page_cnt, len(messages), node_address, topic)) + + for msg_idx, msg in enumerate(messages): + # Decode the payload + payload_obj = json.loads(''.join(map(chr, msg['payload']))) + + # Retrieve further pages + while(response['result']['pagingOptions']): + page_cnt += 1 + cursor = response['result']['pagingOptions']['cursor'] + index = {"digest" : cursor['digest'], "receivedTime" : cursor['receiverTime']} + response, elapsed = get_waku_msgs(node_address, topic, cursor) + if 'error' in response: + G_LOGGER.error(response['error']) + break + + messages = response['result']['messages'] + msg_cnt += len(messages) + G_LOGGER.debug('Got page %d with %d messages from node %s and topic: %s' %(page_cnt, len(messages), node_address, topic)) + + for msg_idx, msg in enumerate(messages): + # Decode the payload + payload_obj = json.loads(''.join(map(chr, msg['payload']))) + + return msg_cnt + +def main(): + + global G_LOGGER + + """ Init Logging """ + G_LOGGER = logging.getLogger(G_APP_NAME) + handler = logging.StreamHandler(sys.stdout) + handler.setFormatter(CustomFormatter()) + G_LOGGER.addHandler(handler) + + G_LOGGER.info('Started') + + """ Parse command line args. 
""" + parser = argparse.ArgumentParser() + parser.add_argument("-cfg", "--config_file", help="Config file", action="store_true", default=G_DEFAULT_CONFIG_FILE) + args = parser.parse_args() + + config_file = args.config_file + + """ Load config file """ + try: + with open(config_file, 'r') as f: + config = yaml.safe_load(f) + except Exception as e: + G_LOGGER.error('%s: %s' % (e.__doc__, e)) + sys.exit() + + # Set loglevel from config + G_LOGGER.setLevel(config['general']['debug_level']) + handler.setLevel(config['general']['debug_level']) + + G_LOGGER.debug(config) + G_LOGGER.info('Configuration loaded from %s' %config_file) + + # Set RPNG seed from config + random.seed(config['general']['prng_seed']) + + """ Load targets """ + try: + with open(config['general']['targets_file'], 'r') as read_file: + targets = json.load(read_file) + except Exception as e: + G_LOGGER.error('%s: %s' % (e.__doc__, e)) + sys.exit() + + if len(targets) == 0: + G_LOGGER.error('Cannot find valid targets. Aborting.') + sys.exit(1) + + G_LOGGER.debug(targets) + G_LOGGER.info('%d targets loaded' %len(targets)) + + """ Check all nodes are reachable """ + for i, target in enumerate(targets): + if not check_nomos_node('http://%s/' %target): + G_LOGGER.error('Node %d (%s) is not online. Aborted.' %(i, target)) + sys.exit(1) + G_LOGGER.info('All %d Waku nodes are reachable.' %len(targets)) + + G_LOGGER.info('Tx addition start time: %d' %int(time.time())) + """ Add new transaction to every node """ + for i, target in enumerate(targets): + if not add_nomos_tx('http://%s/' %target, 'tx%s' %i): + G_LOGGER.error('Unable to add new tx. Node %d (%s).' %(i, target)) + + """ Collect mempool metrics from nodes """ + for i, target in enumerate(targets): + if not get_nomos_mempool_metrics('http://%s/' %target): + G_LOGGER.error('Unable to add new tx. Node %d (%s).' 
%(i, target)) + + """ We are done """ + G_LOGGER.info('Ended') + +if __name__ == "__main__": + + main() From 2a3b5e552df028461b7bb420a9b48fcd2c7d587c Mon Sep 17 00:00:00 2001 From: Gusto Bacvinka Date: Sun, 12 Feb 2023 23:36:16 +0200 Subject: [PATCH 053/112] Update to Kurtosis 66.2 --- src/node_builders.star | 17 ++++++++++------- src/nomos.star | 31 ++++++++++++++++++++++--------- 2 files changed, 32 insertions(+), 16 deletions(-) diff --git a/src/node_builders.star b/src/node_builders.star index a6f345b..214d873 100644 --- a/src/node_builders.star +++ b/src/node_builders.star @@ -101,7 +101,7 @@ def prepare_nomos_service(plan, node_name, all_services, use_general_configurati ] ) - all_services[node_name] + all_services[node_name] = nomos_service_config def instantiate_services(plan, network_topology, use_general_configuration): @@ -161,15 +161,18 @@ def _add_waku_service_information(plan, all_services_information): return new_services_information -def _add_nomos_service_information(plan, services_information, new_service_id, service_information): - new_service_information = {} +def _add_nomos_service_information(plan, all_services_information): - nomos_peer_id = nomos.get_nomos_peer_id(plan, new_service_id, system_variables.NOMOS_HTTP_PORT_ID) + new_services_information = {} - new_service_information["peer_id"] = nomos_peer_id - new_service_information["service_info"] = service_information + for service_name in all_services_information: + node_peer_id = nomos.get_nomos_peer_id(plan, service_name, system_variables.NOMOS_HTTP_PORT_ID) - services_information[new_service_id] = new_service_information + new_services_information[service_name] = {} + new_services_information[service_name]["peer_id"] = node_peer_id + new_services_information[service_name]["service_info"] = all_services_information[service_name] + + return new_services_information service_dispatcher = { diff --git a/src/nomos.star b/src/nomos.star index 04f8c27..ee7083f 100644 --- a/src/nomos.star +++ 
b/src/nomos.star @@ -5,12 +5,27 @@ system_variables = import_module("github.com/logos-co/wakurtosis/src/system_vari files = import_module(system_variables.FILE_HELPERS_MODULE) -def send_req(plan, service_id, port_id, endpoint, method, body, extract={}): - recipe = struct( - service_id=service_id, +def get_req(plan, service_name, port_id, endpoint, extract={}): + recipe = GetHttpRequestRecipe( + service_name=service_name, + port_id=port_id, + endpoint=endpoint, + extract=extract + ) + + response = plan.wait(recipe=recipe, + field="code", + assertion="==", + target_value=200) + + return response + + +def post_req(plan, service_name, port_id, endpoint, body, extract={}): + recipe = PostHttpRequestRecipe( + service_name=service_name, port_id=port_id, endpoint=endpoint, - method=method, content_type="application/json", body=body, extract=extract @@ -24,11 +39,10 @@ def send_req(plan, service_id, port_id, endpoint, method, body, extract={}): return response -def get_nomos_peer_id(plan, service_id, port_id): +def get_nomos_peer_id(plan, service_name, port_id): extract = {"peer_id": '.peer_id'} - response = send_req(plan, service_id, port_id, system_variables.NOMOS_NET_INFO_URL, - "GET", "", extract) + response = get_req(plan, service_name, port_id, system_variables.NOMOS_NET_INFO_URL, extract) plan.assert(value=response["code"], assertion="==", target_value = 200) @@ -52,8 +66,7 @@ def _merge_peer_ids(peer_ids): def connect_nomos_to_peers(plan, service_id, port_id, peer_ids): body = _merge_peer_ids(peer_ids) - response = send_req(plan, service_id, port_id, system_variables.NOMOS_NET_CONN_URL, - "POST", body) + response = post_req(plan, service_id, port_id, system_variables.NOMOS_NET_CONN_URL, body) plan.assert(value=response["code"], assertion="==", target_value = 200) From 34d903e796b720377c6f5641be4cac147dc771a2 Mon Sep 17 00:00:00 2001 From: Gusto Bacvinka Date: Sat, 25 Feb 2023 14:46:47 +0200 Subject: [PATCH 054/112] Extend nomos wsl and add visualizations --- 
wls-module/nomos.py | 55 ++++++++++ wls-module/wsl_nomos.py | 237 +++++++++++++++++++--------------------- 2 files changed, 167 insertions(+), 125 deletions(-) create mode 100644 wls-module/nomos.py diff --git a/wls-module/nomos.py b/wls-module/nomos.py new file mode 100644 index 0000000..04b4d35 --- /dev/null +++ b/wls-module/nomos.py @@ -0,0 +1,55 @@ +import json +import matplotlib.pyplot as plt +import networkx as nx +from PIL import Image + +# Histogram of time delta in millis of tx being sent +# and received by all nodes. +def hist_delta(name, iterations): + results = [] + for iteration in iterations: + iteration_results = [result["delta"] for result in iteration["results"]] + results.extend(iteration_results) + + plt.hist(results, bins=20) + plt.xlabel("delta in (ms)") + plt.ylabel("Frequency") + plt.title("TX dissemination over network") + plt.savefig(name) + plt.close() + +def network_graph(name, topology): + G = nx.DiGraph() + for node_name, node_data in topology.items(): + G.add_node(node_name) + for node_name, node_data in topology.items(): + for connection in node_data["static_nodes"]: + G.add_edge(node_name, connection) + + nx.draw(G, with_labels=True) + plt.savefig(name) + plt.close() + +def concat_images(name, images): + images = [Image.open(image) for image in images] + + # Get the width and height of the first image + widths, heights = zip(*(i.size for i in images)) + + # Calculate the total width and height of the collage + total_width = sum(widths) + max_height = max(heights) + + # Create a new image with the calculated size + collage = Image.new('RGB', (total_width, max_height)) + + # Paste the images into the collage + x_offset = 0 + for image in images: + collage.paste(image, (x_offset, 0)) + x_offset += image.size[0] + + # Save the collage + collage.save(name) + + diff --git a/wls-module/wsl_nomos.py b/wls-module/wsl_nomos.py index cc5eab7..f7adbfb 100644 --- a/wls-module/wsl_nomos.py +++ b/wls-module/wsl_nomos.py @@ -8,6 +8,7 @@ import sys, 
logging, yaml, json, time, random, os, argparse, tomllib, glob import requests import rtnorm +import nomos # from pathlib import Path # import numpy as np # import pandas as pd @@ -18,6 +19,7 @@ G_APP_NAME = 'WLS' G_LOG_LEVEL = 'DEBUG' G_DEFAULT_CONFIG_FILE = './config/wsl.yml' +G_DEFAULT_TOPOLOGY_FILE = './tomls/network_data.json' G_LOGGER = None """ Custom logging formatter """ @@ -67,139 +69,30 @@ def add_nomos_tx(node_address, tx): G_LOGGER.debug('%s: %s' % (e.__doc__, e)) return False - G_LOGGER.debug('Response from %s: %s' %(url, response.text)) + if len(response.text) > 0: + G_LOGGER.debug('Response from %s: %s' %(url, response.text)) + return False return True -def get_nomos_mempool_metrics(node_address): +def get_nomos_mempool_metrics(node_address, iteration_s): url = node_address + "mempool/metrics" try: response = requests.get(url) except Exception as e: G_LOGGER.debug('%s: %s' % (e.__doc__, e)) - return False + return "error", -1 try: response_obj = response.json() except Exception as e: G_LOGGER.debug('%s: %s' % (e.__doc__, e)) - return False - + return "error", -1 G_LOGGER.debug('Response from %s: %s' %(node_address, response_obj)) + time_e = int(time.time() * 1000) - return True - - -# Generate a random interval using a Poisson distribution -def poisson_interval(rate): - return random.expovariate(rate) - -def make_payload(size): - payload = hex(random.getrandbits(4*size)) - G_LOGGER.debug('Payload of size %d bytes: %s' %(size, payload)) - return payload - -def make_payload_dist(dist_type, min_size, max_size): - - # Check if min and max packet sizes are the same - if min_size == max_size: - G_LOGGER.warning('Packet size is constant: min_size=max_size=%d' %min_size) - return make_payload(min_size) - - # Payload sizes are even integers uniformly distributed in [min_size, max_size] - if dist_type == 'uniform': - size = int(random.uniform(min_size, max_size)) - - # Reject non even sizes - while(size % 2) != 0: - size = int(random.uniform(min_size, 
max_size)) - - return make_payload(size) - - # Payload sizes are even integers ~"normally" distributed in [min_size, max_size] - if dist_type == 'gaussian': - σ = (max_size - min_size) / 5. - μ = (max_size - min_size) / 2. - size = int(rtnorm.rtnorm(min_size, max_size, sigma=σ, mu=μ, size=1)) - - # Reject non even sizes - while(size % 2) != 0: - size = int(rtnorm.rtnorm(min_size, max_size, sigma=σ, mu=μ, size=1)) - - return make_payload(size) - - G_LOGGER.error('Unknown distribution type %s') - - return '0x00' - -def parse_targets(enclave_dump_path, waku_port=8545): - - targets = [] - - G_LOGGER.info('Extracting Waku node addresses from Kurtosus enclance dump in %s' %enclave_dump_path) - - for path_obj in os.walk(enclave_dump_path): - if 'waku_' in path_obj[0]: - with open(path_obj[0] + '/spec.json', "r") as read_file: - spec_obj = json.load(read_file) - network_settings = spec_obj['NetworkSettings'] - waku_address = network_settings['Ports']['%d/tcp' %waku_port] - targets.append('%s:%s' %(waku_address[0]['HostIp'], waku_address[0]['HostPort'])) - - G_LOGGER.info('Parsed %d Waku nodes' %len(targets)) - - return targets - -def get_next_time_to_msg(inter_msg_type, msg_rate, simulation_time): - - if inter_msg_type == 'poisson': - return poisson_interval(msg_rate) - - if inter_msg_type == 'uniform': - return simulation_time / msg_rate - - G_LOGGER.error('%s is not a valid inter_msg_type. Aborting.' 
%inter_msg_type) - sys.exit() - -def get_all_messages_from_node_from_topic(node_address, topic): - - page_cnt = 0 - msg_cnt = 0 - - # Retrieve the first page - response, elapsed = get_waku_msgs(node_address, topic) - if 'error' in response: - G_LOGGER.error(response['error']) - return 0 - - messages = response['result']['messages'] - msg_cnt += len(messages) - G_LOGGER.debug('Got page %d with %d messages from node %s and topic: %s' %(page_cnt, len(messages), node_address, topic)) - - for msg_idx, msg in enumerate(messages): - # Decode the payload - payload_obj = json.loads(''.join(map(chr, msg['payload']))) - - # Retrieve further pages - while(response['result']['pagingOptions']): - page_cnt += 1 - cursor = response['result']['pagingOptions']['cursor'] - index = {"digest" : cursor['digest'], "receivedTime" : cursor['receiverTime']} - response, elapsed = get_waku_msgs(node_address, topic, cursor) - if 'error' in response: - G_LOGGER.error(response['error']) - break - - messages = response['result']['messages'] - msg_cnt += len(messages) - G_LOGGER.debug('Got page %d with %d messages from node %s and topic: %s' %(page_cnt, len(messages), node_address, topic)) - - for msg_idx, msg in enumerate(messages): - # Decode the payload - payload_obj = json.loads(''.join(map(chr, msg['payload']))) - - return msg_cnt + return response_obj, time_e - iteration_s def main(): @@ -216,9 +109,11 @@ def main(): """ Parse command line args. 
""" parser = argparse.ArgumentParser() parser.add_argument("-cfg", "--config_file", help="Config file", action="store_true", default=G_DEFAULT_CONFIG_FILE) + parser.add_argument("-topo", "--topology_file", help="Topology file", action="store_true", default=G_DEFAULT_TOPOLOGY_FILE) args = parser.parse_args() config_file = args.config_file + topology_file = args.topology_file """ Load config file """ try: @@ -246,13 +141,20 @@ def main(): G_LOGGER.error('%s: %s' % (e.__doc__, e)) sys.exit() + try: + with open(topology_file) as read_file: + topology = json.load(read_file) + except Exception as e: + G_LOGGER.error('%s: %s' % (e.__doc__, e)) + sys.exit() + if len(targets) == 0: G_LOGGER.error('Cannot find valid targets. Aborting.') sys.exit(1) G_LOGGER.debug(targets) G_LOGGER.info('%d targets loaded' %len(targets)) - + """ Check all nodes are reachable """ for i, target in enumerate(targets): if not check_nomos_node('http://%s/' %target): @@ -260,16 +162,101 @@ def main(): sys.exit(1) G_LOGGER.info('All %d Waku nodes are reachable.' %len(targets)) - G_LOGGER.info('Tx addition start time: %d' %int(time.time())) + """ Start simulation """ + msg_cnt = 0 + failed_addtx_cnt = 0 + failed_metrics_cnt = 0 + bytes_cnt = 0 + s_time = time.time() + last_msg_time = 0 + next_time_to_msg = 0 + failed_dissemination_cnt = 0 + batch_size = 40 + iterations = [] + tx_id = 0 + + G_LOGGER.info('Tx addition start time: %d' %int(round(time.time() * 1000))) """ Add new transaction to every node """ for i, target in enumerate(targets): - if not add_nomos_tx('http://%s/' %target, 'tx%s' %i): - G_LOGGER.error('Unable to add new tx. Node %d (%s).' 
%(i, target)) + iteration_s = int(time.time() * 1000) + last_tx_sent = iteration_s + + tx_id = tx_id + msg_cnt+failed_addtx_cnt+1 + for j in range(batch_size): + tx_id += j + tx_target = random.choice(targets) + G_LOGGER.debug('sending tx_id: %s to target: %s' %(tx_id, tx_target)) + + if not add_nomos_tx('http://%s/' %tx_target, 'tx%s' %tx_id): + G_LOGGER.error('Unable to add new tx. Node %s.' %(tx_target)) + failed_addtx_cnt += 1 + continue + + last_tx_sent = int(time.time() * 1000) + msg_cnt += 1 + + time.sleep(1.5) + + results = [] + """ Collect mempool metrics from nodes """ + for n, target in enumerate(targets): + res, t = get_nomos_mempool_metrics('http://%s/' %target, iteration_s) + if 'error' in res: + G_LOGGER.error('Unable to pull metrics. Node %d (%s).' %(n, target)) + failed_metrics_cnt += 1 + continue + + is_ok = True + delta = res['last_tx'] - last_tx_sent + start_finish = res['last_tx'] - iteration_s + + # Tolerate one second difference between finish and start times. + if -1000 < delta < 0: + delta = 0 + + if delta < 0: + G_LOGGER.error('delta should be gt that zero: %d' %delta) + delta = -1 + + G_LOGGER.debug('should be %s' %msg_cnt) + if res['pending_tx'] != msg_cnt: + delta = -1 + is_ok = False + failed_dissemination_cnt += 1 + + results.append({ + "node": n, + "is_ok": is_ok, + "delta": delta, + "start_finish": start_finish + }) + + iterations.append({ + "iteration": iteration_s, + "results": results + }) + + stats = { + "msg_cnt": msg_cnt, + "failed_addtx_cnt": failed_addtx_cnt, + "failed_metrics_cnt": failed_metrics_cnt, + "failed_dissemination_cnt": failed_dissemination_cnt, + "batch_size": batch_size, + "bytes_cnt": bytes_cnt, + "s_time": s_time, + "last_msg_time": last_msg_time, + "next_time_to_msg": next_time_to_msg, + "iterations": iterations, + } - """ Collect mempool metrics from nodes """ - for i, target in enumerate(targets): - if not get_nomos_mempool_metrics('http://%s/' %target): - G_LOGGER.error('Unable to add new tx. 
Node %d (%s).' %(i, target)) + G_LOGGER.info("Results: %s" %json.dumps(stats)) + + with open('./summary.json', 'w') as summary_file: + summary_file.write(json.dumps(stats, indent=4)) + + nomos.network_graph("1.png", topology) + nomos.hist_delta("2.png", stats['iterations']) + nomos.concat_images("collage.png", ["1.png", "2.png"]) """ We are done """ G_LOGGER.info('Ended') From 2df81532ece95bd9ea5a87fb20602a7da235f72c Mon Sep 17 00:00:00 2001 From: Gusto Bacvinka Date: Sat, 25 Feb 2023 14:48:17 +0200 Subject: [PATCH 055/112] Simplify dockerfiles and mark files as executable --- build.sh | 0 gennet-module/.dockerignore | 0 gennet-module/Dockerfile | 2 +- run.sh | 0 wls-module/.dockerignore | 2 ++ wls-module/dockerfile | 6 ++---- wsl-module/build.sh | 6 ++++++ 7 files changed, 11 insertions(+), 5 deletions(-) mode change 100644 => 100755 build.sh create mode 100644 gennet-module/.dockerignore mode change 100644 => 100755 run.sh create mode 100644 wls-module/.dockerignore create mode 100755 wsl-module/build.sh diff --git a/build.sh b/build.sh old mode 100644 new mode 100755 diff --git a/gennet-module/.dockerignore b/gennet-module/.dockerignore new file mode 100644 index 0000000..e69de29 diff --git a/gennet-module/Dockerfile b/gennet-module/Dockerfile index 027106a..c301908 100644 --- a/gennet-module/Dockerfile +++ b/gennet-module/Dockerfile @@ -21,7 +21,7 @@ COPY --from=build-image /opt/venv /opt/venv # Copy the gennet files to the production image WORKDIR /gennet -COPY Dockerfile batch_gen.sh gennet.py requirements.txt Readme.md ./ +COPY . . 
# Deploy the virtualenv in production image ENV PATH="/opt/venv/bin:$PATH" diff --git a/run.sh b/run.sh old mode 100644 new mode 100755 diff --git a/wls-module/.dockerignore b/wls-module/.dockerignore new file mode 100644 index 0000000..4b60fd2 --- /dev/null +++ b/wls-module/.dockerignore @@ -0,0 +1,2 @@ +build.sh +README.md diff --git a/wls-module/dockerfile b/wls-module/dockerfile index 37ee5ee..98e6041 100644 --- a/wls-module/dockerfile +++ b/wls-module/dockerfile @@ -1,7 +1,5 @@ FROM python:3.11.0 LABEL Maintainer="Daimakaimura" WORKDIR /wls -COPY wls.py . -COPY rtnorm.py . -COPY requirements.txt ./ -RUN pip install -r requirements.txt \ No newline at end of file +COPY . . +RUN pip install -r requirements.txt diff --git a/wsl-module/build.sh b/wsl-module/build.sh new file mode 100755 index 0000000..c662990 --- /dev/null +++ b/wsl-module/build.sh @@ -0,0 +1,6 @@ +#!/bin/sh +# pip freeze > requirements.txt +image_id=$(docker images -q wsl:0.0.1) +echo $image_id +docker image rm -f $image_id +docker image build --progress=plain -t wsl:0.0.1 ./ From b469a63ccb8ddf96391592b92da8b45e905a4648 Mon Sep 17 00:00:00 2001 From: Gusto Bacvinka Date: Sat, 25 Feb 2023 14:51:52 +0200 Subject: [PATCH 056/112] Make node init code agnostic to the node type --- main.star | 1 + src/node_builders.star | 70 +++++++++++++++++++++-------------- src/nomos.star | 15 +++----- src/system_variables.star | 2 +- src/wsl.star | 77 --------------------------------------- 5 files changed, 51 insertions(+), 114 deletions(-) delete mode 100644 src/wsl.star diff --git a/main.star b/main.star index 49380ad..3ec61e7 100644 --- a/main.star +++ b/main.star @@ -8,6 +8,7 @@ grafana = import_module(vars.GRAFANA_MODULE) args_parser = import_module(vars.ARGUMENT_PARSER_MODULE) wls = import_module(vars.WLS_MODULE) nodes = import_module(vars.NODE_BUILDERS_MODULE) +nomos = import_module(vars.NOMOS_MODULE) def run(plan, args): diff --git a/src/node_builders.star b/src/node_builders.star index 214d873..1a12376 
100644 --- a/src/node_builders.star +++ b/src/node_builders.star @@ -2,9 +2,9 @@ system_variables = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") # Module Imports -waku = import_module(system_variables.WAKU_MODULE) -nomos = import_module(system_variables.NOMOS_MODULE) -files = import_module(system_variables.FILE_HELPERS_MODULE) +waku = import_module(vars.WAKU_MODULE) +nomos = import_module(vars.NOMOS_MODULE) +files = import_module(vars.FILE_HELPERS_MODULE) def prepare_nwaku_service(plan, nwakunode_name, all_services, use_general_configuration): @@ -78,26 +78,26 @@ def prepare_nomos_service(plan, node_name, all_services, use_general_configurati node_name) plan.print("Configuration being used file is " + configuration_file) - plan.print("Entrypoint is "+ str(system_variables.NOMOS_ENTRYPOINT)) + plan.print("Entrypoint is "+ str(vars.NOMOS_ENTRYPOINT)) nomos_service_config = ServiceConfig( - image=system_variables.NOMOS_IMAGE, + image=vars.NOMOS_IMAGE, ports={ - system_variables.NOMOS_HTTP_PORT_ID: PortSpec(number=system_variables.NOMOS_HTTP_PORT, + vars.NOMOS_HTTP_PORT_ID: PortSpec(number=vars.NOMOS_HTTP_PORT, transport_protocol="TCP"), - system_variables.PROMETHEUS_PORT_ID: PortSpec( - number=system_variables.PROMETHEUS_TCP_PORT, + vars.PROMETHEUS_PORT_ID: PortSpec( + number=vars.PROMETHEUS_TCP_PORT, transport_protocol="TCP"), - system_variables.NOMOS_LIBP2P_PORT_ID: PortSpec( - number=system_variables.NOMOS_LIBP2P_PORT, + vars.NOMOS_LIBP2P_PORT_ID: PortSpec( + number=vars.NOMOS_LIBP2P_PORT, transport_protocol="TCP"), }, files={ - system_variables.CONTAINER_NODE_CONFIG_FILE_LOCATION: artifact_id + vars.CONTAINER_NODE_CONFIG_FILE_LOCATION: artifact_id }, - entrypoint=system_variables.NOMOS_ENTRYPOINT, + entrypoint=vars.NOMOS_ENTRYPOINT, cmd=[ - system_variables.NOMOS_CONTAINER_CONFIG_FILE_LOCATION + vars.NOMOS_CONTAINER_CONFIG_FILE_LOCATION ] ) @@ -134,49 +134,65 @@ def instantiate_services(plan, network_topology, 
use_general_configuration): for service_name in network_topology.keys(): image = network_topology[service_name]["image"] - service_builder = service_dispatcher[image] + service_builder = service_dispatcher[image][0] service_builder(plan, service_name, all_services, use_general_configuration) all_services_information = plan.add_services( configs = all_services ) +<<<<<<< HEAD #services_information = _add_waku_service_information(plan, all_services_information) services_information = _add_nomos_service_information(plan, all_services_information) +======= + services_information = add_service_information(plan, all_services_information, network_topology) +>>>>>>> 1e3f147 (Make node init code agnostic to the node type) return services_information -def _add_waku_service_information(plan, all_services_information): - +def add_service_information(plan, all_services_information, network_topology): new_services_information = {} for service_name in all_services_information: +<<<<<<< HEAD node_peer_id = waku.get_wakunode_peer_id(plan, service_name, system_variables.WAKU_RPC_PORT_ID) new_services_information[service_name] = {} new_services_information[service_name]["peer_id"] = node_peer_id new_services_information[service_name]["service_info"] = all_services_information[service_name] +======= + image = network_topology[service_name]["image"] + info_getter = service_dispatcher[image][1] + service_info = all_services_information[service_name] + new_service_info = info_getter(plan, service_name, service_info) + new_service_info["image"] = image + new_services_information[service_name] = new_service_info +>>>>>>> 1e3f147 (Make node init code agnostic to the node type) return new_services_information -def _add_nomos_service_information(plan, all_services_information): +def _add_waku_service_information(plan, service_name, service_info): + node_peer_id = waku.get_wakunode_peer_id(plan, service_name, vars.WAKU_RPC_PORT_ID) + new_service_info = {} + new_service_info["peer_id"] = 
node_peer_id + new_service_info["service_info"] = service_info - new_services_information = {} + return new_service_info - for service_name in all_services_information: - node_peer_id = nomos.get_nomos_peer_id(plan, service_name, system_variables.NOMOS_HTTP_PORT_ID) - new_services_information[service_name] = {} - new_services_information[service_name]["peer_id"] = node_peer_id - new_services_information[service_name]["service_info"] = all_services_information[service_name] +def _add_nomos_service_information(plan, service_name, service_info): + node_peer_id = nomos.get_nomos_peer_id(plan, service_name, vars.NOMOS_HTTP_PORT_ID) + new_service_info = {} + new_service_info["peer_id"] = node_peer_id + new_service_info["service_info"] = service_info - return new_services_information + return new_service_info service_dispatcher = { - "go-waku": prepare_gowaku_service, - "nim-waku": prepare_nwaku_service, - "nomos": prepare_nomos_service + "go-waku": [prepare_gowaku_service, _add_waku_service_information], + "nim-waku": [prepare_nwaku_service, _add_waku_service_information], + "nomos": [prepare_nomos_service, _add_nomos_service_information] } diff --git a/src/nomos.star b/src/nomos.star index ee7083f..3ac44c1 100644 --- a/src/nomos.star +++ b/src/nomos.star @@ -1,8 +1,8 @@ # System Imports -system_variables = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") +vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") # Module Imports -files = import_module(system_variables.FILE_HELPERS_MODULE) +files = import_module(vars.FILE_HELPERS_MODULE) def get_req(plan, service_name, port_id, endpoint, extract={}): @@ -42,7 +42,7 @@ def post_req(plan, service_name, port_id, endpoint, body, extract={}): def get_nomos_peer_id(plan, service_name, port_id): extract = {"peer_id": '.peer_id'} - response = get_req(plan, service_name, port_id, system_variables.NOMOS_NET_INFO_URL, extract) + response = get_req(plan, service_name, port_id, 
vars.NOMOS_NET_INFO_URL, extract) plan.assert(value=response["code"], assertion="==", target_value = 200) @@ -53,7 +53,7 @@ def create_nomos_id(nomos_service_information): nomos_service = nomos_service_information["service_info"] ip = nomos_service.ip_address - port = nomos_service.ports[system_variables.NOMOS_LIBP2P_PORT_ID].number + port = nomos_service.ports[vars.NOMOS_LIBP2P_PORT_ID].number nomos_node_id = nomos_service_information["peer_id"] return '"/ip4/' + str(ip) + '/tcp/' + str(port) + '/p2p/' + nomos_node_id + '"' @@ -66,7 +66,7 @@ def _merge_peer_ids(peer_ids): def connect_nomos_to_peers(plan, service_id, port_id, peer_ids): body = _merge_peer_ids(peer_ids) - response = post_req(plan, service_id, port_id, system_variables.NOMOS_NET_CONN_URL, body) + response = post_req(plan, service_id, port_id, vars.NOMOS_NET_CONN_URL, body) plan.assert(value=response["code"], assertion="==", target_value = 200) @@ -81,7 +81,6 @@ def make_service_wait(plan,service_id, time): plan.exec(exec_recipe) - def interconnect_nomos_nodes(plan, topology_information, services): # Interconnect them for nomos_service_id in services.keys(): @@ -89,6 +88,4 @@ def interconnect_nomos_nodes(plan, topology_information, services): peer_ids = [create_nomos_id(services[peer]) for peer in peers] - connect_nomos_to_peers(plan, nomos_service_id, system_variables.NOMOS_HTTP_PORT_ID, peer_ids) - - + connect_nomos_to_peers(plan, nomos_service_id, vars.NOMOS_HTTP_PORT_ID, peer_ids) diff --git a/src/system_variables.star b/src/system_variables.star index 3672129..72acdd2 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -30,7 +30,7 @@ NOMOS_CONTAINER_CONFIG_FILE_LOCATION = '/etc/nomos/config.yml' # Nomos Configuration NOMOS_IMAGE = "nomos" -NOMOS_HTTP_PORT_ID = "http" +NOMOS_HTTP_PORT_ID = "rpc" NOMOS_HTTP_PORT = 8080 NOMOS_LIBP2P_PORT_ID = "libp2p" NOMOS_LIBP2P_PORT = 3000 diff --git a/src/wsl.star b/src/wsl.star deleted file mode 100644 index 3fd4394..0000000 --- 
a/src/wsl.star +++ /dev/null @@ -1,77 +0,0 @@ -# System Imports -system_variables = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") - -# Module Imports -files = import_module(system_variables.FILE_HELPERS_MODULE) -templates = import_module(system_variables.TEMPLATES_MODULE) - -def create_config(plan, wls_config): - - # Traffic simulation parameters - wsl_yml_template = templates.get_wsl_template() - - artifact_id = plan.render_templates( - config={ - system_variables.CONTAINER_WSL_CONFIGURATION_FILE_NAME: struct( - template=wsl_yml_template, - data=wls_config, - ) - }, - name="wsl_config" - ) - - return artifact_id - -def create_targets(plan, services): - - # Get private ip and ports of all nodes - template_data = files.generate_template_node_targets(services, system_variables.NOMOS_HTTP_PORT_ID) - - # Template - template = """ - {{.targets}} - """ - - artifact_id = plan.render_templates( - config={ - system_variables.CONTAINER_TARGETS_FILE_NAME_WSL: struct( - template=template, - data=template_data, - ) - }, - name="wsl_targets" - ) - - return artifact_id - -def init(plan, services, wsl_config): - - # Generate simulation config - wsl_config = create_config(plan, wsl_config) - - tomls_artifact = plan.upload_files( - src = system_variables.NODE_CONFIG_FILE_LOCATION, - name = "tomls_artifact", - ) - - # Create targets.json - wsl_targets = create_targets(plan, services) - - - add_service_config = ServiceConfig( - image=system_variables.WSL_IMAGE, - ports={}, - files={ - system_variables.WSL_CONFIG_PATH: wsl_config, - system_variables.WSL_TARGETS_PATH: wsl_targets, - system_variables.WSL_TOMLS_PATH: tomls_artifact - }, - cmd=["python3", "wsl.py"] - ) - wsl_service = plan.add_service( - service_name=system_variables.WSL_SERVICE_NAME, - config=add_service_config - ) - - return wsl_service - From 600c9e96937e27a705cfd31379e8828b2af2e91c Mon Sep 17 00:00:00 2001 From: Gusto Bacvinka Date: Sat, 25 Feb 2023 15:04:07 +0200 Subject: [PATCH 057/112] 
Interconnect waku and nomos nodes via the same method --- config/config.json | 2 +- main.star | 3 +- src/node_builders.star | 64 ++++++++------- wls-module/nomos.py | 172 ++++++++++++++++++++++++++++++++++++++-- wls-module/wls.py | 1 + wls-module/wsl_nomos.py | 159 +------------------------------------ 6 files changed, 208 insertions(+), 193 deletions(-) diff --git a/config/config.json b/config/config.json index 6d5909f..4c84733 100644 --- a/config/config.json +++ b/config/config.json @@ -14,7 +14,7 @@ "num_partitions": 1, "num_subnets": 1, "container_size": "1", - "node_type_distribution": { "nwaku":100, "gowaku":0 }, + "node_type_distribution": { "nwaku":100, "gowaku":0, "nomos":0 }, "node_type": "desktop", "network_type": "newmanwattsstrogatz", "output_dir": "network_data", diff --git a/main.star b/main.star index 3ec61e7..9508a5a 100644 --- a/main.star +++ b/main.star @@ -34,7 +34,8 @@ def run(plan, args): grafana_service = grafana.set_up_grafana(plan, prometheus_service) - waku.interconnect_waku_nodes(plan, network_topology, interconnection_batch) + # nomos.interconnect_nomos_nodes(plan, waku_topology, services) + nodes.interconnect_nodes(plan, network_topology, services, interconnection_batch) # Setup WLS & Start the Simulation wls_service = wls.init(plan, network_topology, wls_config) diff --git a/src/node_builders.star b/src/node_builders.star index 1a12376..68aeb7b 100644 --- a/src/node_builders.star +++ b/src/node_builders.star @@ -71,15 +71,7 @@ def prepare_gowaku_service(plan, gowakunode_name, all_services, use_general_conf all_services[gowakunode_name] = add_service_config -def prepare_nomos_service(plan, node_name, all_services, use_general_configuration): - plan.print("nomos") - artifact_id, configuration_file = files.get_toml_configuration_artifact(plan, node_name, - use_general_configuration, - node_name) - - plan.print("Configuration being used file is " + configuration_file) - plan.print("Entrypoint is "+ str(vars.NOMOS_ENTRYPOINT)) - +def 
prepare_nomos_service(node_name, all_services, config_file, artifact_id): nomos_service_config = ServiceConfig( image=vars.NOMOS_IMAGE, ports={ @@ -104,7 +96,21 @@ def prepare_nomos_service(plan, node_name, all_services, use_general_configurati all_services[node_name] = nomos_service_config -def instantiate_services(plan, network_topology, use_general_configuration): +def interconnect_nodes(plan, topology_information, services, interconnection_batch): + for waku_service_name in services.keys(): + peers = topology_information[waku_service_name]["static_nodes"] + + for i in range(0, len(peers), interconnection_batch): + x = i + image = services[waku_service_name]["image"] + create_id = service_dispatcher[image].create_id + connect_peers = service_dispatcher[image].connect_peers + peer_ids = [create_id(services[peer]) for peer in peers[x:x + interconnection_batch]] + + connect_peers(plan, waku_service_name, vars.WAKU_RPC_PORT_ID, peer_ids) + + +def instantiate_services(plan, network_topology, testing): """ As we will need to access for the service information later, the structure is the following: @@ -134,19 +140,14 @@ def instantiate_services(plan, network_topology, use_general_configuration): for service_name in network_topology.keys(): image = network_topology[service_name]["image"] - service_builder = service_dispatcher[image][0] + service_builder = service_dispatcher[image].prepare_service service_builder(plan, service_name, all_services, use_general_configuration) all_services_information = plan.add_services( configs = all_services ) -<<<<<<< HEAD - #services_information = _add_waku_service_information(plan, all_services_information) - services_information = _add_nomos_service_information(plan, all_services_information) -======= services_information = add_service_information(plan, all_services_information, network_topology) ->>>>>>> 1e3f147 (Make node init code agnostic to the node type) return services_information @@ -155,20 +156,12 @@ def 
add_service_information(plan, all_services_information, network_topology): new_services_information = {} for service_name in all_services_information: -<<<<<<< HEAD - node_peer_id = waku.get_wakunode_peer_id(plan, service_name, system_variables.WAKU_RPC_PORT_ID) - - new_services_information[service_name] = {} - new_services_information[service_name]["peer_id"] = node_peer_id - new_services_information[service_name]["service_info"] = all_services_information[service_name] -======= image = network_topology[service_name]["image"] - info_getter = service_dispatcher[image][1] + info_getter = service_dispatcher[image].add_service_information service_info = all_services_information[service_name] new_service_info = info_getter(plan, service_name, service_info) new_service_info["image"] = image new_services_information[service_name] = new_service_info ->>>>>>> 1e3f147 (Make node init code agnostic to the node type) return new_services_information @@ -192,7 +185,22 @@ def _add_nomos_service_information(plan, service_name, service_info): service_dispatcher = { - "go-waku": [prepare_gowaku_service, _add_waku_service_information], - "nim-waku": [prepare_nwaku_service, _add_waku_service_information], - "nomos": [prepare_nomos_service, _add_nomos_service_information] + "go-waku": struct( + prepare_service = prepare_gowaku_service, + add_service_information = _add_waku_service_information, + create_id = waku.create_waku_id, + connect_peers = waku.connect_wakunode_to_peers + ), + "nim-waku": struct( + prepare_service = prepare_gowaku_service, + add_service_information = _add_waku_service_information, + create_id = waku.create_waku_id, + connect_peers = waku.connect_wakunode_to_peers + ), + "nomos": struct( + prepare_service = prepare_nomos_service, + add_service_information = _add_nomos_service_information, + create_id = nomos.create_nomos_id, + connect_peers = nomos.connect_nomos_to_peers + ), } diff --git a/wls-module/nomos.py b/wls-module/nomos.py index 04b4d35..47c4629 100644 
--- a/wls-module/nomos.py +++ b/wls-module/nomos.py @@ -1,8 +1,13 @@ +import requests +import time import json +import random import matplotlib.pyplot as plt import networkx as nx from PIL import Image +LOGGER = None + # Histogram of time delta in millis of tx being sent # and received by all nodes. def hist_delta(name, iterations): @@ -33,23 +38,178 @@ def network_graph(name, topology): def concat_images(name, images): images = [Image.open(image) for image in images] - # Get the width and height of the first image widths, heights = zip(*(i.size for i in images)) - - # Calculate the total width and height of the collage total_width = sum(widths) max_height = max(heights) - # Create a new image with the calculated size collage = Image.new('RGB', (total_width, max_height)) - # Paste the images into the collage x_offset = 0 for image in images: collage.paste(image, (x_offset, 0)) x_offset += image.size[0] - # Save the collage collage.save(name) +def check_nomos_node(node_address): + url = node_address + "network/info" + + try: + response = requests.get(url) + except Exception as e: + LOGGER.debug('%s: %s' % (e.__doc__, e)) + return False + + try: + response_obj = response.json() + except Exception as e: + LOGGER.debug('%s: %s' % (e.__doc__, e)) + return False + + LOGGER.debug('Response from %s: %s' %(node_address, response_obj)) + + return True + +def add_nomos_tx(node_address, tx): + url = node_address + "mempool/addtx" + + try: + response = requests.post(url, data=json.dumps(tx), headers={'content-type': 'application/json'}) + except Exception as e: + LOGGER.debug('%s: %s' % (e.__doc__, e)) + return False + + if len(response.text) > 0: + LOGGER.debug('Response from %s: %s' %(url, response.text)) + return False + + return True + +def get_nomos_mempool_metrics(node_address, iteration_s): + url = node_address + "mempool/metrics" + + try: + response = requests.get(url) + except Exception as e: + LOGGER.debug('%s: %s' % (e.__doc__, e)) + return "error", -1 + + try: + 
response_obj = response.json() + except Exception as e: + LOGGER.debug('%s: %s' % (e.__doc__, e)) + return "error", -1 + LOGGER.debug('Response from %s: %s' %(node_address, response_obj)) + time_e = int(time.time() * 1000) + + return response_obj, time_e - iteration_s + +def run_tests(logger, targets, topology): + global LOGGER + LOGGER = logger + + """ Check all nodes are reachable """ + for i, target in enumerate(targets): + if not check_nomos_node('http://%s/' %target): + LOGGER.error('Node %d (%s) is not online. Aborted.' %(i, target)) + sys.exit(1) + LOGGER.info('All %d Waku nodes are reachable.' %len(targets)) + + """ Start simulation """ + msg_cnt = 0 + failed_addtx_cnt = 0 + failed_metrics_cnt = 0 + bytes_cnt = 0 + s_time = time.time() + last_msg_time = 0 + next_time_to_msg = 0 + failed_dissemination_cnt = 0 + batch_size = 40 + iterations = [] + tx_id = 0 + + LOGGER.info('Tx addition start time: %d' %int(round(time.time() * 1000))) + """ Add new transaction to every node """ + for i, target in enumerate(targets): + iteration_s = int(time.time() * 1000) + last_tx_sent = iteration_s + + tx_id = tx_id + msg_cnt+failed_addtx_cnt+1 + for j in range(batch_size): + tx_id += j + tx_target = random.choice(targets) + LOGGER.debug('sending tx_id: %s to target: %s' %(tx_id, tx_target)) + + if not add_nomos_tx('http://%s/' %tx_target, 'tx%s' %tx_id): + LOGGER.error('Unable to add new tx. Node %s.' %(tx_target)) + failed_addtx_cnt += 1 + continue + + last_tx_sent = int(time.time() * 1000) + msg_cnt += 1 + + time.sleep(1.5) + + results = [] + """ Collect mempool metrics from nodes """ + for n, target in enumerate(targets): + res, t = get_nomos_mempool_metrics('http://%s/' %target, iteration_s) + if 'error' in res: + LOGGER.error('Unable to pull metrics. Node %d (%s).' 
%(n, target)) + failed_metrics_cnt += 1 + continue + + is_ok = True + delta = res['last_tx'] - last_tx_sent + start_finish = res['last_tx'] - iteration_s + + # Tolerate one second difference between finish and start times. + if -1000 < delta < 0: + delta = 0 + + if delta < 0: + LOGGER.error('delta should be gt that zero: %d' %delta) + delta = -1 + + LOGGER.debug('should be %s' %msg_cnt) + if res['pending_tx'] != msg_cnt: + delta = -1 + is_ok = False + failed_dissemination_cnt += 1 + + results.append({ + "node": n, + "is_ok": is_ok, + "delta": delta, + "start_finish": start_finish + }) + + iterations.append({ + "iteration": iteration_s, + "results": results + }) + + stats = { + "msg_cnt": msg_cnt, + "failed_addtx_cnt": failed_addtx_cnt, + "failed_metrics_cnt": failed_metrics_cnt, + "failed_dissemination_cnt": failed_dissemination_cnt, + "batch_size": batch_size, + "bytes_cnt": bytes_cnt, + "s_time": s_time, + "last_msg_time": last_msg_time, + "next_time_to_msg": next_time_to_msg, + "iterations": iterations, + } + + LOGGER.info("Results: %s" %json.dumps(stats)) + + with open('./summary.json', 'w') as summary_file: + summary_file.write(json.dumps(stats, indent=4)) + + network_graph("1.png", topology) + hist_delta("2.png", stats['iterations']) + concat_images("collage.png", ["1.png", "2.png"]) + """ We are done """ + LOGGER.info('Ended') diff --git a/wls-module/wls.py b/wls-module/wls.py index 05ce35f..f28b8bf 100644 --- a/wls-module/wls.py +++ b/wls-module/wls.py @@ -8,6 +8,7 @@ import sys, logging, yaml, json, time, random, os, argparse, tomllib, glob, hashlib import requests import rtnorm +import nomos # from pathlib import Path # import numpy as np # import pandas as pd diff --git a/wls-module/wsl_nomos.py b/wls-module/wsl_nomos.py index f7adbfb..1087c02 100644 --- a/wls-module/wsl_nomos.py +++ b/wls-module/wsl_nomos.py @@ -5,7 +5,7 @@ """ """ Dependencies """ -import sys, logging, yaml, json, time, random, os, argparse, tomllib, glob +import sys, logging, yaml, 
json, random, os, argparse, tomllib, glob import requests import rtnorm import nomos @@ -41,58 +41,6 @@ def format(self, record): formatter = logging.Formatter(log_fmt, '%d-%m-%Y %H:%M:%S') return formatter.format(record) -def check_nomos_node(node_address): - url = node_address + "network/info" - - try: - response = requests.get(url) - except Exception as e: - G_LOGGER.debug('%s: %s' % (e.__doc__, e)) - return False - - try: - response_obj = response.json() - except Exception as e: - G_LOGGER.debug('%s: %s' % (e.__doc__, e)) - return False - - G_LOGGER.debug('Response from %s: %s' %(node_address, response_obj)) - - return True - -def add_nomos_tx(node_address, tx): - url = node_address + "mempool/addtx" - - try: - response = requests.post(url, data=json.dumps(tx), headers={'content-type': 'application/json'}) - except Exception as e: - G_LOGGER.debug('%s: %s' % (e.__doc__, e)) - return False - - if len(response.text) > 0: - G_LOGGER.debug('Response from %s: %s' %(url, response.text)) - return False - - return True - -def get_nomos_mempool_metrics(node_address, iteration_s): - url = node_address + "mempool/metrics" - - try: - response = requests.get(url) - except Exception as e: - G_LOGGER.debug('%s: %s' % (e.__doc__, e)) - return "error", -1 - - try: - response_obj = response.json() - except Exception as e: - G_LOGGER.debug('%s: %s' % (e.__doc__, e)) - return "error", -1 - G_LOGGER.debug('Response from %s: %s' %(node_address, response_obj)) - time_e = int(time.time() * 1000) - - return response_obj, time_e - iteration_s def main(): @@ -155,111 +103,8 @@ def main(): G_LOGGER.debug(targets) G_LOGGER.info('%d targets loaded' %len(targets)) - """ Check all nodes are reachable """ - for i, target in enumerate(targets): - if not check_nomos_node('http://%s/' %target): - G_LOGGER.error('Node %d (%s) is not online. Aborted.' %(i, target)) - sys.exit(1) - G_LOGGER.info('All %d Waku nodes are reachable.' 
%len(targets)) - - """ Start simulation """ - msg_cnt = 0 - failed_addtx_cnt = 0 - failed_metrics_cnt = 0 - bytes_cnt = 0 - s_time = time.time() - last_msg_time = 0 - next_time_to_msg = 0 - failed_dissemination_cnt = 0 - batch_size = 40 - iterations = [] - tx_id = 0 - - G_LOGGER.info('Tx addition start time: %d' %int(round(time.time() * 1000))) - """ Add new transaction to every node """ - for i, target in enumerate(targets): - iteration_s = int(time.time() * 1000) - last_tx_sent = iteration_s - - tx_id = tx_id + msg_cnt+failed_addtx_cnt+1 - for j in range(batch_size): - tx_id += j - tx_target = random.choice(targets) - G_LOGGER.debug('sending tx_id: %s to target: %s' %(tx_id, tx_target)) - - if not add_nomos_tx('http://%s/' %tx_target, 'tx%s' %tx_id): - G_LOGGER.error('Unable to add new tx. Node %s.' %(tx_target)) - failed_addtx_cnt += 1 - continue - - last_tx_sent = int(time.time() * 1000) - msg_cnt += 1 - - time.sleep(1.5) - - results = [] - """ Collect mempool metrics from nodes """ - for n, target in enumerate(targets): - res, t = get_nomos_mempool_metrics('http://%s/' %target, iteration_s) - if 'error' in res: - G_LOGGER.error('Unable to pull metrics. Node %d (%s).' %(n, target)) - failed_metrics_cnt += 1 - continue - - is_ok = True - delta = res['last_tx'] - last_tx_sent - start_finish = res['last_tx'] - iteration_s - - # Tolerate one second difference between finish and start times. 
- if -1000 < delta < 0: - delta = 0 - - if delta < 0: - G_LOGGER.error('delta should be gt that zero: %d' %delta) - delta = -1 - - G_LOGGER.debug('should be %s' %msg_cnt) - if res['pending_tx'] != msg_cnt: - delta = -1 - is_ok = False - failed_dissemination_cnt += 1 - - results.append({ - "node": n, - "is_ok": is_ok, - "delta": delta, - "start_finish": start_finish - }) - - iterations.append({ - "iteration": iteration_s, - "results": results - }) - - stats = { - "msg_cnt": msg_cnt, - "failed_addtx_cnt": failed_addtx_cnt, - "failed_metrics_cnt": failed_metrics_cnt, - "failed_dissemination_cnt": failed_dissemination_cnt, - "batch_size": batch_size, - "bytes_cnt": bytes_cnt, - "s_time": s_time, - "last_msg_time": last_msg_time, - "next_time_to_msg": next_time_to_msg, - "iterations": iterations, - } - - G_LOGGER.info("Results: %s" %json.dumps(stats)) - - with open('./summary.json', 'w') as summary_file: - summary_file.write(json.dumps(stats, indent=4)) - - nomos.network_graph("1.png", topology) - nomos.hist_delta("2.png", stats['iterations']) - nomos.concat_images("collage.png", ["1.png", "2.png"]) + nomos.run_tests(G_LOGGER, targets, topology) - """ We are done """ - G_LOGGER.info('Ended') if __name__ == "__main__": From 8bbabafe98b38d2aaf475627f4f8a6d633c848e9 Mon Sep 17 00:00:00 2001 From: Gusto Bacvinka Date: Sat, 25 Feb 2023 15:04:50 +0200 Subject: [PATCH 058/112] Remove unused imports --- main.star | 2 -- src/node_builders.star | 6 +++--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/main.star b/main.star index 9508a5a..857f780 100644 --- a/main.star +++ b/main.star @@ -2,13 +2,11 @@ vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") # Module Imports -waku = import_module(vars.WAKU_MODULE) prometheus = import_module(vars.PROMETHEUS_MODULE) grafana = import_module(vars.GRAFANA_MODULE) args_parser = import_module(vars.ARGUMENT_PARSER_MODULE) wls = import_module(vars.WLS_MODULE) nodes = 
import_module(vars.NODE_BUILDERS_MODULE) -nomos = import_module(vars.NOMOS_MODULE) def run(plan, args): diff --git a/src/node_builders.star b/src/node_builders.star index 68aeb7b..feecb0c 100644 --- a/src/node_builders.star +++ b/src/node_builders.star @@ -147,12 +147,12 @@ def instantiate_services(plan, network_topology, testing): all_services_information = plan.add_services( configs = all_services ) - services_information = add_service_information(plan, all_services_information, network_topology) + services_information = add_services_information(plan, all_services_information, network_topology) return services_information -def add_service_information(plan, all_services_information, network_topology): +def add_services_information(plan, all_services_information, network_topology): new_services_information = {} for service_name in all_services_information: @@ -192,7 +192,7 @@ service_dispatcher = { connect_peers = waku.connect_wakunode_to_peers ), "nim-waku": struct( - prepare_service = prepare_gowaku_service, + prepare_service = prepare_nwaku_service, add_service_information = _add_waku_service_information, create_id = waku.create_waku_id, connect_peers = waku.connect_wakunode_to_peers From 432889fbff8a3765f59c5b7378ecfcecbb3b876d Mon Sep 17 00:00:00 2001 From: Gusto Bacvinka Date: Fri, 17 Feb 2023 15:55:28 +0200 Subject: [PATCH 059/112] Remove inaccurate comment --- gennet-module/gennet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gennet-module/gennet.py b/gennet-module/gennet.py index a1e8385..ddbe2dd 100755 --- a/gennet-module/gennet.py +++ b/gennet-module/gennet.py @@ -21,7 +21,7 @@ class nodeType(Enum): NWAKU = "nwaku" # waku desktop config GOWAKU = "gowaku" # waku mobile config - NOMOS = "nomos" # incompatible with waku nodes + NOMOS = "nomos" nodeTypeToToml = { From 5c81b48b4d8a90d619840e1b0a5c264b8b05e8eb Mon Sep 17 00:00:00 2001 From: Gusto Bacvinka Date: Sat, 25 Feb 2023 14:06:47 +0200 Subject: [PATCH 060/112] Nomos wsl statistics 
--- wls-module/nomos.py | 44 ++++++++++++++++++++++++++--------------- wls-module/wsl_nomos.py | 2 +- 2 files changed, 29 insertions(+), 17 deletions(-) diff --git a/wls-module/nomos.py b/wls-module/nomos.py index 47c4629..ec99c09 100644 --- a/wls-module/nomos.py +++ b/wls-module/nomos.py @@ -4,23 +4,26 @@ import random import matplotlib.pyplot as plt import networkx as nx +import statistics from PIL import Image LOGGER = None # Histogram of time delta in millis of tx being sent # and received by all nodes. + def hist_delta(name, iterations): results = [] for iteration in iterations: iteration_results = [result["delta"] for result in iteration["results"]] results.extend(iteration_results) - plt.hist(results, bins=20) - plt.xlabel("delta in (ms)") - plt.ylabel("Frequency") - plt.title("TX dissemination over network") - plt.savefig(name) + plt.hist(results, bins=30, color="#000000") + plt.xlabel("Delta time (milliseconds)", fontsize=12) + plt.ylabel("Frequency", fontsize=12) + plt.title("TX dissemination over network", fontsize=14) + + plt.savefig(name, dpi=200) plt.close() def network_graph(name, topology): @@ -31,8 +34,17 @@ def network_graph(name, topology): for connection in node_data["static_nodes"]: G.add_edge(node_name, connection) - nx.draw(G, with_labels=True) - plt.savefig(name) + pos = nx.spring_layout(G, seed=1) + node_size = 100 + font_size = 8 + + nx.draw(G, pos, with_labels=False, node_size=node_size, font_size=font_size, node_color='white', edge_color='black') + shift_amount = 0.07 + label_pos = {k: (v[0], v[1]+shift_amount) for k, v in pos.items()} + nx.draw_networkx_labels(G, label_pos, font_size=font_size) + nx.draw_networkx_nodes(G, pos, node_size=node_size, node_color='black', edgecolors='white') + + plt.savefig(name, dpi=200) plt.close() def concat_images(name, images): @@ -104,7 +116,7 @@ def get_nomos_mempool_metrics(node_address, iteration_s): return response_obj, time_e - iteration_s -def run_tests(logger, targets, topology): +def 
run_tests(logger, config, targets, topology): global LOGGER LOGGER = logger @@ -119,14 +131,12 @@ def run_tests(logger, targets, topology): msg_cnt = 0 failed_addtx_cnt = 0 failed_metrics_cnt = 0 - bytes_cnt = 0 s_time = time.time() - last_msg_time = 0 - next_time_to_msg = 0 failed_dissemination_cnt = 0 batch_size = 40 iterations = [] tx_id = 0 + all_response_deltas = [] LOGGER.info('Tx addition start time: %d' %int(round(time.time() * 1000))) """ Add new transaction to every node """ @@ -148,7 +158,7 @@ def run_tests(logger, targets, topology): last_tx_sent = int(time.time() * 1000) msg_cnt += 1 - time.sleep(1.5) + time.sleep(1) results = [] """ Collect mempool metrics from nodes """ @@ -177,6 +187,9 @@ def run_tests(logger, targets, topology): is_ok = False failed_dissemination_cnt += 1 + if delta >= 0: + all_response_deltas.append(delta) + results.append({ "node": n, "is_ok": is_ok, @@ -195,14 +208,13 @@ def run_tests(logger, targets, topology): "failed_metrics_cnt": failed_metrics_cnt, "failed_dissemination_cnt": failed_dissemination_cnt, "batch_size": batch_size, - "bytes_cnt": bytes_cnt, "s_time": s_time, - "last_msg_time": last_msg_time, - "next_time_to_msg": next_time_to_msg, + "median_response_delta": statistics.median(all_response_deltas), "iterations": iterations, } - LOGGER.info("Results: %s" %json.dumps(stats)) + with open('./topology.json', 'w') as summary_file: + summary_file.write(json.dumps(topology, indent=4)) with open('./summary.json', 'w') as summary_file: summary_file.write(json.dumps(stats, indent=4)) diff --git a/wls-module/wsl_nomos.py b/wls-module/wsl_nomos.py index 1087c02..4ec29a9 100644 --- a/wls-module/wsl_nomos.py +++ b/wls-module/wsl_nomos.py @@ -103,7 +103,7 @@ def main(): G_LOGGER.debug(targets) G_LOGGER.info('%d targets loaded' %len(targets)) - nomos.run_tests(G_LOGGER, targets, topology) + nomos.run_tests(G_LOGGER, config, targets, topology) if __name__ == "__main__": From de84b28a04f3fa642d317b03e22adf9481bf4dd3 Mon Sep 17 
00:00:00 2001 From: Gusto Bacvinka Date: Sat, 25 Feb 2023 15:06:53 +0200 Subject: [PATCH 061/112] Remove unused files --- wsl-module/build.sh | 6 ------ 1 file changed, 6 deletions(-) delete mode 100755 wsl-module/build.sh diff --git a/wsl-module/build.sh b/wsl-module/build.sh deleted file mode 100755 index c662990..0000000 --- a/wsl-module/build.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh -# pip freeze > requirements.txt -image_id=$(docker images -q wsl:0.0.1) -echo $image_id -docker image rm -f $image_id -docker image build --progress=plain -t wsl:0.0.1 ./ From 7cda520f5182a53ac24b2ca19e9fdbf7b55687a2 Mon Sep 17 00:00:00 2001 From: Gusto Bacvinka Date: Sun, 26 Feb 2023 19:16:30 +0200 Subject: [PATCH 062/112] Add multiple nomos nodes per container support --- gennet-module/gennet.py | 6 - main.star | 3 +- src/call_protocols.star | 36 +++- src/node_builders.star | 206 --------------------- src/node_builders/node_builders.star | 51 ++++- src/node_builders/types/nomos_builder.star | 76 ++++++++ src/nomos.star | 69 +++---- src/system_variables.star | 10 +- wls-module/build.sh | 0 wls-module/{wsl_nomos.py => wls_nomos.py} | 0 10 files changed, 186 insertions(+), 271 deletions(-) delete mode 100644 src/node_builders.star create mode 100644 src/node_builders/types/nomos_builder.star mode change 100644 => 100755 wls-module/build.sh rename wls-module/{wsl_nomos.py => wls_nomos.py} (100%) diff --git a/gennet-module/gennet.py b/gennet-module/gennet.py index ddbe2dd..c55a6ca 100755 --- a/gennet-module/gennet.py +++ b/gennet-module/gennet.py @@ -35,12 +35,6 @@ class nodeType(Enum): nodeType.NOMOS: "nomos" } -<<<<<<< HEAD -======= -#NODES = [nodeType.NWAKU, nodeType.GOWAKU, nodeType.NOMOS] -#NODE_PROBABILITIES = (0, 0, 100) - ->>>>>>> b6a76d5 (Gennet and WSL changes for nomos node) # To add a new network type, add appropriate entries to the networkType and networkTypeSwitch # the networkTypeSwitch is placed before generate_network(): fwd declaration mismatch with typer/python 
:/ class networkType(Enum): diff --git a/main.star b/main.star index 857f780..2d5acbf 100644 --- a/main.star +++ b/main.star @@ -32,8 +32,7 @@ def run(plan, args): grafana_service = grafana.set_up_grafana(plan, prometheus_service) - # nomos.interconnect_nomos_nodes(plan, waku_topology, services) - nodes.interconnect_nodes(plan, network_topology, services, interconnection_batch) + nodes.interconnect_nodes(plan, network_topology, interconnection_batch) # Setup WLS & Start the Simulation wls_service = wls.init(plan, network_topology, wls_config) diff --git a/src/call_protocols.star b/src/call_protocols.star index 71462c0..d41a195 100644 --- a/src/call_protocols.star +++ b/src/call_protocols.star @@ -15,4 +15,38 @@ def send_json_rpc(plan, service_name, port_id, method, params, extract={}): assertion="==", target_value=200) - return response \ No newline at end of file + return response + + +def send_http_get_req(plan, service_name, port_id, endpoint, extract={}): + recipe = GetHttpRequestRecipe( + service_name=service_name, + port_id=port_id, + endpoint=endpoint, + extract=extract + ) + + response = plan.wait(recipe=recipe, + field="code", + assertion="==", + target_value=200) + + return response + + +def send_http_post_req(plan, service_name, port_id, endpoint, body, extract={}): + recipe = PostHttpRequestRecipe( + service_name=service_name, + port_id=port_id, + endpoint=endpoint, + content_type="application/json", + body=body, + extract=extract + ) + + response = plan.wait(recipe=recipe, + field="code", + assertion="==", + target_value=200) + + return response diff --git a/src/node_builders.star b/src/node_builders.star deleted file mode 100644 index feecb0c..0000000 --- a/src/node_builders.star +++ /dev/null @@ -1,206 +0,0 @@ -# System Imports -system_variables = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") - -# Module Imports -waku = import_module(vars.WAKU_MODULE) -nomos = import_module(vars.NOMOS_MODULE) -files = 
import_module(vars.FILE_HELPERS_MODULE) - - -def prepare_nwaku_service(plan, nwakunode_name, all_services, use_general_configuration): - artifact_id, configuration_file = files.get_toml_configuration_artifact(plan, nwakunode_name, - use_general_configuration, - nwakunode_name) - - plan.print("Configuration being used file is " + configuration_file) - - add_service_config = ServiceConfig( - image=system_variables.NWAKU_IMAGE, - ports={ - system_variables.WAKU_RPC_PORT_ID: PortSpec(number=system_variables.WAKU_TCP_PORT, - transport_protocol="TCP"), - system_variables.PROMETHEUS_PORT_ID: PortSpec( - number=system_variables.PROMETHEUS_TCP_PORT, - transport_protocol="TCP"), - system_variables.WAKU_LIBP2P_PORT_ID: PortSpec( - number=system_variables.WAKU_LIBP2P_PORT, - transport_protocol="TCP"), - }, - files={ - system_variables.CONTAINER_NODE_CONFIG_FILE_LOCATION: artifact_id - }, - entrypoint=system_variables.NWAKU_ENTRYPOINT, - cmd=[ - "--config-file=" + system_variables.CONTAINER_NODE_CONFIG_FILE_LOCATION + "/" + configuration_file - ] - ) - - all_services[nwakunode_name] = add_service_config - - - -def prepare_gowaku_service(plan, gowakunode_name, all_services, use_general_configuration): - artifact_id, configuration_file = files.get_toml_configuration_artifact(plan, gowakunode_name, - use_general_configuration, - gowakunode_name) - - plan.print("Configuration being used file is " + configuration_file) - plan.print("Entrypoint is "+ str(system_variables.GOWAKU_ENTRYPOINT)) - - add_service_config = ServiceConfig( - image=system_variables.GOWAKU_IMAGE, - ports={ - system_variables.WAKU_RPC_PORT_ID: PortSpec(number=system_variables.WAKU_TCP_PORT, - transport_protocol="TCP"), - system_variables.PROMETHEUS_PORT_ID: PortSpec( - number=system_variables.PROMETHEUS_TCP_PORT, - transport_protocol="TCP"), - system_variables.WAKU_LIBP2P_PORT_ID: PortSpec( - number=system_variables.WAKU_LIBP2P_PORT, - transport_protocol="TCP"), - }, - files={ - 
system_variables.CONTAINER_NODE_CONFIG_FILE_LOCATION: artifact_id - }, - entrypoint=system_variables.GOWAKU_ENTRYPOINT, - cmd=[ - "--config-file=" + system_variables.CONTAINER_NODE_CONFIG_FILE_LOCATION + "/" + configuration_file - ] - ) - - all_services[gowakunode_name] = add_service_config - - -def prepare_nomos_service(node_name, all_services, config_file, artifact_id): - nomos_service_config = ServiceConfig( - image=vars.NOMOS_IMAGE, - ports={ - vars.NOMOS_HTTP_PORT_ID: PortSpec(number=vars.NOMOS_HTTP_PORT, - transport_protocol="TCP"), - vars.PROMETHEUS_PORT_ID: PortSpec( - number=vars.PROMETHEUS_TCP_PORT, - transport_protocol="TCP"), - vars.NOMOS_LIBP2P_PORT_ID: PortSpec( - number=vars.NOMOS_LIBP2P_PORT, - transport_protocol="TCP"), - }, - files={ - vars.CONTAINER_NODE_CONFIG_FILE_LOCATION: artifact_id - }, - entrypoint=vars.NOMOS_ENTRYPOINT, - cmd=[ - vars.NOMOS_CONTAINER_CONFIG_FILE_LOCATION - ] - ) - - all_services[node_name] = nomos_service_config - - -def interconnect_nodes(plan, topology_information, services, interconnection_batch): - for waku_service_name in services.keys(): - peers = topology_information[waku_service_name]["static_nodes"] - - for i in range(0, len(peers), interconnection_batch): - x = i - image = services[waku_service_name]["image"] - create_id = service_dispatcher[image].create_id - connect_peers = service_dispatcher[image].connect_peers - peer_ids = [create_id(services[peer]) for peer in peers[x:x + interconnection_batch]] - - connect_peers(plan, waku_service_name, vars.WAKU_RPC_PORT_ID, peer_ids) - - -def instantiate_services(plan, network_topology, testing): - """ - As we will need to access for the service information later, the structure is the following: - - services = { - "nwaku_0": { - "peer_id" : peer id of the node, as string, - "service_info": Kurtosis service struct, that has - "ip": ip of the service that is running the node, - "ports": Kurtosis PortSpec, that you can access with their respective identifier - }, - 
"nwaku_1": {...}, - "gowaku_": {...} - - } - - Example: - - service_peer_id = services["nwaku_0"]["peer_id"] - service_ip = services["nwaku_0"]["service_info"].ip_address - rpc_node_number = services["nwaku_0"]["service_info"].ports["your_rpc_identifier"].number - rpc_node_protocol = services["nwaku_0"]["service_info"].ports["your_rpc_identifier"].protocol - """ - - all_services = {} - - # Get up all nodes - for service_name in network_topology.keys(): - image = network_topology[service_name]["image"] - - service_builder = service_dispatcher[image].prepare_service - - service_builder(plan, service_name, all_services, use_general_configuration) - - all_services_information = plan.add_services( - configs = all_services - ) - services_information = add_services_information(plan, all_services_information, network_topology) - - return services_information - - -def add_services_information(plan, all_services_information, network_topology): - new_services_information = {} - - for service_name in all_services_information: - image = network_topology[service_name]["image"] - info_getter = service_dispatcher[image].add_service_information - service_info = all_services_information[service_name] - new_service_info = info_getter(plan, service_name, service_info) - new_service_info["image"] = image - new_services_information[service_name] = new_service_info - - return new_services_information - - -def _add_waku_service_information(plan, service_name, service_info): - node_peer_id = waku.get_wakunode_peer_id(plan, service_name, vars.WAKU_RPC_PORT_ID) - new_service_info = {} - new_service_info["peer_id"] = node_peer_id - new_service_info["service_info"] = service_info - - return new_service_info - - -def _add_nomos_service_information(plan, service_name, service_info): - node_peer_id = nomos.get_nomos_peer_id(plan, service_name, vars.NOMOS_HTTP_PORT_ID) - new_service_info = {} - new_service_info["peer_id"] = node_peer_id - new_service_info["service_info"] = service_info - - return 
new_service_info - - -service_dispatcher = { - "go-waku": struct( - prepare_service = prepare_gowaku_service, - add_service_information = _add_waku_service_information, - create_id = waku.create_waku_id, - connect_peers = waku.connect_wakunode_to_peers - ), - "nim-waku": struct( - prepare_service = prepare_nwaku_service, - add_service_information = _add_waku_service_information, - create_id = waku.create_waku_id, - connect_peers = waku.connect_wakunode_to_peers - ), - "nomos": struct( - prepare_service = prepare_nomos_service, - add_service_information = _add_nomos_service_information, - create_id = nomos.create_nomos_id, - connect_peers = nomos.connect_nomos_to_peers - ), -} diff --git a/src/node_builders/node_builders.star b/src/node_builders/node_builders.star index 52b2034..207ed48 100644 --- a/src/node_builders/node_builders.star +++ b/src/node_builders/node_builders.star @@ -3,22 +3,42 @@ vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") # Module Imports waku = import_module(vars.WAKU_MODULE) +nomos = import_module(vars.NOMOS_MODULE) files = import_module(vars.FILE_HELPERS_MODULE) waku_builder = import_module(vars.WAKU_BUILDER_MODULE) nwaku_builder = import_module(vars.NWAKU_BUILDER_MODULE) gowaku_builder = import_module(vars.GOWAKU_BUILDER_MODULE) +nomos_builder = import_module(vars.NOMOS_BUILDER_MODULE) service_builder_dispatcher = { vars.GENNET_GOWAKU_IMAGE_VALUE: gowaku_builder.prepare_gowaku_service, - vars.GENNET_NWAKU_IMAGE_VALUE: nwaku_builder.prepare_nwaku_service - # nomos: nomos_builder.prepare_nomos_service + vars.GENNET_NWAKU_IMAGE_VALUE: nwaku_builder.prepare_nwaku_service, + vars.GENNET_NOMOS_IMAGE_VALUE: nomos_builder.prepare_nomos_service +} + +service_info_dispatcher = { + vars.GENNET_GOWAKU_IMAGE_VALUE: waku.get_wakunode_peer_id, + vars.GENNET_NWAKU_IMAGE_VALUE: waku.get_wakunode_peer_id, + vars.GENNET_NOMOS_IMAGE_VALUE: nomos.get_nomos_peer_id +} + +service_multiaddr_dispatcher = { + 
vars.GENNET_GOWAKU_IMAGE_VALUE: waku.create_node_multiaddress, + vars.GENNET_NWAKU_IMAGE_VALUE: waku.create_node_multiaddress, + vars.GENNET_NOMOS_IMAGE_VALUE: nomos.create_node_multiaddress +} + +service_connect_dispatcher = { + vars.GENNET_GOWAKU_IMAGE_VALUE: waku.connect_wakunode_to_peers, + vars.GENNET_NWAKU_IMAGE_VALUE: waku.connect_wakunode_to_peers, + vars.GENNET_NOMOS_IMAGE_VALUE: nomos.connect_nomos_to_peers } ports_dispatcher = { vars.GENNET_GOWAKU_IMAGE_VALUE: waku_builder._add_waku_ports_info_to_topology, - vars.GENNET_NWAKU_IMAGE_VALUE: waku_builder._add_waku_ports_info_to_topology - # nomos: nomos_builder._add_nomos_ports_info_to_topology + vars.GENNET_NWAKU_IMAGE_VALUE: waku_builder._add_waku_ports_info_to_topology, + vars.GENNET_NOMOS_IMAGE_VALUE: nomos_builder._add_nomos_ports_info_to_topology } def instantiate_services(plan, network_topology, testing): @@ -70,11 +90,32 @@ def instantiate_services(plan, network_topology, testing): _add_service_info_to_topology(plan, all_services_information, network_topology) +def interconnect_nodes(plan, topology_information, interconnection_batch): + # Interconnect them + nodes_in_topology = topology_information[vars.GENNET_NODES_KEY] + + for node_id in nodes_in_topology.keys(): + image = nodes_in_topology[node_id][vars.GENNET_IMAGE_KEY] + peers = nodes_in_topology[node_id][vars.GENNET_STATIC_NODES_KEY] + create_node_multiaddress = service_multiaddr_dispatcher[image] + connect_node_to_peers = service_connect_dispatcher[image] + + for i in range(0, len(peers), interconnection_batch): + peer_ids = [create_node_multiaddress(peer, nodes_in_topology[peer]) + for peer in peers[i:i + interconnection_batch]] + + connect_node_to_peers(plan, nodes_in_topology[node_id][vars.GENNET_NODE_CONTAINER_KEY], + node_id, vars.RPC_PORT_ID, peer_ids) + + + def _add_service_info_to_topology(plan, all_services_information, network_topology): for node_id, node_info in network_topology[vars.GENNET_NODES_KEY].items(): node_rpc_port_id = 
vars.RPC_PORT_ID + "_" + node_id - node_peer_id = waku.get_wakunode_peer_id(plan, node_info[vars.GENNET_NODE_CONTAINER_KEY], + image = network_topology[vars.GENNET_NODES_KEY][node_id][vars.GENNET_IMAGE_KEY] + peer_id_getter = service_info_dispatcher[image] + node_peer_id = peer_id_getter(plan, node_info[vars.GENNET_NODE_CONTAINER_KEY], node_rpc_port_id) network_topology[vars.GENNET_NODES_KEY][node_id][vars.PEER_ID_KEY] = node_peer_id diff --git a/src/node_builders/types/nomos_builder.star b/src/node_builders/types/nomos_builder.star new file mode 100644 index 0000000..fc2ac3f --- /dev/null +++ b/src/node_builders/types/nomos_builder.star @@ -0,0 +1,76 @@ +# System Imports +vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") + + +def prepare_nomos_service(node_name, all_services, config_files, artifact_ids, service_id): + prepared_ports = _prepare_nomos_ports_in_service(node_name) + prepared_files = _prepare_nomos_config_files_in_service(node_name, artifact_ids) + prepared_cmd = _prepare_nomos_cmd_in_service(node_name, config_files) + + add_service_config = ServiceConfig( + image=vars.NOMOS_IMAGE, + ports=prepared_ports, + files=prepared_files, + entrypoint=vars.GENERAL_ENTRYPOINT, + cmd=prepared_cmd + ) + + all_services[service_id] = add_service_config + + +def _prepare_nomos_cmd_in_service(nomos_names, config_files): + prepared_cmd = "" + for i in range(len(nomos_names)): + prepared_cmd += vars.NOMOS_ENTRYPOINT + " " + prepared_cmd += vars.NOMOS_CONTAINER_CONFIG_FILE_LOCATION + " " + # prepared_cmd += vars.NOMOS_PORT_SHIFT_FLAG + str(i) + if i != len(nomos_names) - 1: + prepared_cmd += " & " + + return [prepared_cmd] + + +def _prepare_nomos_ports_in_service(node_names): + prepared_ports = {} + for i in range(len(node_names)): + prepared_ports[vars.RPC_PORT_ID + "_" + node_names[i]] = \ + PortSpec(number=vars.NOMOS_RPC_PORT_NUMBER + i, + transport_protocol=vars.NOMOS_RPC_PORT_PROTOCOL) + + prepared_ports[vars.PROMETHEUS_PORT_ID + "_" + 
node_names[i]] = \ + PortSpec(number=vars.PROMETHEUS_PORT_NUMBER + i, + transport_protocol=vars.PROMETHEUS_PORT_PROTOCOL) + + prepared_ports[vars.NOMOS_LIBP2P_PORT_ID + "_" + node_names[i]] = \ + PortSpec(number=vars.NOMOS_LIBP2P_PORT + i, + transport_protocol=vars.NOMOS_LIBP2P_PORT_PROTOCOL) + + return prepared_ports + + +def _prepare_nomos_config_files_in_service(node_names, artifact_ids): + prepared_files = {} + + for i in range(len(node_names)): + prepared_files[vars.CONTAINER_NODE_CONFIG_FILE_LOCATION + node_names[i]] = artifact_ids[i] + + return prepared_files + + +def _add_nomos_ports_info_to_topology(network_topology, all_services_information, node_info, node_id): + nomos_rpc_port_id = vars.RPC_PORT_ID + "_" + node_id + libp2p_port_id = vars.NOMOS_LIBP2P_PORT_ID + "_" + node_id + prometheus_port_id = vars.PROMETHEUS_PORT_ID + "_" + node_id + + network_topology[vars.GENNET_NODES_KEY][node_id][vars.PORTS_KEY] = {} + _add_nomos_port(network_topology, all_services_information, node_id, node_info, nomos_rpc_port_id) + _add_nomos_port(network_topology, all_services_information, node_id, node_info, libp2p_port_id) + _add_nomos_port(network_topology, all_services_information, node_id, node_info, prometheus_port_id) + + +def _add_nomos_port(network_topology, all_services_information, node_id, node_info, port_id): + network_topology[vars.GENNET_NODES_KEY][node_id][vars.PORTS_KEY][port_id] = \ + (all_services_information[node_info[vars.GENNET_NODE_CONTAINER_KEY]].ports[ + port_id].number, + all_services_information[node_info[vars.GENNET_NODE_CONTAINER_KEY]].ports[ + port_id].transport_protocol) diff --git a/src/nomos.star b/src/nomos.star index 3ac44c1..17c3281 100644 --- a/src/nomos.star +++ b/src/nomos.star @@ -3,58 +3,23 @@ vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") # Module Imports files = import_module(vars.FILE_HELPERS_MODULE) - - -def get_req(plan, service_name, port_id, endpoint, extract={}): - recipe = 
GetHttpRequestRecipe( - service_name=service_name, - port_id=port_id, - endpoint=endpoint, - extract=extract - ) - - response = plan.wait(recipe=recipe, - field="code", - assertion="==", - target_value=200) - - return response - - -def post_req(plan, service_name, port_id, endpoint, body, extract={}): - recipe = PostHttpRequestRecipe( - service_name=service_name, - port_id=port_id, - endpoint=endpoint, - content_type="application/json", - body=body, - extract=extract - ) - - response = plan.wait(recipe=recipe, - field="code", - assertion="==", - target_value=200) - - return response +call_protocols = import_module(vars.CALL_PROTOCOLS) def get_nomos_peer_id(plan, service_name, port_id): extract = {"peer_id": '.peer_id'} - response = get_req(plan, service_name, port_id, vars.NOMOS_NET_INFO_URL, extract) + response = call_protocols.send_http_get_req(plan, service_name, port_id, vars.NOMOS_NET_INFO_URL, extract) plan.assert(value=response["code"], assertion="==", target_value = 200) return response["extract.peer_id"] -def create_nomos_id(nomos_service_information): - nomos_service = nomos_service_information["service_info"] - - ip = nomos_service.ip_address - port = nomos_service.ports[vars.NOMOS_LIBP2P_PORT_ID].number - nomos_node_id = nomos_service_information["peer_id"] +def create_node_multiaddress(node_id, node_information): + ip = node_information[vars.IP_KEY] + port = node_information[vars.PORTS_KEY][vars.NOMOS_LIBP2P_PORT_ID + "_" + node_id][0] + nomos_node_id = node_information[vars.PEER_ID_KEY] return '"/ip4/' + str(ip) + '/tcp/' + str(port) + '/p2p/' + nomos_node_id + '"' @@ -63,10 +28,11 @@ def _merge_peer_ids(peer_ids): return "[" + ",".join(peer_ids) + "]" -def connect_nomos_to_peers(plan, service_id, port_id, peer_ids): +def connect_nomos_to_peers(plan, service_name, node_id, port_id, peer_ids): body = _merge_peer_ids(peer_ids) + port_id = port_id + "_" + node_id - response = post_req(plan, service_id, port_id, vars.NOMOS_NET_CONN_URL, body) + response = 
call_protocols.send_http_post_req(plan, service_name, port_id, vars.NOMOS_NET_CONN_URL, body) plan.assert(value=response["code"], assertion="==", target_value = 200) @@ -81,11 +47,18 @@ def make_service_wait(plan,service_id, time): plan.exec(exec_recipe) -def interconnect_nomos_nodes(plan, topology_information, services): +def interconnect_nomos_nodes(plan, topology_information, interconnection_batch): # Interconnect them - for nomos_service_id in services.keys(): - peers = topology_information[nomos_service_id]["static_nodes"] + nodes_in_topology = topology_information[vars.GENNET_NODES_KEY] + + for node_id in nodes_in_topology.keys(): + peers = nodes_in_topology[node_id][vars.GENNET_STATIC_NODES_KEY] + + for i in range(0, len(peers), interconnection_batch): + peer_ids = [create_node_multiaddress(peer, nodes_in_topology[peer]) + for peer in peers[i:i + interconnection_batch]] + + connect_nomos_to_peers(plan, nodes_in_topology[node_id][vars.GENNET_NODE_CONTAINER_KEY], + node_id, vars.RPC_PORT_ID, peer_ids) - peer_ids = [create_nomos_id(services[peer]) for peer in peers] - connect_nomos_to_peers(plan, nomos_service_id, vars.NOMOS_HTTP_PORT_ID, peer_ids) diff --git a/src/system_variables.star b/src/system_variables.star index 72acdd2..5a2ef8e 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -25,13 +25,15 @@ WAKUNODE_CONFIGURATION_FILE_FLAG = "--config-file=" WAKUNODE_PORT_SHIFT_FLAG = "--ports-shift=" NWAKU_ENTRYPOINT = "/usr/bin/wakunode --rpc-address=0.0.0.0 --metrics-server-address=0.0.0.0 --log-level=TRACE" GOWAKU_ENTRYPOINT = "/usr/bin/waku --rpc-address=0.0.0.0 --metrics-server-address=0.0.0.0" -NOMOS_ENTRYPOINT = ["/usr/bin/nomos-node"] +NOMOS_ENTRYPOINT = "/usr/bin/nomos-node" +NOMOS_PORT_SHIFT_FLAG = "--ports-shift=" NOMOS_CONTAINER_CONFIG_FILE_LOCATION = '/etc/nomos/config.yml' # Nomos Configuration NOMOS_IMAGE = "nomos" -NOMOS_HTTP_PORT_ID = "rpc" -NOMOS_HTTP_PORT = 8080 +NOMOS_RPC_PORT_PROTOCOL = "TCP" +NOMOS_RPC_PORT_NUMBER = 
8080 +NOMOS_LIBP2P_PORT_PROTOCOL = "TCP" NOMOS_LIBP2P_PORT_ID = "libp2p" NOMOS_LIBP2P_PORT = 3000 NOMOS_SETUP_WAIT_TIME = "5" @@ -79,6 +81,7 @@ GENNET_NODE_CONTAINER_KEY = "container_id" GENNET_STATIC_NODES_KEY = "static_nodes" GENNET_GOWAKU_IMAGE_VALUE = "go-waku" GENNET_NWAKU_IMAGE_VALUE = "nim-waku" +GENNET_NOMOS_IMAGE_VALUE = "nomos" PEER_ID_KEY = "peer_id" IP_KEY = "ip_address" @@ -107,6 +110,7 @@ NODE_BUILDERS_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/node_bu WAKU_BUILDER_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/types/waku_builder.star" NWAKU_BUILDER_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/types/nwaku_builder.star" GOWAKU_BUILDER_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/types/gowaku_builder.star" +NOMOS_BUILDER_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/types/nomos_builder.star" PROMETHEUS_MODULE = "github.com/logos-co/wakurtosis/src/monitoring/prometheus.star" GRAFANA_MODULE = "github.com/logos-co/wakurtosis/src/monitoring/grafana.star" ARGUMENT_PARSER_MODULE = "github.com/logos-co/wakurtosis/src/arguments_parser.star" diff --git a/wls-module/build.sh b/wls-module/build.sh old mode 100644 new mode 100755 diff --git a/wls-module/wsl_nomos.py b/wls-module/wls_nomos.py similarity index 100% rename from wls-module/wsl_nomos.py rename to wls-module/wls_nomos.py From 6cffd71084ef3cb4e57053cda852b5667d764339 Mon Sep 17 00:00:00 2001 From: Gusto Bacvinka Date: Mon, 27 Feb 2023 18:43:54 +0200 Subject: [PATCH 063/112] Remove unused variable --- src/system_variables.star | 1 - 1 file changed, 1 deletion(-) diff --git a/src/system_variables.star b/src/system_variables.star index 5a2ef8e..ee17b59 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -36,7 +36,6 @@ NOMOS_RPC_PORT_NUMBER = 8080 NOMOS_LIBP2P_PORT_PROTOCOL = "TCP" NOMOS_LIBP2P_PORT_ID = "libp2p" NOMOS_LIBP2P_PORT = 3000 -NOMOS_SETUP_WAIT_TIME = "5" NOMOS_NET_INFO_URL = "/network/info" 
NOMOS_NET_CONN_URL = "/network/conn" From a0789087ebe63b76ecbcfc96dcde564032d19e25 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Wed, 1 Mar 2023 17:35:32 +0100 Subject: [PATCH 064/112] WIP trying to pass new network information to wls --- config/config.json | 1 + main.star | 3 +- src/system_variables.star | 11 ++++-- src/templates.star | 71 ++++++++++++++++----------------- src/wls.star | 56 +++++++++++++++----------- wls-module/wls.py | 82 ++++++++++++++++++++------------------- 6 files changed, 121 insertions(+), 103 deletions(-) diff --git a/config/config.json b/config/config.json index 4c84733..49850cd 100644 --- a/config/config.json +++ b/config/config.json @@ -21,6 +21,7 @@ "benchmark": "False" }, "wls": { + "debug_level": "DEBUG", "simulation_time": 60, "message_rate": 10, "min_packet_size": 2, diff --git a/main.star b/main.star index 2d5acbf..c14c3d1 100644 --- a/main.star +++ b/main.star @@ -17,7 +17,6 @@ def run(plan, args): config = json.decode(config_json) kurtosis_config = config[vars.KURTOSIS_KEY] - wls_config = config[vars.WLS_KEY] interconnection_batch = kurtosis_config[vars.INTERCONNECTION_BATCH_KEY] # Load network topology @@ -35,4 +34,4 @@ def run(plan, args): nodes.interconnect_nodes(plan, network_topology, interconnection_batch) # Setup WLS & Start the Simulation - wls_service = wls.init(plan, network_topology, wls_config) + wls_service = wls.init(plan, network_topology, config_file) diff --git a/src/system_variables.star b/src/system_variables.star index ee17b59..05f81f7 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -5,6 +5,7 @@ GOWAKU_IMAGE = "gowaku" RPC_PORT_ID = "rpc" NODE_CONFIG_FILE_LOCATION = "github.com/logos-co/wakurtosis/config/topology_generated/" +CONFIG_FILE_LOCATION = "github.com/logos-co/wakurtosis/config/" CONTAINER_NODE_CONFIG_FILE_LOCATION = "/node/configuration_file/" GENERAL_ENTRYPOINT = ["/bin/sh", "-c"] CONFIG_FILE_STARLARK_PARAMETER = "config_file" @@ -92,10 +93,14 @@ WLS_SERVICE_NAME 
= "wls" WLS_CONFIG_PATH = "/wls/config" WLS_TARGETS_PATH = "/wls/targets" WLS_TOMLS_PATH = "/wls/tomls" -WLS_CMD = ["python3", "wls.py"] +WLS_TOPOLOGY_PATH = "/wls/network_topology" +# WLS_CMD = ["python3", "wls.py"] +WLS_CMD = ["/bin/bash"] -CONTAINER_WLS_CONFIGURATION_FILE_NAME = "wls.yml" -CONTAINER_TARGETS_FILE_NAME_WLS = "targets.json" + +CONTAINER_WLS_CONFIGURATION_FILE_NAME = "config.json" +# CONTAINER_TARGETS_FILE_NAME_WLS = "targets.json" +CONTAINER_TOPOLOGY_FILE_NAME_WLS = "network_data.json" # Waku RPC methods POST_RELAY_MESSAGE_METHOD = "post_waku_v2_relay_v1_message" diff --git a/src/templates.star b/src/templates.star index cfc099e..dcf3e73 100644 --- a/src/templates.star +++ b/src/templates.star @@ -33,39 +33,40 @@ def get_prometheus_template(): return template -# WLS -def get_wls_template(): - # Traffic simulation parameters - wls_yml_template = """ - general: - debug_level : "DEBUG" - - targets_file : "./targets/targets.json" - - prng_seed : 0 - - # Simulation time in seconds - simulation_time : {{.simulation_time}} - - # Message rate in messages per second - msg_rate : {{.message_rate}} - - # Packet size in bytes - min_packet_size : {{.min_packet_size}} - max_packet_size : {{.max_packet_size}} - - # Packe size distribution - # Values: uniform and gaussian - dist_type : {{.dist_type}} - - # Fraction (of the total number of nodes) that inject traffic - # Values: [0., 1.] 
- emitters_fraction : {{.emitters_fraction}} - - # Inter-message times - # Values: uniform and poisson - inter_msg_type : {{.inter_msg_type}} - """ - - return wls_yml_template +## WLS +#def get_wls_template(): +# # Traffic simulation parameters +# wls_yml_template = """ +# general: +# +# debug_level : "DEBUG" +# +# targets_file : "./targets/targets.json" +# +# prng_seed : 0 +# +# # Simulation time in seconds +# simulation_time : {{.simulation_time}} +# +# # Message rate in messages per second +# msg_rate : {{.message_rate}} +# +# # Packet size in bytes +# min_packet_size : {{.min_packet_size}} +# max_packet_size : {{.max_packet_size}} +# +# # Packe size distribution +# # Values: uniform and gaussian +# dist_type : {{.dist_type}} +# +# # Fraction (of the total number of nodes) that inject traffic +# # Values: [0., 1.] +# emitters_fraction : {{.emitters_fraction}} +# +# # Inter-message times +# # Values: uniform and poisson +# inter_msg_type : {{.inter_msg_type}} +# """ +# +# return wls_yml_template diff --git a/src/wls.star b/src/wls.star index e6a8434..5de0100 100644 --- a/src/wls.star +++ b/src/wls.star @@ -5,22 +5,13 @@ vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") files = import_module(vars.FILE_HELPERS_MODULE) templates = import_module(vars.TEMPLATES_MODULE) -def create_config(plan, wls_config): - - # Traffic simulation parameters - wls_yml_template = templates.get_wls_template() - - artifact_id = plan.render_templates( - config={ - vars.CONTAINER_WLS_CONFIGURATION_FILE_NAME: struct( - template=wls_yml_template, - data=wls_config, - ) - }, - name="wls_config" +def upload_config(plan, config_file): + config_artifact = plan.upload_files( + src=config_file, + name="config_file" ) - - return artifact_id + + return config_artifact def create_targets(plan, services): @@ -44,27 +35,46 @@ def create_targets(plan, services): return artifact_id -def init(plan, services, wls_config): +def create_new_topology_information(plan, 
network_topology): + template = """ + {{.information}} + """ + info = {} + info["information"] = network_topology + + artifact_id = plan.render_templates( + config={ + vars.CONTAINER_TOPOLOGY_FILE_NAME_WLS: struct( + template=template, + data=info, + ) + }, + name="wls_topology" + ) + + return artifact_id + + +def init(plan, network_topology, config_file): # Generate simulation config - wls_config = create_config(plan, wls_config) + config_artifact = upload_config(plan, config_file) tomls_artifact = plan.upload_files( src = vars.NODE_CONFIG_FILE_LOCATION, name = "tomls_artifact", ) - # Create targets.json - wls_targets = create_targets(plan, services) - + # Get complete network topology information + wls_topology = create_new_topology_information(plan, network_topology) add_service_config = ServiceConfig( image=vars.WLS_IMAGE, ports={}, files={ - vars.WLS_CONFIG_PATH: wls_config, - vars.WLS_TARGETS_PATH: wls_targets, - vars.WLS_TOMLS_PATH: tomls_artifact + vars.WLS_CONFIG_PATH: config_artifact, + vars.WLS_TOMLS_PATH: tomls_artifact, + vars.WLS_TOPOLOGY_PATH: wls_topology }, cmd=vars.WLS_CMD ) diff --git a/wls-module/wls.py b/wls-module/wls.py index f28b8bf..e53654e 100644 --- a/wls-module/wls.py +++ b/wls-module/wls.py @@ -1,24 +1,19 @@ #!/usr/bin/env python3 """ Description: Wakurtosis load simulator - """ """ Dependencies """ -import sys, logging, yaml, json, time, random, os, argparse, tomllib, glob, hashlib +import sys, logging, json, time, random, os, argparse, tomllib, glob, hashlib import requests import rtnorm -import nomos -# from pathlib import Path -# import numpy as np -# import pandas as pd -# import matplotlib.pyplot as plt -# import cloudpickle as pickle + """ Globals """ G_APP_NAME = 'WLS' G_LOG_LEVEL = 'DEBUG' -G_DEFAULT_CONFIG_FILE = './config/wls.yml' +G_DEFAULT_CONFIG_FILE = './config/config.json' +G_DEFAULT_TOPOLOGY_FILE = './network_topology/network_data.json' G_LOGGER = None """ Custom logging formatter """ @@ -47,7 +42,7 @@ def 
check_waku_node(node_address): 'method': 'get_waku_v2_debug_v1_info', # 'method' : 'get_waku_v2_debug_v1_version', 'id': 1, - 'params' : []} + 'params': []} G_LOGGER.info('Waku RPC: %s from %s' %(data['method'], node_address)) @@ -73,7 +68,7 @@ def get_waku_msgs(node_address, topic, cursor=None): 'jsonrpc': '2.0', 'method': 'get_waku_v2_store_v1_messages', 'id': 1, - 'params' : [topic, None, None, None, {"pageSize": 100, "cursor": cursor,"forward": True}] + 'params': [topic, None, None, None, {"pageSize": 100, "cursor": cursor,"forward": True}] } G_LOGGER.debug('Waku RPC: %s from %s' %(data['method'], node_address)) @@ -277,49 +272,56 @@ def main(): """ Parse command line args. """ parser = argparse.ArgumentParser() parser.add_argument("-cfg", "--config_file", help="Config file", action="store_true", default=G_DEFAULT_CONFIG_FILE) + parser.add_argument("-t", "--topology_file", help="Topology file", action="store_true", + default=G_DEFAULT_TOPOLOGY_FILE) + args = parser.parse_args() config_file = args.config_file + topology_file = args.topology_file """ Load config file """ try: with open(config_file, 'r') as f: - config = yaml.safe_load(f) + config = json.load(f) except Exception as e: G_LOGGER.error('%s: %s' % (e.__doc__, e)) sys.exit() # Set loglevel from config - G_LOGGER.setLevel(config['general']['debug_level']) - handler.setLevel(config['general']['debug_level']) + wls_config = config['wls'] + + G_LOGGER.setLevel(wls_config['debug_level']) + handler.setLevel(wls_config['debug_level']) - G_LOGGER.debug(config) + G_LOGGER.debug(wls_config) G_LOGGER.info('Configuration loaded from %s' %config_file) # Set RPNG seed from config random.seed(config['general']['prng_seed']) - - """ Load targets """ + + """ Load topology """ try: - with open(config['general']['targets_file'], 'r') as read_file: - targets = json.load(read_file) + with open(topology_file, 'r') as read_file: + topology = json.load(read_file) except Exception as e: G_LOGGER.error('%s: %s' % (e.__doc__, 
e)) sys.exit() - if len(targets) == 0: - G_LOGGER.error('Cannot find valid targets. Aborting.') + if len(topology) == 0: + G_LOGGER.error('Cannot find valid topology. Aborting.') sys.exit(1) - G_LOGGER.debug(targets) - G_LOGGER.info('%d targets loaded' %len(targets)) + G_LOGGER.debug(topology) + G_LOGGER.info('%d topology loaded' %len(topology)) """ Check all nodes are reachable """ - for i, target in enumerate(targets): - if not check_waku_node('http://%s/' %target): - G_LOGGER.error('Node %d (%s) is not online. Aborted.' %(i, target)) + for node_key, node_info in topology.items(): + node_address = node_info["ip_address"]+":"+node_info["ports"]["waku_rpc_"+node_key] + if not check_waku_node(f"http://{node_address}/"): + G_LOGGER.error(f"Node {node_key} is not online. Aborted.") sys.exit(1) - G_LOGGER.info('All %d Waku nodes are reachable.' %len(targets)) + G_LOGGER.info(f"All {len(topology)} nodes are reachable.") """ Load Topics """ topics = [] @@ -354,17 +356,17 @@ def main(): G_LOGGER.info('Loaded nodes topics from toml files: %s' %topics_msg_cnt.keys()) """ Define the subset of emitters """ - num_emitters = int(len(targets) * config['general']['emitters_fraction']) + num_emitters = int(len(topology) * wls_config['emitters_fraction']) if num_emitters == 0: G_LOGGER.error('The number of emitters must be greater than zero. 
Try increasing the fraction of emitters.') sys.exit() """ NOTE: Emitters will only inject topics they are subscribed to """ - emitters_indices = random.sample(range(len(targets)), num_emitters) - emitters = [targets[i] for i in emitters_indices] + emitters_indices = random.sample(range(len(topology)), num_emitters) + emitters = [topology[i] for i in emitters_indices] emitters_topics = [topics[i] for i in emitters_indices] - # emitters = random.sample(targets, num_emitters) - G_LOGGER.info('Selected %d emitters out of %d total nodes' %(len(emitters), len(targets))) + # emitters = random.sample(topology, num_emitters) + G_LOGGER.info('Selected %d emitters out of %d total nodes' %(len(emitters), len(topology))) """ Start simulation """ s_time = time.time() @@ -372,13 +374,13 @@ def main(): next_time_to_msg = 0 msgs_dict = {} - G_LOGGER.info('Starting a simulation of %d seconds ...' %config['general']['simulation_time']) + G_LOGGER.info('Starting a simulation of %d seconds ...' %wls_config['simulation_time']) while True: # Check end condition elapsed_s = time.time() - s_time - if elapsed_s >= config['general']['simulation_time']: + if elapsed_s >= wls_config['simulation_time']: G_LOGGER.info('Simulation ended. Sent %d messages in %ds.' %(len(msgs_dict), elapsed_s)) break @@ -401,10 +403,9 @@ def main(): emitter_topic = random.choice(emitter_topics) G_LOGGER.info('Injecting message of topic %s to network through Waku node %s ...' 
%(emitter_topic, node_address)) - - payload, size = make_payload_dist(dist_type=config['general']['dist_type'].lower(), min_size=config['general']['min_packet_size'], max_size=config['general']['max_packet_size']) - response, elapsed, waku_msg, ts = send_waku_msg(node_address, topic=emitter_topic, payload=payload, nonce=len(msgs_dict)) + payload, size = make_payload_dist(dist_type=wls_config['dist_type'].lower(), min_size=wls_config['min_packet_size'], max_size=wls_config['max_packet_size']) + response, elapsed, waku_msg, ts = send_waku_msg(node_address, topic=emitter_topic, payload=payload, nonce=len(msgs_dict)) if response['result']: msg_hash = hashlib.sha256(waku_msg.encode('utf-8')).hexdigest() if msg_hash in msgs_dict: @@ -413,20 +414,21 @@ def main(): msgs_dict[msg_hash] = {'ts' : ts, 'injection_point' : node_address, 'nonce' : len(msgs_dict), 'topic' : emitter_topic, 'payload' : payload, 'payload_size' : size} # Compute the time to next message - next_time_to_msg = get_next_time_to_msg(config['general']['inter_msg_type'], config['general']['msg_rate'], config['general']['simulation_time']) + next_time_to_msg = get_next_time_to_msg(wls_config['inter_msg_type'], wls_config['msg_rate'], wls_config['simulation_time']) G_LOGGER.debug('Next message will happen in %d ms.' 
%(next_time_to_msg * 1000.0)) last_msg_time = time.time() elapsed_s = time.time() - s_time - + # Save messages for further analysis with open('./messages.json', 'w') as f: f.write(json.dumps(msgs_dict, indent=4)) """ We are done """ G_LOGGER.info('Ended') - + + if __name__ == "__main__": main() From 3fdc0f1602ce10a751a5e0109126ddd10f726e19 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Thu, 2 Mar 2023 15:15:43 +0100 Subject: [PATCH 065/112] Changed wls to work with kurtosis information --- src/node_builders/node_builders.star | 2 +- src/system_variables.star | 3 +- src/wls.star | 2 +- wls-module/wls.py | 114 ++++++++++++++++----------- wls-module/wls_nomos.py | 2 +- 5 files changed, 74 insertions(+), 49 deletions(-) diff --git a/src/node_builders/node_builders.star b/src/node_builders/node_builders.star index 207ed48..1063b6a 100644 --- a/src/node_builders/node_builders.star +++ b/src/node_builders/node_builders.star @@ -54,7 +54,7 @@ def instantiate_services(plan, network_topology, testing): "peer_id" : peer id of the node, as string, "ip_address": ip of the container that has the node, as string "ports": { - "waku_rpc_node_0": (port_number, port_protocol) + "rpc_node_0": (port_number, port_protocol) "libp2p_node_0": (port_number, port_protocol), "prometheus_node_0": (port_number, port_protocol) } diff --git a/src/system_variables.star b/src/system_variables.star index 05f81f7..edac592 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -94,8 +94,7 @@ WLS_CONFIG_PATH = "/wls/config" WLS_TARGETS_PATH = "/wls/targets" WLS_TOMLS_PATH = "/wls/tomls" WLS_TOPOLOGY_PATH = "/wls/network_topology" -# WLS_CMD = ["python3", "wls.py"] -WLS_CMD = ["/bin/bash"] +WLS_CMD = ["python3", "wls.py"] CONTAINER_WLS_CONFIGURATION_FILE_NAME = "config.json" diff --git a/src/wls.star b/src/wls.star index 5de0100..3f2e5a5 100644 --- a/src/wls.star +++ b/src/wls.star @@ -40,7 +40,7 @@ def create_new_topology_information(plan, network_topology): {{.information}} 
""" info = {} - info["information"] = network_topology + info["information"] = json.encode(network_topology) artifact_id = plan.render_templates( config={ diff --git a/wls-module/wls.py b/wls-module/wls.py index e53654e..7337631 100644 --- a/wls-module/wls.py +++ b/wls-module/wls.py @@ -314,59 +314,83 @@ def main(): G_LOGGER.debug(topology) G_LOGGER.info('%d topology loaded' %len(topology)) - - """ Check all nodes are reachable """ - for node_key, node_info in topology.items(): - node_address = node_info["ip_address"]+":"+node_info["ports"]["waku_rpc_"+node_key] + + + """ Check all nodes are reachable + for node_key, node_info in topology["nodes"].items(): + node_address = node_info["ip_address"]+":"+str(node_info["ports"]["rpc_"+node_key][0]) if not check_waku_node(f"http://{node_address}/"): G_LOGGER.error(f"Node {node_key} is not online. Aborted.") sys.exit(1) G_LOGGER.info(f"All {len(topology)} nodes are reachable.") + """ + # Dictionary to count messages of every topic being sent + topics_msg_cnt = {} """ Load Topics """ - topics = [] - try: - tomls = glob.glob('./tomls/*.toml') - tomls.sort() - for toml_file in tomls: - with open(toml_file, mode='rb') as read_file: + nodes = topology["nodes"] + for node, node_info in nodes.items(): + try: + with open("tomls/"+node_info["node_config"], mode='rb') as read_file: toml_config = tomllib.load(read_file) - node_topics_str = toml_config['topics'] - - # Make sure we are tokenising the topics depending if Nim-Waku or Go-Waku - # Ideally we should also pass the network_data.json so we can check directly the type of node - if isinstance(node_topics_str, list): - - # Parses Go Waku style topics list: ["topic_a", "topic_b"] - topics.append(node_topics_str) - else: - # Parses Nim Waku style topics list: "topic_a" "topic_b" - topics.append(list(node_topics_str.split(' '))) + if node_info["image"] == "nim-waku": + topics = list(toml_config["topics"].split(" ")) + elif node_info["image"] == "go-waku": + topics = 
toml_config["topics"] + + for topic in topics: + topics_msg_cnt[topic] = 0 + + nodes[node]["topics"] = topics + except Exception as e: + G_LOGGER.error('%s: %s' % (e.__doc__, e)) + sys.exit() + + + # topics = [] + #try: + # # tomls = glob.glob('./tomls/*.toml') + # tomls = glob.glob('topology_generated/*.toml') + # tomls.sort() + # for toml_file in tomls: + # with open(toml_file, mode='rb') as read_file: + # toml_config = tomllib.load(read_file) + # node_topics_str = toml_config['topics'] + # + # # Make sure we are tokenising the topics depending if Nim-Waku or Go-Waku + # # Ideally we should also pass the network_data.json so we can check directly the type of node + # if isinstance(node_topics_str, list): + # + # # Parses Go Waku style topics list: ["topic_a", "topic_b"] + # topics.append(node_topics_str) + # else: + # # Parses Nim Waku style topics list: "topic_a" "topic_b" + # topics.append(list(node_topics_str.split(' '))) + # + # except Exception as e: + # G_LOGGER.error('%s: %s' % (e.__doc__, e)) + # sys.exit() - except Exception as e: - G_LOGGER.error('%s: %s' % (e.__doc__, e)) - sys.exit() - # Dictionary to count messages of every topic being sent - topics_msg_cnt = {} - for node_topics in topics: - for topic in node_topics: - topics_msg_cnt[topic] = 0 G_LOGGER.info('Loaded nodes topics from toml files: %s' %topics_msg_cnt.keys()) """ Define the subset of emitters """ - num_emitters = int(len(topology) * wls_config['emitters_fraction']) + num_emitters = int(len(nodes) * wls_config["emitters_fraction"]) + if num_emitters == 0: G_LOGGER.error('The number of emitters must be greater than zero. 
Try increasing the fraction of emitters.') sys.exit() - """ NOTE: Emitters will only inject topics they are subscribed to """ - emitters_indices = random.sample(range(len(topology)), num_emitters) - emitters = [topology[i] for i in emitters_indices] - emitters_topics = [topics[i] for i in emitters_indices] - # emitters = random.sample(topology, num_emitters) - G_LOGGER.info('Selected %d emitters out of %d total nodes' %(len(emitters), len(topology))) + random_emitters = dict(random.sample(list(nodes.items()), num_emitters)) + G_LOGGER.info('Selected %d emitters out of %d total nodes' % (len(random_emitters), len(nodes))) + + #""" NOTE: Emitters will only inject topics they are subscribed to """ + #emitters_indices = random.sample(range(len(topology["nodes"])), num_emitters) + #emitters = [topology[i] for i in emitters_indices] + #emitters_topics = [topics[i] for i in emitters_indices] + ## emitters = random.sample(topology, num_emitters) + #G_LOGGER.info('Selected %d emitters out of %d total nodes' %(len(emitters), len(topology))) """ Start simulation """ s_time = time.time() @@ -380,6 +404,7 @@ def main(): # Check end condition elapsed_s = time.time() - s_time + if elapsed_s >= wls_config['simulation_time']: G_LOGGER.info('Simulation ended. Sent %d messages in %ds.' %(len(msgs_dict), elapsed_s)) break @@ -393,28 +418,29 @@ def main(): G_LOGGER.debug('Time Δ: %.6f ms.' 
%((msg_elapsed - next_time_to_msg) * 1000.0)) # Pick an emitter at random from the emitters list - emitter_idx = random.choice(emitters_indices) + # emitter_idx = random.choice(emitters_indices) + random_emitter, random_emitter_info = random.choice(list(random_emitters.items())) - node_address = 'http://%s/' %emitters[emitter_idx] - - emitter_topics = emitters_topics[emitter_idx] + emitter_address = f"http://{random_emitter_info['ip_address']}:{random_emitter_info['ports']['rpc_'+random_emitter][0]}/" + emitter_topics = random_emitter_info["topics"] # Pick a topic at random from the topics supported by the emitter emitter_topic = random.choice(emitter_topics) - G_LOGGER.info('Injecting message of topic %s to network through Waku node %s ...' %(emitter_topic, node_address)) + G_LOGGER.info('Injecting message of topic %s to network through Waku node %s ...' %(emitter_topic, emitter_address)) payload, size = make_payload_dist(dist_type=wls_config['dist_type'].lower(), min_size=wls_config['min_packet_size'], max_size=wls_config['max_packet_size']) - response, elapsed, waku_msg, ts = send_waku_msg(node_address, topic=emitter_topic, payload=payload, nonce=len(msgs_dict)) + response, elapsed, waku_msg, ts = send_waku_msg(emitter_address, topic=emitter_topic, payload=payload, nonce=len(msgs_dict)) + if response['result']: msg_hash = hashlib.sha256(waku_msg.encode('utf-8')).hexdigest() if msg_hash in msgs_dict: G_LOGGER.error('Hash collision. 
%s already exists in dictionary' %msg_hash) continue - msgs_dict[msg_hash] = {'ts' : ts, 'injection_point' : node_address, 'nonce' : len(msgs_dict), 'topic' : emitter_topic, 'payload' : payload, 'payload_size' : size} + msgs_dict[msg_hash] = {'ts' : ts, 'injection_point' : emitter_address, 'nonce' : len(msgs_dict), 'topic' : emitter_topic, 'payload' : payload, 'payload_size' : size} # Compute the time to next message - next_time_to_msg = get_next_time_to_msg(wls_config['inter_msg_type'], wls_config['msg_rate'], wls_config['simulation_time']) + next_time_to_msg = get_next_time_to_msg(wls_config['inter_msg_type'], wls_config['message_rate'], wls_config['simulation_time']) G_LOGGER.debug('Next message will happen in %d ms.' %(next_time_to_msg * 1000.0)) last_msg_time = time.time() diff --git a/wls-module/wls_nomos.py b/wls-module/wls_nomos.py index 4ec29a9..3dd7a8d 100644 --- a/wls-module/wls_nomos.py +++ b/wls-module/wls_nomos.py @@ -18,7 +18,7 @@ """ Globals """ G_APP_NAME = 'WLS' G_LOG_LEVEL = 'DEBUG' -G_DEFAULT_CONFIG_FILE = './config/wsl.yml' +G_DEFAULT_CONFIG_FILE = './config/config.json' G_DEFAULT_TOPOLOGY_FILE = './tomls/network_data.json' G_LOGGER = None From 013a799daca429bc5a0bfbb8d10ddfaa600e3638 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Fri, 3 Mar 2023 01:07:40 +0100 Subject: [PATCH 066/112] WIP: Starting clean code to refactor as it already works for several nodes per container. 
--- config/config.json | 4 ++-- wls-module/wls.py | 46 +--------------------------------------------- 2 files changed, 3 insertions(+), 47 deletions(-) diff --git a/config/config.json b/config/config.json index 49850cd..bb6a805 100644 --- a/config/config.json +++ b/config/config.json @@ -9,11 +9,11 @@ "interconnection_batch": 10 }, "gennet": { - "num_nodes": 3, + "num_nodes": 10, "num_topics": 1, "num_partitions": 1, "num_subnets": 1, - "container_size": "1", + "container_size": "3", "node_type_distribution": { "nwaku":100, "gowaku":0, "nomos":0 }, "node_type": "desktop", "network_type": "newmanwattsstrogatz", diff --git a/wls-module/wls.py b/wls-module/wls.py index 7337631..fc9f0be 100644 --- a/wls-module/wls.py +++ b/wls-module/wls.py @@ -315,16 +315,6 @@ def main(): G_LOGGER.debug(topology) G_LOGGER.info('%d topology loaded' %len(topology)) - - """ Check all nodes are reachable - for node_key, node_info in topology["nodes"].items(): - node_address = node_info["ip_address"]+":"+str(node_info["ports"]["rpc_"+node_key][0]) - if not check_waku_node(f"http://{node_address}/"): - G_LOGGER.error(f"Node {node_key} is not online. 
Aborted.") - sys.exit(1) - G_LOGGER.info(f"All {len(topology)} nodes are reachable.") - """ - # Dictionary to count messages of every topic being sent topics_msg_cnt = {} """ Load Topics """ @@ -341,37 +331,11 @@ def main(): for topic in topics: topics_msg_cnt[topic] = 0 + # Load topics into topology for easier access nodes[node]["topics"] = topics except Exception as e: G_LOGGER.error('%s: %s' % (e.__doc__, e)) sys.exit() - - - # topics = [] - #try: - # # tomls = glob.glob('./tomls/*.toml') - # tomls = glob.glob('topology_generated/*.toml') - # tomls.sort() - # for toml_file in tomls: - # with open(toml_file, mode='rb') as read_file: - # toml_config = tomllib.load(read_file) - # node_topics_str = toml_config['topics'] - # - # # Make sure we are tokenising the topics depending if Nim-Waku or Go-Waku - # # Ideally we should also pass the network_data.json so we can check directly the type of node - # if isinstance(node_topics_str, list): - # - # # Parses Go Waku style topics list: ["topic_a", "topic_b"] - # topics.append(node_topics_str) - # else: - # # Parses Nim Waku style topics list: "topic_a" "topic_b" - # topics.append(list(node_topics_str.split(' '))) - # - # except Exception as e: - # G_LOGGER.error('%s: %s' % (e.__doc__, e)) - # sys.exit() - - G_LOGGER.info('Loaded nodes topics from toml files: %s' %topics_msg_cnt.keys()) @@ -385,13 +349,6 @@ def main(): random_emitters = dict(random.sample(list(nodes.items()), num_emitters)) G_LOGGER.info('Selected %d emitters out of %d total nodes' % (len(random_emitters), len(nodes))) - #""" NOTE: Emitters will only inject topics they are subscribed to """ - #emitters_indices = random.sample(range(len(topology["nodes"])), num_emitters) - #emitters = [topology[i] for i in emitters_indices] - #emitters_topics = [topics[i] for i in emitters_indices] - ## emitters = random.sample(topology, num_emitters) - #G_LOGGER.info('Selected %d emitters out of %d total nodes' %(len(emitters), len(topology))) - """ Start simulation """ 
s_time = time.time() last_msg_time = 0 @@ -401,7 +358,6 @@ def main(): G_LOGGER.info('Starting a simulation of %d seconds ...' %wls_config['simulation_time']) while True: - # Check end condition elapsed_s = time.time() - s_time From db6996ded1f6b2b433b7d090acaeab521bef8644 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Fri, 3 Mar 2023 18:47:34 +0100 Subject: [PATCH 067/112] First refactor after rebase with Analysis --- wls-module/wls.py | 299 +++++++++++++++++----------------------------- 1 file changed, 111 insertions(+), 188 deletions(-) diff --git a/wls-module/wls.py b/wls-module/wls.py index fc9f0be..e515504 100644 --- a/wls-module/wls.py +++ b/wls-module/wls.py @@ -1,24 +1,28 @@ -#!/usr/bin/env python3 -""" -Description: Wakurtosis load simulator -""" - """ Dependencies """ -import sys, logging, json, time, random, os, argparse, tomllib, glob, hashlib +import argparse +import hashlib +import json +import logging +import random +import sys +import time +import tomllib import requests -import rtnorm +# Project Imports +import rtnorm """ Globals """ G_APP_NAME = 'WLS' G_LOG_LEVEL = 'DEBUG' G_DEFAULT_CONFIG_FILE = './config/config.json' G_DEFAULT_TOPOLOGY_FILE = './network_topology/network_data.json' -G_LOGGER = None +G_LOGGER = logging.getLogger(G_APP_NAME) +handler = logging.StreamHandler(sys.stdout) -""" Custom logging formatter """ + +# Custom logging formatter class CustomFormatter(logging.Formatter): - # Set different formats for every logging level time_name_stamp = "[%(asctime)s.%(msecs)03d] [" + G_APP_NAME + "]" FORMATS = { @@ -35,81 +39,8 @@ def format(self, record): formatter = logging.Formatter(log_fmt, '%d-%m-%Y %H:%M:%S') return formatter.format(record) -def check_waku_node(node_address): - - data = { - 'jsonrpc': '2.0', - 'method': 'get_waku_v2_debug_v1_info', - # 'method' : 'get_waku_v2_debug_v1_version', - 'id': 1, - 'params': []} - - G_LOGGER.info('Waku RPC: %s from %s' %(data['method'], node_address)) - - try: - response = 
requests.post(node_address, data=json.dumps(data), headers={'content-type': 'application/json'}) - except Exception as e: - G_LOGGER.debug('%s: %s' % (e.__doc__, e)) - return False - - try: - response_obj = response.json() - except Exception as e: - G_LOGGER.debug('%s: %s' % (e.__doc__, e)) - return False - - G_LOGGER.debug('Response from %s: %s' %(node_address, response_obj)) - - return True - -def get_waku_msgs(node_address, topic, cursor=None): - - data = { - 'jsonrpc': '2.0', - 'method': 'get_waku_v2_store_v1_messages', - 'id': 1, - 'params': [topic, None, None, None, {"pageSize": 100, "cursor": cursor,"forward": True}] - } - - G_LOGGER.debug('Waku RPC: %s from %s' %(data['method'], node_address)) - - s_time = time.time() - - response = requests.post(node_address, data=json.dumps(data), headers={'content-type': 'application/json'}) - - elapsed_ms =(time.time() - s_time) * 1000 - - response_obj = response.json() - - # G_LOGGER.debug('Response from %s: %s [%.4f ms.]' %(node_address, response_obj, elapsed_ms)) - - return response_obj, elapsed_ms - -# https://rfc.vac.dev/spec/16/#get_waku_v2_relay_v1_messages -def get_last_waku_msgs(node_address, topic): - - data = { - 'jsonrpc': '2.0', - 'method': 'get_waku_v2_relay_v1_messages', - 'id': 1, - 'params' : [topic]} - - G_LOGGER.debug('Waku RPC: %s from %s' %(data['method'], node_address)) - - s_time = time.time() - - response = requests.post(node_address, data=json.dumps(data), headers={'content-type': 'application/json'}) - - elapsed_ms =(time.time() - s_time) * 1000 - - response_obj = response.json() - - # G_LOGGER.debug('Response from %s: %s [%.4f ms.]' %(node_address, response_obj, elapsed_ms)) - - return response_obj, elapsed_ms def send_waku_msg(node_address, topic, payload, nonce=1): - # waku_msg = { # 'nonce' : nonce, # 'timestamp' : time.time_ns(), @@ -147,15 +78,18 @@ def send_waku_msg(node_address, topic, payload, nonce=1): return response_obj, elapsed_ms, json.dumps(waku_msg), my_payload['ts'] + # 
Generate a random interval using a Poisson distribution def poisson_interval(rate): return random.expovariate(rate) + def make_payload(size): payload = hex(random.getrandbits(4*size)) G_LOGGER.debug('Payload of size %d bytes: %s' %(size, payload)) return payload + def make_payload_dist(dist_type, min_size, max_size): # Check if min and max packet sizes are the same @@ -189,23 +123,6 @@ def make_payload_dist(dist_type, min_size, max_size): return '0x00', 0 -def parse_targets(enclave_dump_path, waku_port=8545): - - targets = [] - - G_LOGGER.info('Extracting Waku node addresses from Kurtosus enclance dump in %s' %enclave_dump_path) - - for path_obj in os.walk(enclave_dump_path): - if 'waku_' in path_obj[0]: - with open(path_obj[0] + '/spec.json', "r") as read_file: - spec_obj = json.load(read_file) - network_settings = spec_obj['NetworkSettings'] - waku_address = network_settings['Ports']['%d/tcp' %waku_port] - targets.append('%s:%s' %(waku_address[0]['HostIp'], waku_address[0]['HostPort'])) - - G_LOGGER.info('Parsed %d Waku nodes' %len(targets)) - - return targets def get_next_time_to_msg(inter_msg_type, msg_rate, simulation_time): @@ -218,68 +135,35 @@ def get_next_time_to_msg(inter_msg_type, msg_rate, simulation_time): G_LOGGER.error('%s is not a valid inter_msg_type. Aborting.' 
%inter_msg_type) sys.exit() -def get_all_messages_from_node_from_topic(node_address, topic): - - page_cnt = 0 - msg_cnt = 0 - - # Retrieve the first page - response, elapsed = get_waku_msgs(node_address, topic) - if 'error' in response: - G_LOGGER.error(response['error']) - return 0 - - messages = response['result']['messages'] - msg_cnt += len(messages) - G_LOGGER.debug('Got page %d with %d messages from node %s and topic: %s' %(page_cnt, len(messages), node_address, topic)) - - for msg_idx, msg in enumerate(messages): - # Decode the payload - payload_obj = json.loads(''.join(map(chr, msg['payload']))) - - # Retrieve further pages - while(response['result']['pagingOptions']): - page_cnt += 1 - cursor = response['result']['pagingOptions']['cursor'] - index = {"digest" : cursor['digest'], "receivedTime" : cursor['receiverTime']} - response, elapsed = get_waku_msgs(node_address, topic, cursor) - if 'error' in response: - G_LOGGER.error(response['error']) - break - - messages = response['result']['messages'] - msg_cnt += len(messages) - G_LOGGER.debug('Got page %d with %d messages from node %s and topic: %s' %(page_cnt, len(messages), node_address, topic)) - - for msg_idx, msg in enumerate(messages): - # Decode the payload - payload_obj = json.loads(''.join(map(chr, msg['payload']))) - - return msg_cnt -def main(): - - global G_LOGGER - +def innit_logging(): """ Init Logging """ - G_LOGGER = logging.getLogger(G_APP_NAME) - handler = logging.StreamHandler(sys.stdout) handler.setFormatter(CustomFormatter()) G_LOGGER.addHandler(handler) - G_LOGGER.info('Started') + +def configure_logging(wls_config, config_file): + G_LOGGER.setLevel(wls_config['debug_level']) + handler.setLevel(wls_config['debug_level']) + G_LOGGER.debug(wls_config) + G_LOGGER.info('Configuration loaded from %s' %config_file) + + +def parse_cli(): """ Parse command line args. 
""" parser = argparse.ArgumentParser() - parser.add_argument("-cfg", "--config_file", help="Config file", action="store_true", default=G_DEFAULT_CONFIG_FILE) + parser.add_argument("-cfg", "--config_file", help="Config file", action="store_true", + default=G_DEFAULT_CONFIG_FILE) parser.add_argument("-t", "--topology_file", help="Topology file", action="store_true", default=G_DEFAULT_TOPOLOGY_FILE) args = parser.parse_args() - config_file = args.config_file - topology_file = args.topology_file - + return args + + +def load_config_file(config_file): """ Load config file """ try: with open(config_file, 'r') as f: @@ -287,19 +171,11 @@ def main(): except Exception as e: G_LOGGER.error('%s: %s' % (e.__doc__, e)) sys.exit() - - # Set loglevel from config - wls_config = config['wls'] - - G_LOGGER.setLevel(wls_config['debug_level']) - handler.setLevel(wls_config['debug_level']) - G_LOGGER.debug(wls_config) - G_LOGGER.info('Configuration loaded from %s' %config_file) + return config - # Set RPNG seed from config - random.seed(config['general']['prng_seed']) +def load_topology(topology_file): """ Load topology """ try: with open(topology_file, 'r') as read_file: @@ -313,56 +189,64 @@ def main(): sys.exit(1) G_LOGGER.debug(topology) - G_LOGGER.info('%d topology loaded' %len(topology)) + G_LOGGER.info('%d topology loaded' % len(topology)) - # Dictionary to count messages of every topic being sent - topics_msg_cnt = {} + return topology + + +def load_topics_into_topology(topology): """ Load Topics """ nodes = topology["nodes"] for node, node_info in nodes.items(): try: - with open("tomls/"+node_info["node_config"], mode='rb') as read_file: + with open("tomls/" + node_info["node_config"], mode='rb') as read_file: toml_config = tomllib.load(read_file) if node_info["image"] == "nim-waku": topics = list(toml_config["topics"].split(" ")) elif node_info["image"] == "go-waku": topics = toml_config["topics"] - for topic in topics: - topics_msg_cnt[topic] = 0 - # Load topics into 
topology for easier access nodes[node]["topics"] = topics except Exception as e: G_LOGGER.error('%s: %s' % (e.__doc__, e)) sys.exit() - - G_LOGGER.info('Loaded nodes topics from toml files: %s' %topics_msg_cnt.keys()) + G_LOGGER.info('Loaded nodes topics from toml files') + + +def get_random_emitters(topology, wls_config): + nodes = topology["nodes"] """ Define the subset of emitters """ num_emitters = int(len(nodes) * wls_config["emitters_fraction"]) if num_emitters == 0: - G_LOGGER.error('The number of emitters must be greater than zero. Try increasing the fraction of emitters.') + G_LOGGER.error( + 'The number of emitters must be greater than zero. Try increasing the fraction of emitters.') sys.exit() random_emitters = dict(random.sample(list(nodes.items()), num_emitters)) G_LOGGER.info('Selected %d emitters out of %d total nodes' % (len(random_emitters), len(nodes))) + return random_emitters + + +def start_traffic_inyection(wls_config, random_emitters): """ Start simulation """ s_time = time.time() last_msg_time = 0 next_time_to_msg = 0 msgs_dict = {} - G_LOGGER.info('Starting a simulation of %d seconds ...' %wls_config['simulation_time']) + G_LOGGER.info('Starting a simulation of %d seconds ...' % wls_config['simulation_time']) while True: # Check end condition elapsed_s = time.time() - s_time - if elapsed_s >= wls_config['simulation_time']: - G_LOGGER.info('Simulation ended. Sent %d messages in %ds.' %(len(msgs_dict), elapsed_s)) + if elapsed_s >= wls_config['simulation_time']: + G_LOGGER.info( + 'Simulation ended. Sent %d messages in %ds.' % (len(msgs_dict), elapsed_s)) break # Send message @@ -371,38 +255,49 @@ def main(): if msg_elapsed <= next_time_to_msg: continue - G_LOGGER.debug('Time Δ: %.6f ms.' %((msg_elapsed - next_time_to_msg) * 1000.0)) - + G_LOGGER.debug('Time Δ: %.6f ms.' 
% ((msg_elapsed - next_time_to_msg) * 1000.0)) + # Pick an emitter at random from the emitters list - # emitter_idx = random.choice(emitters_indices) random_emitter, random_emitter_info = random.choice(list(random_emitters.items())) - - emitter_address = f"http://{random_emitter_info['ip_address']}:{random_emitter_info['ports']['rpc_'+random_emitter][0]}/" + + emitter_address = f"http://{random_emitter_info['ip_address']}:{random_emitter_info['ports']['rpc_' + random_emitter][0]}/" emitter_topics = random_emitter_info["topics"] # Pick a topic at random from the topics supported by the emitter emitter_topic = random.choice(emitter_topics) - G_LOGGER.info('Injecting message of topic %s to network through Waku node %s ...' %(emitter_topic, emitter_address)) + G_LOGGER.info('Injecting message of topic %s to network through Waku node %s ...' % ( + emitter_topic, emitter_address)) - payload, size = make_payload_dist(dist_type=wls_config['dist_type'].lower(), min_size=wls_config['min_packet_size'], max_size=wls_config['max_packet_size']) - response, elapsed, waku_msg, ts = send_waku_msg(emitter_address, topic=emitter_topic, payload=payload, nonce=len(msgs_dict)) + payload, size = make_payload_dist(dist_type=wls_config['dist_type'].lower(), + min_size=wls_config['min_packet_size'], + max_size=wls_config['max_packet_size']) + response, elapsed, waku_msg, ts = send_waku_msg(emitter_address, topic=emitter_topic, + payload=payload, nonce=len(msgs_dict)) if response['result']: msg_hash = hashlib.sha256(waku_msg.encode('utf-8')).hexdigest() if msg_hash in msgs_dict: - G_LOGGER.error('Hash collision. %s already exists in dictionary' %msg_hash) + G_LOGGER.error('Hash collision. 
%s already exists in dictionary' % msg_hash) continue - msgs_dict[msg_hash] = {'ts' : ts, 'injection_point' : emitter_address, 'nonce' : len(msgs_dict), 'topic' : emitter_topic, 'payload' : payload, 'payload_size' : size} - + msgs_dict[msg_hash] = {'ts': ts, 'injection_point': emitter_address, + 'nonce': len(msgs_dict), 'topic': emitter_topic, + 'payload': payload, 'payload_size': size} + # Compute the time to next message - next_time_to_msg = get_next_time_to_msg(wls_config['inter_msg_type'], wls_config['message_rate'], wls_config['simulation_time']) - G_LOGGER.debug('Next message will happen in %d ms.' %(next_time_to_msg * 1000.0)) - + next_time_to_msg = get_next_time_to_msg(wls_config['inter_msg_type'], + wls_config['message_rate'], + wls_config['simulation_time']) + G_LOGGER.debug('Next message will happen in %d ms.' % (next_time_to_msg * 1000.0)) + last_msg_time = time.time() - + elapsed_s = time.time() - s_time + return msgs_dict + + +def save_messages(msgs_dict): # Save messages for further analysis with open('./messages.json', 'w') as f: f.write(json.dumps(msgs_dict, indent=4)) @@ -411,6 +306,34 @@ def main(): G_LOGGER.info('Ended') -if __name__ == "__main__": +def main(): + innit_logging() + + args = parse_cli() + + config_file = args.config_file + topology_file = args.topology_file + + config = load_config_file(config_file) + # Set loglevel from config + wls_config = config['wls'] + + configure_logging(wls_config, config_file) + + # Set RPNG seed from config + random.seed(config['general']['prng_seed']) + + topology = load_topology(topology_file) + + load_topics_into_topology(topology) + + random_emitters = get_random_emitters(topology, wls_config) + + msgs_dict = start_traffic_inyection(wls_config, random_emitters) + + save_messages(msgs_dict) + + +if __name__ == "__main__": main() From 1236ae679bf52396d879bbfd214499f1e676f100 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Fri, 3 Mar 2023 19:47:56 +0100 Subject: [PATCH 068/112] Refactor and 
restructured wls --- src/tests/test_wls.star | 0 wls-module/{dockerfile => Dockerfile} | 0 wls-module/__init__.py | 0 wls-module/build.sh | 1 - wls-module/config/wls.yml | 30 --- wls-module/src/__init__.py | 0 wls-module/{ => src}/nomos.py | 0 .../__pycache__/__init__.cpython-311.pyc | Bin 0 -> 181 bytes .../__pycache__/test_wls.cpython-311.pyc | Bin 0 -> 15017 bytes wls-module/src/utils/__init__.py | 0 wls-module/src/utils/files.py | 39 ++++ wls-module/src/utils/logger.py | 45 +++++ wls-module/src/utils/payloads.py | 47 +++++ wls-module/{ => src/utils}/rtnorm.py | 0 wls-module/src/utils/waku_messaging.py | 85 +++++++++ wls-module/src/wls.py | 175 ++++++++++++++++++ wls-module/{ => src}/wls_nomos.py | 4 +- 17 files changed, 392 insertions(+), 34 deletions(-) create mode 100644 src/tests/test_wls.star rename wls-module/{dockerfile => Dockerfile} (100%) create mode 100644 wls-module/__init__.py delete mode 100644 wls-module/config/wls.yml create mode 100644 wls-module/src/__init__.py rename wls-module/{ => src}/nomos.py (100%) create mode 100644 wls-module/src/tests/__pycache__/__init__.cpython-311.pyc create mode 100644 wls-module/src/tests/__pycache__/test_wls.cpython-311.pyc create mode 100644 wls-module/src/utils/__init__.py create mode 100644 wls-module/src/utils/files.py create mode 100644 wls-module/src/utils/logger.py create mode 100644 wls-module/src/utils/payloads.py rename wls-module/{ => src/utils}/rtnorm.py (100%) create mode 100644 wls-module/src/utils/waku_messaging.py create mode 100644 wls-module/src/wls.py rename wls-module/{ => src}/wls_nomos.py (97%) diff --git a/src/tests/test_wls.star b/src/tests/test_wls.star new file mode 100644 index 0000000..e69de29 diff --git a/wls-module/dockerfile b/wls-module/Dockerfile similarity index 100% rename from wls-module/dockerfile rename to wls-module/Dockerfile diff --git a/wls-module/__init__.py b/wls-module/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/wls-module/build.sh 
b/wls-module/build.sh index efdcccc..37a68df 100755 --- a/wls-module/build.sh +++ b/wls-module/build.sh @@ -1,5 +1,4 @@ #!/bin/sh -# pip freeze > requirements.txt image_id=$(docker images -q wls:0.0.1) echo $image_id docker image rm -f $image_id diff --git a/wls-module/config/wls.yml b/wls-module/config/wls.yml deleted file mode 100644 index 2638b41..0000000 --- a/wls-module/config/wls.yml +++ /dev/null @@ -1,30 +0,0 @@ -general: - - debug_level : "DEBUG" - - targets_file : "./targets/targets.json" - - prng_seed : 0 - - # Simulation time in seconds - simulation_time : 100 - - # Message rate in messages per second - msg_rate : 10 - - # Packet size in bytes - min_packet_size : 2 - max_packet_size : 1024 - - # Packe size distribution - # Values: uniform and gaussian - dist_type : "gaussian" - - # Fraction (of the total number of nodes) that inject traffic - # Values: [0., 1.] - emitters_fraction : 0.5 - - # Inter-message times - # Values: uniform and poisson - inter_msg_type : "uniform" - diff --git a/wls-module/src/__init__.py b/wls-module/src/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/wls-module/nomos.py b/wls-module/src/nomos.py similarity index 100% rename from wls-module/nomos.py rename to wls-module/src/nomos.py diff --git a/wls-module/src/tests/__pycache__/__init__.cpython-311.pyc b/wls-module/src/tests/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..224946aefb10f7ec1b46de1d52e2844e10d67140 GIT binary patch literal 181 zcmZ3^%ge<81UF;ZQ$X}%5CH>>P{wCAAY(d13PUi1CZpd)=dU}j`w{J;PsikN|70BgN2?*IS* literal 0 HcmV?d00001 diff --git a/wls-module/src/tests/__pycache__/test_wls.cpython-311.pyc b/wls-module/src/tests/__pycache__/test_wls.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a2e5b02a48591171ce3960cac1563dc7043f61a GIT binary patch literal 15017 zcmdU0TWl0rdaml~>aKQI+iqXlHW=^)8`@xljjs#~*kHgjj2Q;Q3?3$xrmJl?w7Wf3 zskSO4%c}q1a39BR`q-Y;_h-Ac!H1bpa 
z|5RV9`eH6aW_Qcw&!^6{>ip+B|9}2-{Er?_2?f`G|F26tyOW~+2OrWGn+iO89|E^1 zkrL?$HE#T+$7y)BMQqc|I5TY@x6>HUMA&J^xPzu_)JaOTe?p0Dh`QoTS9q79sITDH zxW+k=K1cZ-Pw;WnZ_{1VvEWr9HWP~K>`XujPWfq_xfYRwsRHDWE%oqh5dycU5QQ%? zZVNFY{Rwp!;=W2{pr~=XX#0d3XGI3kfv+>}6j?w{bO5?UCt!)l0lGyOATO2xdPFy1 zsmKGCi5|dmu@ulNmH}3X<$#r<7qCjK0IU`(0exZ>V2xM}SS$Jf>%j1b#DWN`Oxji}0 zoTHZLQNLZXL%PmF7fFg<5)j2>kx(EyI|H4n#Af_-oXYKBycjB2d15Mt5D4FSf?+>XM+%4ERzkk107q?Wj__@276C+X|ee7!F$ zv)>-my3ZuJcQo!DHGTi!VzUS!L#tc?Y(mA1(DDgPs0d=)20djEuAtcT>a4_$of$nb za?+3&N;6+*)rEwn%x&9DWJ&{6Wu{qTY_TtKCh62hv+ z5m}|uTc9%e3_zM)LrLE*&A01i1odLv!Y?N(?KT8%p~8W``Imwdd+M^vt%ss1CBp!U z%Br(MijWwuU{)3C=Vq#tlI_=Q*O`mdHJYX_Ld_Wy3=~yn>dUfe&e0x{q?iO|18=CU zcLQNLB5xJ*8A}cz=^gJVE>J*)zeYrcf1$&rUV8hfrE4mIrYl}){n+Tfz$>Go@ZtFucsoefV1CPTVA z8WX`lBj%WH$81?EZpkTO7ZTH2!tc^~DWuFwQQ_l2WHw}&c)Wn^2}X0oVpwt*792gT zyKzMr5{)&Xmn;ucZ%In&F4B59IuQfQ99~4mE(Sv!E`vyhAz^Mph~fw27!vsza6u;X zF!o@o= z4TCDbZLOns#iO>LfS|^=J#N~&a$c=H26ud9t+G0Pd~NHt`0>wupPv35U#9W(w=XW* z7blW@kH+_?e9szR{*bSK0P$mq);pILcWc`YEKjanQ+rM(`BNHyO65eZ^=uLx5PwPGkG2OCa;CO?okPsic&X1FG3$eKSE~3H0PX*5o$_rVEkc(BM3tP zd9F} z)y!(OeW=d4T2BL7*nzGvV%J>yy|5#P8Aa3@sLITG1Ku}tDa2K1JX)0|v9{3pk-D-j zzrxB8T!jU!t*E=0tyL&D+Q7x~gL|xG6V?pCYw9d9o3eEAb2-{dIttmOeVErZd_5SN zQNppPVYUroNWczTBEDAJEDN)bar^4pYe_4_JntgJ2k%XYQHw|bZYR3K?`okb9C%Qn5mD~a47N!^M#Vk)NEjO_`?V{79 zxHwbTti;?R|C*GT=ecww_M4%&4kEpX9?R%ASm;`9NrRnebnA5{3sMM2l^9x+#3N|f zlPs|(rDIs@afD$k#icXJSc%DI+)h+q(K_c^Eh@5TzVlG4T=F7>cDOFQm<3t8BhkA2 z=GU`o+j+bjFQ_%|#ZSSCPUHQHyOaC@jX$9B2eOvq`-w?yTgT$Z+P2>1>)N)X_xip* zsSb@L`STioUggiPd$8?wDvM7^_#0D(v3015S*@}U)jC(V(tsA? zWi;fwe~`p`y}B`%u0McY+{~Ipy(pp2+o0iT3?U6SQH|1m%!CY*(CsR{3y@2wotqO% zFcjq87o(MtTfk;|UyNM+FQhFAw@!XNthT+2cjGy=W-NYcEu$$9CcPb+w*$3XKLMgC z+ZU&j<$YRtpIY9BTGB`&>8K_9QA?WQvZb@3l$OlP<#xCrE)B>_s4e}NO(+yf(a}^q zCs#^K+J@?x`_0uucJ_Wh58+k2eTZ|datwyuG=_N+uD1_0I9Ho#KnqzjTJk!`19pgE zk3wWbJ8G!~8vH2u*H(B}bDo~hSsrb&gU6)n&0kQ#{+(mg7_{h(c>?Yd3ig09ibMB? 
zro)O7l4M~*3Ix%HTVf2?L*-;h5sbZ4A)U$47tZ9RBb_Qtujik8Gjs@ETPJgW3*m2Hdt%NLTB zhqTH=aqe+u52|+Y?21=yegp33?l&P@$s*)q2o#VSc)wng)SMy{&hf>iT&J8#X3|A? z4{03X1As~Rh3oMVvNIuFg0S=_06G1>t!Teb1tJr3Ezj+E%r=Ad;*3|U=VjHq=Q%x} zIJtN>aVW{{*SP&^`p(}j%5dd3&|I-zf1Qr*e1^TE_t05fRuZ9`ove~fs^CNTL|_?i z#7amaiOknH<}#i+!^$c|2tyrMt*oN?u0fs8mH>(g(4bC1mv}Es!meIoJ~by!nOtw^ z3^x)<4)lcgq9fcCM-mEwF`aLc^%?Hl-1QB*nENjKpvQT?oCYjBtri(7Kq}(UhKlm9 zSJ@MJT|-5}lR{DmVFEz5NPI_8!V-yh&Fy+FWsQsrSpO6PPMnsW%S>%|Af@=eQSOl% zP*%2iS^#^(xKN3aNf7HxUD+KZk)5}NDXWi=C@*_DuleshlW*nWv8T}umlB>1bs#)J z&x&3qho@&F0aQ@}iN)5RU?0LZ6RyytqX-zQ*9c)*Fy)^JN5k?|NQAR124Zpq)Q2&ZHSRTSQ}Ut=-DC z=VZT&&AbKhqQa?2)*RAm4iVv`2v(ouS~ad!P2c(A$ylFXLrZDvGfazik)5=OjynwO zaf6Q$V#uyDiQz?$SPC&^VtFc$_YSi_7hBI+)!Zep3>R#htm$lGg;k`RoGS!%oST;% zVX7P@WQ1MqnIJjG+Ud6$tBC{fnXrisn^~QK)Zfc~MlT7^2ynPl0$#!yl8qo`?i#7YS9{x+~bmsBUu%21ezptCwP{Gf*#JI%ZF3FW8V*z#K}1ZQu#} zye(^5!U+-vnC1k3ehIM2UPNVq=Y5`;x8DV!euaW17hy=|DIJS{MbA8wz@e0P;q;O{ z5}(VWc}Dc0WocXsEPNpj{JaZJw3KG^iO#%wikB-hm2KGF{^~}EPdbx?it7((}AmaNSr(gVpvw-R1KUgC(mRO z+wf;g;pTkGoPZPOaPS_8JS|V68p4ee_#c3fJR4EQjH3c*q3Cw3lgw62jPAk*LAfy# z(#xj9QD_$q)+n%on8S#`b#ugqv$KQz5I3-Xw70*%ufI?1pP1kPw8_oOT|83YhiBy@=BuBm`LMV|A{O&I6$a+=RM zmzR_J?@*evOf`aQVgC673_#%;YllK6OWVdYAa z8__t(K7t0E=cgXk_x_r%`Jr$31K;kXuTAr{Jp~`fUUX{gMK8wQ$5oALlc-gN;FU5? 
z#kxfIogIsfNp7#k?NzzGrWCcgTdV6?zLDexHEvMl22E*+KCN!g;@Kp(PviEf+`hb| z3rVh9(#hkHGOX~&EvM)?{}JqDIVRZ z3{3M*G^sY3>cPxhHtS{r;BN*QMNmzB3aOxEqO4t5ytaFek)>ffq!f`I=r zBnWzmAf*1`O3-;h!2e@OrMQFmUscl~Why30rSNq|ceh|@H^Lr-c7#p@JSrl+i7<+Q z8wb(_gaASi0e$GwRRr9bk>(Np0^uhJw-A;Q{taM>B5zB~MhV>z1kfWTe@f=9o6z0( z^O!APx6y13Oc-pP1qz2AJu0CyU_uyLlS6tP3xErW$an<>U50??wl=s!#^5|G{RcdP z*eUrF0I=|B`Y}~@ll-hvo}1)njcQX1-Qdxqt5mAee6Kr385%yQFvh2}(j8Ap;x|RI HkhlClQ2_Az literal 0 HcmV?d00001 diff --git a/wls-module/src/utils/__init__.py b/wls-module/src/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/wls-module/src/utils/files.py b/wls-module/src/utils/files.py new file mode 100644 index 0000000..3e5754d --- /dev/null +++ b/wls-module/src/utils/files.py @@ -0,0 +1,39 @@ +# Python Imports +import json +import sys + +# Project Imports +import logger + +G_LOGGER, handler = logger.innit_logging() + + +def load_config_file(config_file): + """ Load config file """ + try: + with open(config_file, 'r') as f: + config = json.load(f) + except Exception as e: + G_LOGGER.error('%s: %s' % (e.__doc__, e)) + sys.exit() + + return config + + +def load_topology(topology_file): + """ Load topology """ + try: + with open(topology_file, 'r') as read_file: + topology = json.load(read_file) + except Exception as e: + G_LOGGER.error('%s: %s' % (e.__doc__, e)) + sys.exit() + + if len(topology) == 0: + G_LOGGER.error('Cannot find valid topology. 
Aborting.') + sys.exit(1) + + G_LOGGER.debug(topology) + G_LOGGER.info('%d topology loaded' % len(topology)) + + return topology diff --git a/wls-module/src/utils/logger.py b/wls-module/src/utils/logger.py new file mode 100644 index 0000000..951b677 --- /dev/null +++ b/wls-module/src/utils/logger.py @@ -0,0 +1,45 @@ +import sys +import logging + +G_APP_NAME = 'WLS' + + +# Custom logging formatter +class CustomFormatter(logging.Formatter): + # Set different formats for every logging level + time_name_stamp = "[%(asctime)s.%(msecs)03d] [" + G_APP_NAME + "]" + FORMATS = { + logging.ERROR: time_name_stamp + " ERROR in %(module)s.py %(funcName)s() %(lineno)d - %(msg)s", + logging.WARNING: time_name_stamp + " WARNING - %(msg)s", + logging.CRITICAL: time_name_stamp + " CRITICAL in %(module)s.py %(funcName)s() %(lineno)d - %(msg)s", + logging.INFO: time_name_stamp + " %(msg)s", + logging.DEBUG: time_name_stamp + " %(funcName)s() %(msg)s", + 'DEFAULT': time_name_stamp + " %(msg)s", + } + + def format(self, record): + log_fmt = self.FORMATS.get(record.levelno, self.FORMATS['DEFAULT']) + formatter = logging.Formatter(log_fmt, '%d-%m-%Y %H:%M:%S') + return formatter.format(record) + + +def innit_logging(): + """ Init Logging """ + handler = logging.StreamHandler(sys.stdout) + G_LOGGER = logging.getLogger(G_APP_NAME) + handler.setFormatter(CustomFormatter()) + G_LOGGER.addHandler(handler) + G_LOGGER.info('Started') + + return G_LOGGER, handler + + +def configure_logging(G_LOGGER, handler, wls_config, config_file): + G_LOGGER.setLevel(wls_config['debug_level']) + handler.setLevel(wls_config['debug_level']) + G_LOGGER.debug(wls_config) + G_LOGGER.info('Configuration loaded from %s' % config_file) + + + + diff --git a/wls-module/src/utils/payloads.py b/wls-module/src/utils/payloads.py new file mode 100644 index 0000000..f28ea31 --- /dev/null +++ b/wls-module/src/utils/payloads.py @@ -0,0 +1,47 @@ +# Python Imports +import random + +# Project Imports +import logger +import rtnorm + 
+G_LOGGER, handler = logger.innit_logging() + + +def _make_payload(size): + payload = hex(random.getrandbits(4 * size)) + G_LOGGER.debug('Payload of size %d bytes: %s' % (size, payload)) + return payload + + +def make_payload_dist(dist_type, min_size, max_size): + # Check if min and max packet sizes are the same + if min_size == max_size: + G_LOGGER.warning('Packet size is constant: min_size=max_size=%d' % min_size) + return _make_payload(min_size), min_size + + # Payload sizes are even integers uniformly distributed in [min_size, max_size] + if dist_type == 'uniform': + size = int(random.uniform(min_size, max_size)) + + # Reject non even sizes + while (size % 2) != 0: + size = int(random.uniform(min_size, max_size)) + + return _make_payload(size), size + + # Payload sizes are even integers ~"normally" distributed in [min_size, max_size] + if dist_type == 'gaussian': + σ = (max_size - min_size) / 5. + μ = (max_size - min_size) / 2. + size = int(rtnorm.rtnorm(min_size, max_size, sigma=σ, mu=μ, size=1)) + + # Reject non even sizes + while (size % 2) != 0: + size = int(rtnorm.rtnorm(min_size, max_size, sigma=σ, mu=μ, size=1)) + + return _make_payload(size), size + + G_LOGGER.error('Unknown distribution type %s') + + return '0x00', 0 diff --git a/wls-module/rtnorm.py b/wls-module/src/utils/rtnorm.py similarity index 100% rename from wls-module/rtnorm.py rename to wls-module/src/utils/rtnorm.py diff --git a/wls-module/src/utils/waku_messaging.py b/wls-module/src/utils/waku_messaging.py new file mode 100644 index 0000000..828f431 --- /dev/null +++ b/wls-module/src/utils/waku_messaging.py @@ -0,0 +1,85 @@ +# Python Imports +import time +import json +import requests +import sys +import random + +# Project Imports +import logger + +G_LOGGER, handler = logger.innit_logging() + + +def _poisson_interval(rate): + # Generate a random interval using a Poisson distribution + return random.expovariate(rate) + + +def _get_waku_payload(nonce, payload): + my_payload = { + 'nonce': 
nonce, + 'ts': time.time_ns(), + 'payload': payload + } + + return my_payload + + +def _create_waku_msg(payload): + waku_msg = { + 'payload': json.dumps(payload).encode('utf-8').hex() + } + + return waku_msg + + +def _create_waku_rpc_data(topic, waku_msg, node_address): + data = { + 'jsonrpc': '2.0', + 'method': 'post_waku_v2_relay_v1_message', + 'id': 1, + 'params': [topic, waku_msg]} + + G_LOGGER.debug(f"Waku RPC: {data['method']} from {node_address} Topic: {topic}") + + return data + + +def _send_waku_rpc(data, node_address): + s_time = time.time() + + json_data = json.dumps(data) + + response = requests.post(node_address, data=json_data, + headers={'content-type': 'application/json'}) + + elapsed_ms = (time.time() - s_time) * 1000 + + response_obj = response.json() + + G_LOGGER.debug(f"Response from {node_address}: {response_obj} [{elapsed_ms:.4f} ms.]") + + return response_obj, elapsed_ms + + +def send_msg_to_node(node_address, topic, payload, nonce=1): + my_payload = _get_waku_payload(nonce, payload) + waku_msg = _create_waku_msg(payload) + data = _create_waku_rpc_data(topic, waku_msg, node_address) + + response_obj, elapsed_ms = _send_waku_rpc(data, node_address) + + return response_obj, elapsed_ms, json.dumps(waku_msg), my_payload['ts'] + + +def get_next_time_to_msg(inter_msg_type, msg_rate, simulation_time): + if inter_msg_type == 'poisson': + return _poisson_interval(msg_rate) + + if inter_msg_type == 'uniform': + return simulation_time / msg_rate + + G_LOGGER.error('%s is not a valid inter_msg_type. Aborting.' 
% inter_msg_type) + sys.exit() + diff --git a/wls-module/src/wls.py b/wls-module/src/wls.py new file mode 100644 index 0000000..5c5a4a1 --- /dev/null +++ b/wls-module/src/wls.py @@ -0,0 +1,175 @@ +# Python Imports +import argparse +import hashlib +import json +import random +import sys +import time +import tomllib + +# Project Imports +from utils import logger +from utils import waku_messaging +from utils import payloads +from utils import files + +""" Globals """ +G_DEFAULT_CONFIG_FILE = './config/config.json' +G_DEFAULT_TOPOLOGY_FILE = './network_topology/network_data.json' +G_LOGGER, handler = logger.innit_logging() + + +def parse_cli(): + """ Parse command line args. """ + parser = argparse.ArgumentParser() + parser.add_argument("-cfg", "--config_file", help="Config file", action="store_true", + default=G_DEFAULT_CONFIG_FILE) + parser.add_argument("-t", "--topology_file", help="Topology file", action="store_true", + default=G_DEFAULT_TOPOLOGY_FILE) + + args = parser.parse_args() + + return args + + +def load_topics_into_topology(topology): + """ Load Topics """ + nodes = topology["nodes"] + for node, node_info in nodes.items(): + try: + with open("tomls/" + node_info["node_config"], mode='rb') as read_file: + toml_config = tomllib.load(read_file) + if node_info["image"] == "nim-waku": + topics = list(toml_config["topics"].split(" ")) + elif node_info["image"] == "go-waku": + topics = toml_config["topics"] + + # Load topics into topology for easier access + nodes[node]["topics"] = topics + except Exception as e: + G_LOGGER.error('%s: %s' % (e.__doc__, e)) + sys.exit() + + G_LOGGER.info('Loaded nodes topics from toml files') + + +def get_random_emitters(topology, wls_config): + nodes = topology["nodes"] + """ Define the subset of emitters """ + num_emitters = int(len(nodes) * wls_config["emitters_fraction"]) + + if num_emitters == 0: + G_LOGGER.error( + 'The number of emitters must be greater than zero. 
Try increasing the fraction of emitters.') + sys.exit() + + random_emitters = dict(random.sample(list(nodes.items()), num_emitters)) + G_LOGGER.info('Selected %d emitters out of %d total nodes' % (len(random_emitters), len(nodes))) + + return random_emitters + + +def start_traffic_inyection(wls_config, random_emitters): + """ Start simulation """ + s_time = time.time() + last_msg_time = 0 + next_time_to_msg = 0 + msgs_dict = {} + + G_LOGGER.info('Starting a simulation of %d seconds ...' % wls_config['simulation_time']) + + while True: + # Check end condition + elapsed_s = time.time() - s_time + + if elapsed_s >= wls_config['simulation_time']: + G_LOGGER.info( + 'Simulation ended. Sent %d messages in %ds.' % (len(msgs_dict), elapsed_s)) + break + + # Send message + # BUG: There is a constant discrepancy. The average number of messages sent by time interval is slightly less than expected + msg_elapsed = time.time() - last_msg_time + if msg_elapsed <= next_time_to_msg: + continue + + G_LOGGER.debug('Time Δ: %.6f ms.' % ((msg_elapsed - next_time_to_msg) * 1000.0)) + + # Pick an emitter at random from the emitters list + random_emitter, random_emitter_info = random.choice(list(random_emitters.items())) + + emitter_address = f"http://{random_emitter_info['ip_address']}:{random_emitter_info['ports']['rpc_' + random_emitter][0]}/" + emitter_topics = random_emitter_info["topics"] + + # Pick a topic at random from the topics supported by the emitter + emitter_topic = random.choice(emitter_topics) + + G_LOGGER.info('Injecting message of topic %s to network through Waku node %s ...' 
% ( + emitter_topic, emitter_address)) + + payload, size = payloads.make_payload_dist(dist_type=wls_config['dist_type'].lower(), + min_size=wls_config['min_packet_size'], + max_size=wls_config['max_packet_size']) + response, elapsed, waku_msg, ts = waku_messaging.send_msg_to_node(emitter_address, topic=emitter_topic, + payload=payload, nonce=len(msgs_dict)) + + if response['result']: + msg_hash = hashlib.sha256(waku_msg.encode('utf-8')).hexdigest() + if msg_hash in msgs_dict: + G_LOGGER.error('Hash collision. %s already exists in dictionary' % msg_hash) + continue + msgs_dict[msg_hash] = {'ts': ts, 'injection_point': emitter_address, + 'nonce': len(msgs_dict), 'topic': emitter_topic, + 'payload': payload, 'payload_size': size} + + # Compute the time to next message + next_time_to_msg = waku_messaging.get_next_time_to_msg(wls_config['inter_msg_type'], + wls_config['message_rate'], + wls_config['simulation_time']) + G_LOGGER.debug('Next message will happen in %d ms.' % (next_time_to_msg * 1000.0)) + + last_msg_time = time.time() + + elapsed_s = time.time() - s_time + + return msgs_dict + + +def save_messages(msgs_dict): + # Save messages for further analysis + with open('./messages.json', 'w') as f: + f.write(json.dumps(msgs_dict, indent=4)) + + """ We are done """ + G_LOGGER.info('Ended') + + +def main(): + args = parse_cli() + + config_file = args.config_file + topology_file = args.topology_file + + config = files.load_config_file(config_file) + + # Set loglevel from config + wls_config = config['wls'] + + logger.configure_logging(G_LOGGER, handler, wls_config, config_file) + + # Set RPNG seed from config + random.seed(config['general']['prng_seed']) + + topology = files.load_topology(topology_file) + + load_topics_into_topology(topology) + + random_emitters = get_random_emitters(topology, wls_config) + + msgs_dict = start_traffic_inyection(wls_config, random_emitters) + + save_messages(msgs_dict) + + +if __name__ == "__main__": + main() diff --git 
a/wls-module/wls_nomos.py b/wls-module/src/wls_nomos.py similarity index 97% rename from wls-module/wls_nomos.py rename to wls-module/src/wls_nomos.py index 3dd7a8d..85b9b7e 100644 --- a/wls-module/wls_nomos.py +++ b/wls-module/src/wls_nomos.py @@ -5,9 +5,7 @@ """ """ Dependencies """ -import sys, logging, yaml, json, random, os, argparse, tomllib, glob -import requests -import rtnorm +import sys, logging, yaml, json, random, argparse import nomos # from pathlib import Path # import numpy as np From 4edd19879301b483a50c7fe642ae693352193671 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Sat, 4 Mar 2023 18:05:38 +0100 Subject: [PATCH 069/112] More refactor and PEP-8 --- wls-module/src/utils/files.py | 9 ++ wls-module/src/utils/waku_messaging.py | 1 - wls-module/src/wls.py | 130 ++++++++++++++----------- 3 files changed, 84 insertions(+), 56 deletions(-) diff --git a/wls-module/src/utils/files.py b/wls-module/src/utils/files.py index 3e5754d..2b709b8 100644 --- a/wls-module/src/utils/files.py +++ b/wls-module/src/utils/files.py @@ -37,3 +37,12 @@ def load_topology(topology_file): G_LOGGER.info('%d topology loaded' % len(topology)) return topology + + +def save_messages_to_json(msgs_dict): + # Save messages for further analysis + with open('./messages.json', 'w') as f: + f.write(json.dumps(msgs_dict, indent=4)) + + """ We are done """ + G_LOGGER.info('Ended') diff --git a/wls-module/src/utils/waku_messaging.py b/wls-module/src/utils/waku_messaging.py index 828f431..1b1ffea 100644 --- a/wls-module/src/utils/waku_messaging.py +++ b/wls-module/src/utils/waku_messaging.py @@ -82,4 +82,3 @@ def get_next_time_to_msg(inter_msg_type, msg_rate, simulation_time): G_LOGGER.error('%s is not a valid inter_msg_type. Aborting.' 
% inter_msg_type) sys.exit() - diff --git a/wls-module/src/wls.py b/wls-module/src/wls.py index 5c5a4a1..a616dcc 100644 --- a/wls-module/src/wls.py +++ b/wls-module/src/wls.py @@ -1,7 +1,6 @@ # Python Imports import argparse import hashlib -import json import random import sys import time @@ -60,7 +59,8 @@ def get_random_emitters(topology, wls_config): if num_emitters == 0: G_LOGGER.error( - 'The number of emitters must be greater than zero. Try increasing the fraction of emitters.') + 'The number of emitters must be greater than zero. ' + 'Try increasing the fraction of emitters.') sys.exit() random_emitters = dict(random.sample(list(nodes.items()), num_emitters)) @@ -69,81 +69,101 @@ def get_random_emitters(topology, wls_config): return random_emitters +def _is_simulation_finished(start_time, wls_config, msgs_dict): + # Check end condition + elapsed_s = time.time() - start_time + + if elapsed_s >= wls_config['simulation_time']: + G_LOGGER.info(f"Simulation ended. Sent {len(msgs_dict)} messages in {elapsed_s}.") + return True + + return False + + +def _time_to_send_next_message(last_msg_time, next_time_to_msg): + # Send message + # BUG: There is a constant discrepancy. 
+ # The average number of messages sent by time interval is slightly less than expected + msg_elapsed = time.time() - last_msg_time + + if msg_elapsed <= next_time_to_msg: + return False + + G_LOGGER.debug(f"Time Δ: {(msg_elapsed - next_time_to_msg) * 1000.0:6f}ms.") + + return True + + +def _select_emitter_and_topic(random_emitters): + # Pick an emitter at random from the emitters list + random_emitter, random_emitter_info = random.choice(list(random_emitters.items())) + emitter_address = f"http://{random_emitter_info['ip_address']}:" \ + f"{random_emitter_info['ports']['rpc_' + random_emitter][0]}/" + emitter_topics = random_emitter_info["topics"] + # Pick a topic at random from the topics supported by the emitter + emitter_topic = random.choice(emitter_topics) + + G_LOGGER.info(f"Injecting message of topic {emitter_topic} to network " + f"through Waku node {emitter_address} ...") + + return emitter_address, emitter_topic + + +def _inyect_message(emitter_address, emitter_topic, msgs_dict, wls_config): + payload, size = payloads.make_payload_dist(dist_type=wls_config['dist_type'].lower(), + min_size=wls_config['min_packet_size'], + max_size=wls_config['max_packet_size']) + + response, elapsed, waku_msg, ts = waku_messaging.send_msg_to_node(emitter_address, + topic=emitter_topic, + payload=payload, + nonce=len(msgs_dict)) + + if response['result']: + msg_hash = hashlib.sha256(waku_msg.encode('utf-8')).hexdigest() + if msg_hash in msgs_dict: + G_LOGGER.error(f"Hash collision. {msg_hash} already exists in dictionary") + raise RuntimeWarning + + msgs_dict[msg_hash] = {'ts': ts, 'injection_point': emitter_address, + 'nonce': len(msgs_dict), 'topic': emitter_topic, + 'payload': payload, 'payload_size': size} + + def start_traffic_inyection(wls_config, random_emitters): """ Start simulation """ - s_time = time.time() + start_time = time.time() last_msg_time = 0 next_time_to_msg = 0 msgs_dict = {} - G_LOGGER.info('Starting a simulation of %d seconds ...' 
% wls_config['simulation_time']) + G_LOGGER.info(f"Starting a simulation of {wls_config['simulation_time']} seconds...") while True: - # Check end condition - elapsed_s = time.time() - s_time - - if elapsed_s >= wls_config['simulation_time']: - G_LOGGER.info( - 'Simulation ended. Sent %d messages in %ds.' % (len(msgs_dict), elapsed_s)) + if _is_simulation_finished(start_time, wls_config, msgs_dict): break - # Send message - # BUG: There is a constant discrepancy. The average number of messages sent by time interval is slightly less than expected - msg_elapsed = time.time() - last_msg_time - if msg_elapsed <= next_time_to_msg: + if not _time_to_send_next_message(last_msg_time, next_time_to_msg): continue - G_LOGGER.debug('Time Δ: %.6f ms.' % ((msg_elapsed - next_time_to_msg) * 1000.0)) - - # Pick an emitter at random from the emitters list - random_emitter, random_emitter_info = random.choice(list(random_emitters.items())) - - emitter_address = f"http://{random_emitter_info['ip_address']}:{random_emitter_info['ports']['rpc_' + random_emitter][0]}/" - emitter_topics = random_emitter_info["topics"] - - # Pick a topic at random from the topics supported by the emitter - emitter_topic = random.choice(emitter_topics) - - G_LOGGER.info('Injecting message of topic %s to network through Waku node %s ...' % ( - emitter_topic, emitter_address)) + emitter_address, emitter_topic = _select_emitter_and_topic(random_emitters) - payload, size = payloads.make_payload_dist(dist_type=wls_config['dist_type'].lower(), - min_size=wls_config['min_packet_size'], - max_size=wls_config['max_packet_size']) - response, elapsed, waku_msg, ts = waku_messaging.send_msg_to_node(emitter_address, topic=emitter_topic, - payload=payload, nonce=len(msgs_dict)) - - if response['result']: - msg_hash = hashlib.sha256(waku_msg.encode('utf-8')).hexdigest() - if msg_hash in msgs_dict: - G_LOGGER.error('Hash collision. 
%s already exists in dictionary' % msg_hash) - continue - msgs_dict[msg_hash] = {'ts': ts, 'injection_point': emitter_address, - 'nonce': len(msgs_dict), 'topic': emitter_topic, - 'payload': payload, 'payload_size': size} + try: + _inyect_message(emitter_address, emitter_topic, msgs_dict, wls_config) + except RuntimeWarning: + continue # Compute the time to next message next_time_to_msg = waku_messaging.get_next_time_to_msg(wls_config['inter_msg_type'], - wls_config['message_rate'], - wls_config['simulation_time']) + wls_config['message_rate'], + wls_config['simulation_time']) G_LOGGER.debug('Next message will happen in %d ms.' % (next_time_to_msg * 1000.0)) last_msg_time = time.time() - elapsed_s = time.time() - s_time - return msgs_dict -def save_messages(msgs_dict): - # Save messages for further analysis - with open('./messages.json', 'w') as f: - f.write(json.dumps(msgs_dict, indent=4)) - - """ We are done """ - G_LOGGER.info('Ended') - - def main(): args = parse_cli() @@ -168,7 +188,7 @@ def main(): msgs_dict = start_traffic_inyection(wls_config, random_emitters) - save_messages(msgs_dict) + files.save_messages_to_json(msgs_dict) if __name__ == "__main__": From 523020c588c37b613519684c86c101a5fb7e25f9 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Sat, 4 Mar 2023 18:55:23 +0100 Subject: [PATCH 070/112] Fixed logger and wls.py call before starting with tests --- src/system_variables.star | 2 +- wls-module/src/utils/files.py | 16 +++++------ wls-module/src/utils/payloads.py | 12 ++++----- wls-module/src/utils/waku_messaging.py | 10 +++---- .../src/utils/{logger.py => wls_logger.py} | 3 +-- wls-module/src/wls.py | 27 ++++++++++--------- 6 files changed, 32 insertions(+), 38 deletions(-) rename wls-module/src/utils/{logger.py => wls_logger.py} (97%) diff --git a/src/system_variables.star b/src/system_variables.star index edac592..82f9078 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -94,7 +94,7 @@ WLS_CONFIG_PATH = "/wls/config" 
WLS_TARGETS_PATH = "/wls/targets" WLS_TOMLS_PATH = "/wls/tomls" WLS_TOPOLOGY_PATH = "/wls/network_topology" -WLS_CMD = ["python3", "wls.py"] +WLS_CMD = ["python3", "src/wls.py"] CONTAINER_WLS_CONFIGURATION_FILE_NAME = "config.json" diff --git a/wls-module/src/utils/files.py b/wls-module/src/utils/files.py index 2b709b8..4f544ab 100644 --- a/wls-module/src/utils/files.py +++ b/wls-module/src/utils/files.py @@ -3,9 +3,7 @@ import sys # Project Imports -import logger - -G_LOGGER, handler = logger.innit_logging() +from . import wls_logger def load_config_file(config_file): @@ -14,7 +12,7 @@ def load_config_file(config_file): with open(config_file, 'r') as f: config = json.load(f) except Exception as e: - G_LOGGER.error('%s: %s' % (e.__doc__, e)) + wls_logger.G_LOGGER.error('%s: %s' % (e.__doc__, e)) sys.exit() return config @@ -26,15 +24,15 @@ def load_topology(topology_file): with open(topology_file, 'r') as read_file: topology = json.load(read_file) except Exception as e: - G_LOGGER.error('%s: %s' % (e.__doc__, e)) + wls_logger.G_LOGGER.error('%s: %s' % (e.__doc__, e)) sys.exit() if len(topology) == 0: - G_LOGGER.error('Cannot find valid topology. Aborting.') + wls_logger.G_LOGGER.error('Cannot find valid topology. Aborting.') sys.exit(1) - G_LOGGER.debug(topology) - G_LOGGER.info('%d topology loaded' % len(topology)) + wls_logger.G_LOGGER.debug(topology) + wls_logger.G_LOGGER.info('Topology loaded') return topology @@ -45,4 +43,4 @@ def save_messages_to_json(msgs_dict): f.write(json.dumps(msgs_dict, indent=4)) """ We are done """ - G_LOGGER.info('Ended') + wls_logger.G_LOGGER.info('Ended') diff --git a/wls-module/src/utils/payloads.py b/wls-module/src/utils/payloads.py index f28ea31..20b5eeb 100644 --- a/wls-module/src/utils/payloads.py +++ b/wls-module/src/utils/payloads.py @@ -2,22 +2,20 @@ import random # Project Imports -import logger -import rtnorm - -G_LOGGER, handler = logger.innit_logging() +from . import wls_logger +from . 
import rtnorm def _make_payload(size): payload = hex(random.getrandbits(4 * size)) - G_LOGGER.debug('Payload of size %d bytes: %s' % (size, payload)) + wls_logger.G_LOGGER.debug('Payload of size %d bytes: %s' % (size, payload)) return payload def make_payload_dist(dist_type, min_size, max_size): # Check if min and max packet sizes are the same if min_size == max_size: - G_LOGGER.warning('Packet size is constant: min_size=max_size=%d' % min_size) + wls_logger.G_LOGGER.warning('Packet size is constant: min_size=max_size=%d' % min_size) return _make_payload(min_size), min_size # Payload sizes are even integers uniformly distributed in [min_size, max_size] @@ -42,6 +40,6 @@ def make_payload_dist(dist_type, min_size, max_size): return _make_payload(size), size - G_LOGGER.error('Unknown distribution type %s') + wls_logger.G_LOGGER.error('Unknown distribution type %s') return '0x00', 0 diff --git a/wls-module/src/utils/waku_messaging.py b/wls-module/src/utils/waku_messaging.py index 1b1ffea..1ed5629 100644 --- a/wls-module/src/utils/waku_messaging.py +++ b/wls-module/src/utils/waku_messaging.py @@ -6,9 +6,7 @@ import random # Project Imports -import logger - -G_LOGGER, handler = logger.innit_logging() +from . 
import wls_logger def _poisson_interval(rate): @@ -41,7 +39,7 @@ def _create_waku_rpc_data(topic, waku_msg, node_address): 'id': 1, 'params': [topic, waku_msg]} - G_LOGGER.debug(f"Waku RPC: {data['method']} from {node_address} Topic: {topic}") + wls_logger.G_LOGGER.debug(f"Waku RPC: {data['method']} from {node_address} Topic: {topic}") return data @@ -58,7 +56,7 @@ def _send_waku_rpc(data, node_address): response_obj = response.json() - G_LOGGER.debug(f"Response from {node_address}: {response_obj} [{elapsed_ms:.4f} ms.]") + wls_logger.G_LOGGER.debug(f"Response from {node_address}: {response_obj} [{elapsed_ms:.4f} ms.]") return response_obj, elapsed_ms @@ -80,5 +78,5 @@ def get_next_time_to_msg(inter_msg_type, msg_rate, simulation_time): if inter_msg_type == 'uniform': return simulation_time / msg_rate - G_LOGGER.error('%s is not a valid inter_msg_type. Aborting.' % inter_msg_type) + wls_logger.G_LOGGER.error('%s is not a valid inter_msg_type. Aborting.' % inter_msg_type) sys.exit() diff --git a/wls-module/src/utils/logger.py b/wls-module/src/utils/wls_logger.py similarity index 97% rename from wls-module/src/utils/logger.py rename to wls-module/src/utils/wls_logger.py index 951b677..1d19b9d 100644 --- a/wls-module/src/utils/logger.py +++ b/wls-module/src/utils/wls_logger.py @@ -41,5 +41,4 @@ def configure_logging(G_LOGGER, handler, wls_config, config_file): G_LOGGER.info('Configuration loaded from %s' % config_file) - - +G_LOGGER, handler = innit_logging() diff --git a/wls-module/src/wls.py b/wls-module/src/wls.py index a616dcc..bd7b056 100644 --- a/wls-module/src/wls.py +++ b/wls-module/src/wls.py @@ -7,15 +7,16 @@ import tomllib # Project Imports -from utils import logger +from utils import wls_logger from utils import waku_messaging from utils import payloads from utils import files """ Globals """ G_DEFAULT_CONFIG_FILE = './config/config.json' +# G_DEFAULT_CONFIG_FILE = 'config.json' G_DEFAULT_TOPOLOGY_FILE = './network_topology/network_data.json' -G_LOGGER, 
handler = logger.innit_logging() +# G_DEFAULT_TOPOLOGY_FILE = 'topology_generated/network_data.json' def parse_cli(): @@ -46,10 +47,10 @@ def load_topics_into_topology(topology): # Load topics into topology for easier access nodes[node]["topics"] = topics except Exception as e: - G_LOGGER.error('%s: %s' % (e.__doc__, e)) + wls_logger.G_LOGGER.error('%s: %s' % (e.__doc__, e)) sys.exit() - G_LOGGER.info('Loaded nodes topics from toml files') + wls_logger.G_LOGGER.info('Loaded nodes topics from toml files') def get_random_emitters(topology, wls_config): @@ -58,13 +59,13 @@ def get_random_emitters(topology, wls_config): num_emitters = int(len(nodes) * wls_config["emitters_fraction"]) if num_emitters == 0: - G_LOGGER.error( + wls_logger.G_LOGGER.error( 'The number of emitters must be greater than zero. ' 'Try increasing the fraction of emitters.') sys.exit() random_emitters = dict(random.sample(list(nodes.items()), num_emitters)) - G_LOGGER.info('Selected %d emitters out of %d total nodes' % (len(random_emitters), len(nodes))) + wls_logger.G_LOGGER.info('Selected %d emitters out of %d total nodes' % (len(random_emitters), len(nodes))) return random_emitters @@ -74,7 +75,7 @@ def _is_simulation_finished(start_time, wls_config, msgs_dict): elapsed_s = time.time() - start_time if elapsed_s >= wls_config['simulation_time']: - G_LOGGER.info(f"Simulation ended. Sent {len(msgs_dict)} messages in {elapsed_s}.") + wls_logger.G_LOGGER.info(f"Simulation ended. 
Sent {len(msgs_dict)} messages in {elapsed_s}.") return True return False @@ -89,7 +90,7 @@ def _time_to_send_next_message(last_msg_time, next_time_to_msg): if msg_elapsed <= next_time_to_msg: return False - G_LOGGER.debug(f"Time Δ: {(msg_elapsed - next_time_to_msg) * 1000.0:6f}ms.") + wls_logger.G_LOGGER.debug(f"Time Δ: {(msg_elapsed - next_time_to_msg) * 1000.0:6f}ms.") return True @@ -103,7 +104,7 @@ def _select_emitter_and_topic(random_emitters): # Pick a topic at random from the topics supported by the emitter emitter_topic = random.choice(emitter_topics) - G_LOGGER.info(f"Injecting message of topic {emitter_topic} to network " + wls_logger.G_LOGGER.info(f"Injecting message of topic {emitter_topic} to network " f"through Waku node {emitter_address} ...") return emitter_address, emitter_topic @@ -122,7 +123,7 @@ def _inyect_message(emitter_address, emitter_topic, msgs_dict, wls_config): if response['result']: msg_hash = hashlib.sha256(waku_msg.encode('utf-8')).hexdigest() if msg_hash in msgs_dict: - G_LOGGER.error(f"Hash collision. {msg_hash} already exists in dictionary") + wls_logger.G_LOGGER.error(f"Hash collision. {msg_hash} already exists in dictionary") raise RuntimeWarning msgs_dict[msg_hash] = {'ts': ts, 'injection_point': emitter_address, @@ -137,7 +138,7 @@ def start_traffic_inyection(wls_config, random_emitters): next_time_to_msg = 0 msgs_dict = {} - G_LOGGER.info(f"Starting a simulation of {wls_config['simulation_time']} seconds...") + wls_logger.G_LOGGER.info(f"Starting a simulation of {wls_config['simulation_time']} seconds...") while True: if _is_simulation_finished(start_time, wls_config, msgs_dict): @@ -157,7 +158,7 @@ def start_traffic_inyection(wls_config, random_emitters): next_time_to_msg = waku_messaging.get_next_time_to_msg(wls_config['inter_msg_type'], wls_config['message_rate'], wls_config['simulation_time']) - G_LOGGER.debug('Next message will happen in %d ms.' 
% (next_time_to_msg * 1000.0)) + wls_logger.G_LOGGER.debug('Next message will happen in %d ms.' % (next_time_to_msg * 1000.0)) last_msg_time = time.time() @@ -175,7 +176,7 @@ def main(): # Set loglevel from config wls_config = config['wls'] - logger.configure_logging(G_LOGGER, handler, wls_config, config_file) + wls_logger.configure_logging(wls_logger.G_LOGGER, wls_logger.handler, wls_config, config_file) # Set RPNG seed from config random.seed(config['general']['prng_seed']) From 112ed64d40342101e8d7a4509de173770e56b1fb Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Sat, 4 Mar 2023 20:57:46 +0100 Subject: [PATCH 071/112] Deleted unnecessary check --- wls-module/src/utils/files.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/wls-module/src/utils/files.py b/wls-module/src/utils/files.py index 4f544ab..e83452d 100644 --- a/wls-module/src/utils/files.py +++ b/wls-module/src/utils/files.py @@ -27,10 +27,6 @@ def load_topology(topology_file): wls_logger.G_LOGGER.error('%s: %s' % (e.__doc__, e)) sys.exit() - if len(topology) == 0: - wls_logger.G_LOGGER.error('Cannot find valid topology. Aborting.') - sys.exit(1) - wls_logger.G_LOGGER.debug(topology) wls_logger.G_LOGGER.info('Topology loaded') From ca4bb88931376ab145564db7a66b06a4cedfb76a Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Sat, 4 Mar 2023 20:58:34 +0100 Subject: [PATCH 072/112] Refactored payloads.py to add tests --- wls-module/src/utils/payloads.py | 56 +++++++++++++++++++------------- 1 file changed, 34 insertions(+), 22 deletions(-) diff --git a/wls-module/src/utils/payloads.py b/wls-module/src/utils/payloads.py index 20b5eeb..a1c50ae 100644 --- a/wls-module/src/utils/payloads.py +++ b/wls-module/src/utils/payloads.py @@ -6,40 +6,52 @@ from . 
import rtnorm -def _make_payload(size): - payload = hex(random.getrandbits(4 * size)) - wls_logger.G_LOGGER.debug('Payload of size %d bytes: %s' % (size, payload)) +def _make_payload(bytes_size): + # todo preguntar: cuando enviamos un payload, se tiene en cuenta el 0x en el tamaño? + # todo por que coño se multiplica por 4, si el tamaño es en bytes? + # Si multiplicamos por 4, tenemos 4 bits, que es medio byte, 1 hexadecimal, deberian ser 2. + payload = hex(random.getrandbits(4 * bytes_size)) + # payload = hex(random.getrandbits(8 * bytes_size)) + wls_logger.G_LOGGER.debug(f"Payload of size {bytes_size} bytes: {payload}") return payload +def _make_uniform_dist(min_size, max_size): + size = int(random.uniform(min_size, max_size)) + + # Reject non even sizes + while (size % 2) != 0: + size = int(random.uniform(min_size, max_size)) + + return _make_payload(size), size + + +def _make_gaussian_dist(min_size, max_size): + σ = (max_size - min_size) / 5. + μ = (max_size - min_size) / 2. + size = int(rtnorm.rtnorm(min_size, max_size, sigma=σ, mu=μ, size=1)) + + # Reject non even sizes + while (size % 2) != 0: + size = int(rtnorm.rtnorm(min_size, max_size, sigma=σ, mu=μ, size=1)) + + return _make_payload(size), size + + def make_payload_dist(dist_type, min_size, max_size): # Check if min and max packet sizes are the same if min_size == max_size: - wls_logger.G_LOGGER.warning('Packet size is constant: min_size=max_size=%d' % min_size) + wls_logger.G_LOGGER.warning(f"Packet size is constant: min_size=max_size={min_size}") return _make_payload(min_size), min_size # Payload sizes are even integers uniformly distributed in [min_size, max_size] if dist_type == 'uniform': - size = int(random.uniform(min_size, max_size)) - - # Reject non even sizes - while (size % 2) != 0: - size = int(random.uniform(min_size, max_size)) - - return _make_payload(size), size + return _make_uniform_dist(min_size, max_size) # Payload sizes are even integers ~"normally" distributed in [min_size, 
max_size] if dist_type == 'gaussian': - σ = (max_size - min_size) / 5. - μ = (max_size - min_size) / 2. - size = int(rtnorm.rtnorm(min_size, max_size, sigma=σ, mu=μ, size=1)) - - # Reject non even sizes - while (size % 2) != 0: - size = int(rtnorm.rtnorm(min_size, max_size, sigma=σ, mu=μ, size=1)) - - return _make_payload(size), size + return _make_gaussian_dist(min_size, max_size) - wls_logger.G_LOGGER.error('Unknown distribution type %s') + wls_logger.G_LOGGER.error(f"Unknown distribution type {dist_type}") - return '0x00', 0 + raise ValueError('Unknown distribution type %s' % dist_type) From 557e149a2ed891e1386eb0fe328b18bfddedb5dc Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Sat, 4 Mar 2023 20:58:53 +0100 Subject: [PATCH 073/112] Added tests for Moved node builder star files to types folder.py --- wls-module/src/utils/tests/__init__.py | 0 wls-module/src/utils/tests/test_files.py | 33 +++++++++++++++++++ .../utils/tests/test_files/test_config.json | 8 +++++ .../utils/tests/test_files/test_topology.json | 18 ++++++++++ 4 files changed, 59 insertions(+) create mode 100644 wls-module/src/utils/tests/__init__.py create mode 100644 wls-module/src/utils/tests/test_files.py create mode 100644 wls-module/src/utils/tests/test_files/test_config.json create mode 100644 wls-module/src/utils/tests/test_files/test_topology.json diff --git a/wls-module/src/utils/tests/__init__.py b/wls-module/src/utils/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/wls-module/src/utils/tests/test_files.py b/wls-module/src/utils/tests/test_files.py new file mode 100644 index 0000000..ad65815 --- /dev/null +++ b/wls-module/src/utils/tests/test_files.py @@ -0,0 +1,33 @@ +import json +import unittest +import os + +from .. 
import files + + +class TestFiles(unittest.TestCase): + + def test_load_config_file(self): + config = files.load_config_file("test_files/test_config.json") + self.assertEqual(config["general"]["prng_seed"], 1234) + self.assertEqual(config["kurtosis"]["enclave_name"], "test") + + def test_config_file_error(self): + with self.assertRaises(FileNotFoundError): + files.load_config_file("test_files/test_config_error.json") + + def test_load_topology(self): + test_topology = files.load_topology("test_files/test_topology.json") + self.assertEqual(test_topology["containers"]["containers_0"][0], "node_0") + self.assertEqual(test_topology["nodes"]["node_0"]["image"], "nim-waku") + + def test_load_topology_error(self): + with self.assertRaises(FileNotFoundError): + files.load_topology("test_files/test_topology_error.json") + + def test_save_messages_to_json(self): + msgs_dict = {"test": "test"} + files.save_messages_to_json(msgs_dict) + with open("messages.json", "r") as f: + self.assertEqual(json.load(f), msgs_dict) + os.remove("messages.json") diff --git a/wls-module/src/utils/tests/test_files/test_config.json b/wls-module/src/utils/tests/test_files/test_config.json new file mode 100644 index 0000000..e13fe42 --- /dev/null +++ b/wls-module/src/utils/tests/test_files/test_config.json @@ -0,0 +1,8 @@ +{ + "general":{ + "prng_seed" : 1234 + }, + "kurtosis": { + "enclave_name": "test" + } +} diff --git a/wls-module/src/utils/tests/test_files/test_topology.json b/wls-module/src/utils/tests/test_files/test_topology.json new file mode 100644 index 0000000..54176ab --- /dev/null +++ b/wls-module/src/utils/tests/test_files/test_topology.json @@ -0,0 +1,18 @@ +{ + "containers": { + "containers_0": [ + "node_0" + ] + }, + "nodes": { + "node_0": { + "static_nodes": [], + "subnetwork": "subnetwork_0", + "image": "nim-waku", + "node_config": "node_0.toml", + "node_log": "node_0.log", + "port_shift": 0, + "container_id": "containers_0" + } + } +} \ No newline at end of file From 
18f7f585ab5736f0e2a2784efbdb8f5ba4a314b2 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Sun, 5 Mar 2023 21:54:44 +0100 Subject: [PATCH 074/112] Cleaned imports --- wls-module/src/utils/files.py | 2 +- wls-module/src/utils/payloads.py | 4 ++-- wls-module/src/utils/tests/test_files.py | 2 +- wls-module/src/utils/waku_messaging.py | 5 +++-- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/wls-module/src/utils/files.py b/wls-module/src/utils/files.py index e83452d..1ef6dce 100644 --- a/wls-module/src/utils/files.py +++ b/wls-module/src/utils/files.py @@ -3,7 +3,7 @@ import sys # Project Imports -from . import wls_logger +from src.utils import wls_logger def load_config_file(config_file): diff --git a/wls-module/src/utils/payloads.py b/wls-module/src/utils/payloads.py index a1c50ae..c6fe5d6 100644 --- a/wls-module/src/utils/payloads.py +++ b/wls-module/src/utils/payloads.py @@ -2,8 +2,8 @@ import random # Project Imports -from . import wls_logger -from . import rtnorm +import wls_logger +import rtnorm def _make_payload(bytes_size): diff --git a/wls-module/src/utils/tests/test_files.py b/wls-module/src/utils/tests/test_files.py index ad65815..5073231 100644 --- a/wls-module/src/utils/tests/test_files.py +++ b/wls-module/src/utils/tests/test_files.py @@ -2,7 +2,7 @@ import unittest import os -from .. import files +from src.utils import files class TestFiles(unittest.TestCase): diff --git a/wls-module/src/utils/waku_messaging.py b/wls-module/src/utils/waku_messaging.py index 1ed5629..648bc80 100644 --- a/wls-module/src/utils/waku_messaging.py +++ b/wls-module/src/utils/waku_messaging.py @@ -6,7 +6,7 @@ import random # Project Imports -from . 
import wls_logger +from src.utils import wls_logger def _poisson_interval(rate): @@ -37,7 +37,8 @@ def _create_waku_rpc_data(topic, waku_msg, node_address): 'jsonrpc': '2.0', 'method': 'post_waku_v2_relay_v1_message', 'id': 1, - 'params': [topic, waku_msg]} + 'params': [topic, waku_msg] + } wls_logger.G_LOGGER.debug(f"Waku RPC: {data['method']} from {node_address} Topic: {topic}") From 8db84f0324b2883856d84e2e98a6c0ba0f7940db Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Sun, 5 Mar 2023 21:54:56 +0100 Subject: [PATCH 075/112] Added exit code to sys.exit --- wls-module/src/utils/waku_messaging.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/wls-module/src/utils/waku_messaging.py b/wls-module/src/utils/waku_messaging.py index 648bc80..b6f314e 100644 --- a/wls-module/src/utils/waku_messaging.py +++ b/wls-module/src/utils/waku_messaging.py @@ -79,5 +79,5 @@ def get_next_time_to_msg(inter_msg_type, msg_rate, simulation_time): if inter_msg_type == 'uniform': return simulation_time / msg_rate - wls_logger.G_LOGGER.error('%s is not a valid inter_msg_type. Aborting.' % inter_msg_type) - sys.exit() + wls_logger.G_LOGGER.error(f'{inter_msg_type} is not a valid inter_msg_type. 
Aborting.') + sys.exit(1) From d640d977cec23bffedb0658967704203944481e6 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Sun, 5 Mar 2023 21:55:11 +0100 Subject: [PATCH 076/112] Added tests for waku messaging --- .../src/utils/tests/test_waku_messaging.py | 82 +++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 wls-module/src/utils/tests/test_waku_messaging.py diff --git a/wls-module/src/utils/tests/test_waku_messaging.py b/wls-module/src/utils/tests/test_waku_messaging.py new file mode 100644 index 0000000..dab36d3 --- /dev/null +++ b/wls-module/src/utils/tests/test_waku_messaging.py @@ -0,0 +1,82 @@ +import unittest +import random +import json +from unittest.mock import patch + +from src.utils import waku_messaging + +random.seed(1) + + +class TestPayloads(unittest.TestCase): + + def create_patch(self, name): + patcher = patch(name) + thing = patcher.start() + self.addCleanup(patcher.stop) + return thing + + def test__poisson_interval(self): + test_1 = waku_messaging._poisson_interval(1) + test_5 = waku_messaging._poisson_interval(5) + test_10 = waku_messaging._poisson_interval(10) + self.assertEqual(test_1, 0.1442910641095092) + self.assertEqual(test_5, 0.3760312530841251) + self.assertEqual(test_10, 0.1442968925346663) + + def test__get_waku_payload(self): + mock_time = self.create_patch('time.time_ns') + + mock_time.return_value = 123456789 + test_payload = waku_messaging._get_waku_payload(1, 'test') + self.assertEqual(test_payload, {'nonce': 1, 'ts': 123456789, 'payload': 'test'}) + + def test__create_waku_msg(self): + test_payload = waku_messaging._create_waku_msg('test') + self.assertEqual(test_payload, {'payload': '227465737422'}) + + def test__create_waku_rpc_data(self): + test_data = waku_messaging._create_waku_rpc_data('test', 'test', 'test') + self.assertEqual(test_data, {'jsonrpc': '2.0', 'method': 'post_waku_v2_relay_v1_message', + 'id': 1, 'params': ['test', 'test']}) + + def test__send_waku_rpc(self): + mock_response = 
self.create_patch('requests.post') + mock_time = self.create_patch('time.time') + + mock_response.return_value.json.return_value = 'test' + mock_time.return_value = 10 + test_response, test_time = waku_messaging._send_waku_rpc('test', 'test') + self.assertEqual(test_response, 'test') + self.assertEqual(test_time, 0) + + def test_send_msg_to_node(self): + mock_waku_payload = self.create_patch('src.utils.waku_messaging._get_waku_payload') + mock_create_waku_msg = self.create_patch('src.utils.waku_messaging._create_waku_msg') + mock_send_waku_rpc = self.create_patch('src.utils.waku_messaging._send_waku_rpc') + + mock_waku_payload.return_value = {'ts': 1} + mock_create_waku_msg.return_value = 'test3' + mock_send_waku_rpc.return_value = ('test1', 0) + + test_response_1, test_response_2, test_response_3, test_response_4 = \ + waku_messaging.send_msg_to_node('test', 'test', 'test') + + self.assertEqual(test_response_1, 'test1') + self.assertEqual(test_response_2, 0) + self.assertEqual(test_response_3, json.dumps('test3')) + self.assertEqual(test_response_4, 1) + + def test_get_next_time_to_msg_poisson(self): + test = waku_messaging.get_next_time_to_msg('poisson', 1, 1) + self.assertEqual(test, 0.29446371689426293) + + def test_get_next_time_to_msg_uniform(self): + test = waku_messaging.get_next_time_to_msg('uniform', 1, 1) + self.assertEqual(test, 1) + + def test_get_next_time_to_msg_invalid(self): + with self.assertRaises(SystemExit) as cm: + waku_messaging.get_next_time_to_msg('test', 1, 1) + + self.assertEqual(cm.exception.code, 1) From 7ddbbc2e5327da2da13412fc8a7698d7106c3b07 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Sun, 5 Mar 2023 22:12:37 +0100 Subject: [PATCH 077/112] Added tests for payloads --- wls-module/src/utils/payloads.py | 4 +- wls-module/src/utils/tests/test_payloads.py | 57 +++++++++++++++++++++ 2 files changed, 59 insertions(+), 2 deletions(-) create mode 100644 wls-module/src/utils/tests/test_payloads.py diff --git 
a/wls-module/src/utils/payloads.py b/wls-module/src/utils/payloads.py index c6fe5d6..48070e2 100644 --- a/wls-module/src/utils/payloads.py +++ b/wls-module/src/utils/payloads.py @@ -2,8 +2,8 @@ import random # Project Imports -import wls_logger -import rtnorm +from src.utils import wls_logger +from src.utils import rtnorm def _make_payload(bytes_size): diff --git a/wls-module/src/utils/tests/test_payloads.py b/wls-module/src/utils/tests/test_payloads.py new file mode 100644 index 0000000..03b19d8 --- /dev/null +++ b/wls-module/src/utils/tests/test_payloads.py @@ -0,0 +1,57 @@ +import unittest +import random + +from src.utils import payloads + +random.seed(1) + + +class TestPayloads(unittest.TestCase): + + """ + def test__make_payload(self): + payload = payloads._make_payload(1) + print(payload) + print(b"a") + self.assertEqual(payload, '0x2') + + + def test__make_payload_error(self): + with self.assertRaises(ValueError): + payloads._make_payload(0) <<- todo añadir error + """ + + def test__make_uniform_dist(self): + payload, size = payloads._make_uniform_dist(1, 10) + self.assertEqual(payload, '0xd8') + self.assertEqual(size, 2) + + # def test__make_uniform_dist_error(self): <<- todo + + """ + def test__make_gaussian_dist(self): + payload, size = payloads._make_gaussian_dist(1, 10) <<- path as it does not use random + self.assertEqual(payload, '0x2265') + self.assertEqual(size, 2) + """ + + def test_make_payload_dist_same(self): + payload, size = payloads.make_payload_dist('test', 1, 1) + self.assertEqual(payload, '0xd8') + self.assertEqual(size, 2) + + def test_make_payload_dist_uniform(self): + payload, size = payloads.make_payload_dist('uniform', 1, 10) + self.assertEqual(payload, '0xcd') + self.assertEqual(size, 2) + + """ + def test_make_payload_dist_gaussian(self): + payload, size = payloads.make_payload_dist('gaussian', 1, 3) <<- same as test before + self.assertEqual(payload, '0x2265') + self.assertEqual(size, 2) + """ + + def 
test_make_payload_dist_error(self): + with self.assertRaises(ValueError): + payloads.make_payload_dist('test', 1, 4) From 14244a5fb54433e9712f7adf9a2ef1abb2407de4 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Sun, 5 Mar 2023 22:24:18 +0100 Subject: [PATCH 078/112] Deleted unnecessary wls template. --- src/templates.star | 38 -------------------------------------- 1 file changed, 38 deletions(-) diff --git a/src/templates.star b/src/templates.star index dcf3e73..8b341de 100644 --- a/src/templates.star +++ b/src/templates.star @@ -32,41 +32,3 @@ def get_prometheus_template(): """ return template - - -## WLS -#def get_wls_template(): -# # Traffic simulation parameters -# wls_yml_template = """ -# general: -# -# debug_level : "DEBUG" -# -# targets_file : "./targets/targets.json" -# -# prng_seed : 0 -# -# # Simulation time in seconds -# simulation_time : {{.simulation_time}} -# -# # Message rate in messages per second -# msg_rate : {{.message_rate}} -# -# # Packet size in bytes -# min_packet_size : {{.min_packet_size}} -# max_packet_size : {{.max_packet_size}} -# -# # Packe size distribution -# # Values: uniform and gaussian -# dist_type : {{.dist_type}} -# -# # Fraction (of the total number of nodes) that inject traffic -# # Values: [0., 1.] 
-# emitters_fraction : {{.emitters_fraction}} -# -# # Inter-message times -# # Values: uniform and poisson -# inter_msg_type : {{.inter_msg_type}} -# """ -# -# return wls_yml_template From da58f5799470fce06ad04a84b110363220cdcefb Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Tue, 7 Mar 2023 20:07:51 +0100 Subject: [PATCH 079/112] Make wls actually take arguments --- src/system_variables.star | 6 ++-- src/wls.star | 15 ++++++-- wls-module/Dockerfile | 28 +++++++++++++-- wls-module/src/wls.py | 74 +++++++++++++++++++-------------------- 4 files changed, 79 insertions(+), 44 deletions(-) diff --git a/src/system_variables.star b/src/system_variables.star index 82f9078..26a9d5d 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -90,15 +90,15 @@ PORTS_KEY = "ports" # WLS Configuration WLS_IMAGE = "wls:0.0.1" WLS_SERVICE_NAME = "wls" -WLS_CONFIG_PATH = "/wls/config" +WLS_CONFIG_PATH = "/wls/config/" WLS_TARGETS_PATH = "/wls/targets" WLS_TOMLS_PATH = "/wls/tomls" WLS_TOPOLOGY_PATH = "/wls/network_topology" -WLS_CMD = ["python3", "src/wls.py"] +WLS_CONFIG_FILE_FLAG = "--config-file" +WLS_TOPOLOGY_FILE_FLAG = "--topology-file" CONTAINER_WLS_CONFIGURATION_FILE_NAME = "config.json" -# CONTAINER_TARGETS_FILE_NAME_WLS = "targets.json" CONTAINER_TOPOLOGY_FILE_NAME_WLS = "network_data.json" # Waku RPC methods diff --git a/src/wls.star b/src/wls.star index 3f2e5a5..3e9a732 100644 --- a/src/wls.star +++ b/src/wls.star @@ -55,6 +55,16 @@ def create_new_topology_information(plan, network_topology): return artifact_id +def create_cmd(config_file): + config_file_name = config_file.split("/")[-1] + + config_file = vars.WLS_CONFIG_FILE_FLAG + " " + \ + vars.WLS_CONFIG_PATH + config_file_name + topology_file = vars.WLS_TOPOLOGY_FILE_FLAG + " " + \ + vars.WLS_TOPOLOGY_PATH + vars.CONTAINER_TOPOLOGY_FILE_NAME_WLS + + return config_file + " " + topology_file + def init(plan, network_topology, config_file): # Generate simulation config @@ -68,6 +78,8 @@ def 
init(plan, network_topology, config_file): # Get complete network topology information wls_topology = create_new_topology_information(plan, network_topology) + wls_cmd = create_cmd(config_file) + add_service_config = ServiceConfig( image=vars.WLS_IMAGE, ports={}, @@ -76,7 +88,7 @@ def init(plan, network_topology, config_file): vars.WLS_TOMLS_PATH: tomls_artifact, vars.WLS_TOPOLOGY_PATH: wls_topology }, - cmd=vars.WLS_CMD + cmd=wls_cmd ) wls_service = plan.add_service( service_name=vars.WLS_SERVICE_NAME, @@ -84,4 +96,3 @@ def init(plan, network_topology, config_file): ) return wls_service - diff --git a/wls-module/Dockerfile b/wls-module/Dockerfile index 98e6041..4a03a60 100644 --- a/wls-module/Dockerfile +++ b/wls-module/Dockerfile @@ -1,5 +1,29 @@ -FROM python:3.11.0 +# Create the build image +FROM python:3.11-slim AS build-image + +# Create the virtualenv +RUN python -m venv /opt/venv + +# Use the virtualenv +ENV PATH="/opt/venv/bin:$PATH" + +# Perform the installs +COPY requirements.txt . +RUN pip install -r requirements.txt + +# Create the production image +FROM python:3.11-slim AS prod-image LABEL Maintainer="Daimakaimura" + +# Copy the requisite files from the build image to the production image +COPY --from=build-image /opt/venv /opt/venv + +# Copy the wls files to the production image WORKDIR /wls COPY . . 
-RUN pip install -r requirements.txt + +# Deploy the virtualenv in production image +ENV PATH="/opt/venv/bin:$PATH" + +# Set the entrypoint +ENTRYPOINT ["python", "wls.py"] \ No newline at end of file diff --git a/wls-module/src/wls.py b/wls-module/src/wls.py index bd7b056..7c46e59 100644 --- a/wls-module/src/wls.py +++ b/wls-module/src/wls.py @@ -7,29 +7,41 @@ import tomllib # Project Imports -from utils import wls_logger -from utils import waku_messaging -from utils import payloads -from utils import files +from .utils import wls_logger +from .utils import waku_messaging +from .utils import payloads +from .utils import files """ Globals """ -G_DEFAULT_CONFIG_FILE = './config/config.json' -# G_DEFAULT_CONFIG_FILE = 'config.json' -G_DEFAULT_TOPOLOGY_FILE = './network_topology/network_data.json' -# G_DEFAULT_TOPOLOGY_FILE = 'topology_generated/network_data.json' +G_DEFAULT_CONFIG_FILE = 'config.json' +G_DEFAULT_TOPOLOGY_FILE = 'topology_generated/network_data.json' -def parse_cli(): +def parse_cli(args): """ Parse command line args. 
""" parser = argparse.ArgumentParser() - parser.add_argument("-cfg", "--config_file", help="Config file", action="store_true", + parser.add_argument("-cfg", "--config_file", type=str, help="Config file", default=G_DEFAULT_CONFIG_FILE) - parser.add_argument("-t", "--topology_file", help="Topology file", action="store_true", + parser.add_argument("-t", "--topology_file", type=str, help="Topology file", default=G_DEFAULT_TOPOLOGY_FILE) - args = parser.parse_args() + parsed_args = parser.parse_args(args) - return args + return parsed_args + + +def _load_topics(node_info, nodes, node): + topics = None + with open("tomls/" + node_info["node_config"], mode='rb') as read_file: + toml_config = tomllib.load(read_file) + if node_info["image"] == "nim-waku": + topics = list(toml_config["topics"].split(" ")) + elif node_info["image"] == "go-waku": + topics = toml_config["topics"] + else: + raise ValueError("Unknown image type") + # Load topics into topology for easier access + nodes[node]["topics"] = topics def load_topics_into_topology(topology): @@ -37,16 +49,8 @@ def load_topics_into_topology(topology): nodes = topology["nodes"] for node, node_info in nodes.items(): try: - with open("tomls/" + node_info["node_config"], mode='rb') as read_file: - toml_config = tomllib.load(read_file) - if node_info["image"] == "nim-waku": - topics = list(toml_config["topics"].split(" ")) - elif node_info["image"] == "go-waku": - topics = toml_config["topics"] - - # Load topics into topology for easier access - nodes[node]["topics"] = topics - except Exception as e: + _load_topics(node_info, nodes, node) + except ValueError as e: wls_logger.G_LOGGER.error('%s: %s' % (e.__doc__, e)) sys.exit() @@ -58,10 +62,9 @@ def get_random_emitters(topology, wls_config): """ Define the subset of emitters """ num_emitters = int(len(nodes) * wls_config["emitters_fraction"]) - if num_emitters == 0: + if num_emitters == 0 or num_emitters > len(nodes): wls_logger.G_LOGGER.error( - 'The number of emitters must be 
greater than zero. ' - 'Try increasing the fraction of emitters.') + 'The number of emitters must be greater than zero and less or equals than one.') sys.exit() random_emitters = dict(random.sample(list(nodes.items()), num_emitters)) @@ -95,7 +98,7 @@ def _time_to_send_next_message(last_msg_time, next_time_to_msg): return True -def _select_emitter_and_topic(random_emitters): +def _select_emitter_with_topic(random_emitters): # Pick an emitter at random from the emitters list random_emitter, random_emitter_info = random.choice(list(random_emitters.items())) emitter_address = f"http://{random_emitter_info['ip_address']}:" \ @@ -110,7 +113,7 @@ def _select_emitter_and_topic(random_emitters): return emitter_address, emitter_topic -def _inyect_message(emitter_address, emitter_topic, msgs_dict, wls_config): +def _inject_message(emitter_address, emitter_topic, msgs_dict, wls_config): payload, size = payloads.make_payload_dist(dist_type=wls_config['dist_type'].lower(), min_size=wls_config['min_packet_size'], max_size=wls_config['max_packet_size']) @@ -124,14 +127,14 @@ def _inyect_message(emitter_address, emitter_topic, msgs_dict, wls_config): msg_hash = hashlib.sha256(waku_msg.encode('utf-8')).hexdigest() if msg_hash in msgs_dict: wls_logger.G_LOGGER.error(f"Hash collision. 
{msg_hash} already exists in dictionary") - raise RuntimeWarning + return msgs_dict[msg_hash] = {'ts': ts, 'injection_point': emitter_address, 'nonce': len(msgs_dict), 'topic': emitter_topic, 'payload': payload, 'payload_size': size} -def start_traffic_inyection(wls_config, random_emitters): +def start_traffic_injection(wls_config, random_emitters): """ Start simulation """ start_time = time.time() last_msg_time = 0 @@ -147,12 +150,9 @@ def start_traffic_inyection(wls_config, random_emitters): if not _time_to_send_next_message(last_msg_time, next_time_to_msg): continue - emitter_address, emitter_topic = _select_emitter_and_topic(random_emitters) + emitter_address, emitter_topic = _select_emitter_with_topic(random_emitters) - try: - _inyect_message(emitter_address, emitter_topic, msgs_dict, wls_config) - except RuntimeWarning: - continue + _inject_message(emitter_address, emitter_topic, msgs_dict, wls_config) # Compute the time to next message next_time_to_msg = waku_messaging.get_next_time_to_msg(wls_config['inter_msg_type'], @@ -166,7 +166,7 @@ def start_traffic_inyection(wls_config, random_emitters): def main(): - args = parse_cli() + args = parse_cli(sys.argv[1:]) config_file = args.config_file topology_file = args.topology_file @@ -187,7 +187,7 @@ def main(): random_emitters = get_random_emitters(topology, wls_config) - msgs_dict = start_traffic_inyection(wls_config, random_emitters) + msgs_dict = start_traffic_injection(wls_config, random_emitters) files.save_messages_to_json(msgs_dict) From c4ee242471fc262ff8da30f2e6244692d0fe250f Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Tue, 7 Mar 2023 20:08:24 +0100 Subject: [PATCH 080/112] Remove comment in payload and added error --- wls-module/src/utils/payloads.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/wls-module/src/utils/payloads.py b/wls-module/src/utils/payloads.py index 48070e2..65fbeff 100644 --- a/wls-module/src/utils/payloads.py +++ 
b/wls-module/src/utils/payloads.py @@ -7,11 +7,15 @@ def _make_payload(bytes_size): - # todo preguntar: cuando enviamos un payload, se tiene en cuenta el 0x en el tamaño? - # todo por que coño se multiplica por 4, si el tamaño es en bytes? - # Si multiplicamos por 4, tenemos 4 bits, que es medio byte, 1 hexadecimal, deberian ser 2. + # Multiplied by 4 because each character in a string is one byte, so in a hex + # we cannot go to two characters, this means we can only use 4 bits per byte. + # We send half of the information but with the correct size, and as this is for testing purposes + # we don't care about the information we are sending. + if bytes_size == 0: + raise ValueError('Payload size cannot be 0') + payload = hex(random.getrandbits(4 * bytes_size)) - # payload = hex(random.getrandbits(8 * bytes_size)) + wls_logger.G_LOGGER.debug(f"Payload of size {bytes_size} bytes: {payload}") return payload From 1345575333494a948243063feedef9601947c71a Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Tue, 7 Mar 2023 20:08:37 +0100 Subject: [PATCH 081/112] Finished test payloads --- wls-module/src/utils/tests/test_payloads.py | 50 +++++++++++---------- 1 file changed, 27 insertions(+), 23 deletions(-) diff --git a/wls-module/src/utils/tests/test_payloads.py b/wls-module/src/utils/tests/test_payloads.py index 03b19d8..c9a0d2e 100644 --- a/wls-module/src/utils/tests/test_payloads.py +++ b/wls-module/src/utils/tests/test_payloads.py @@ -1,5 +1,6 @@ import unittest import random +from unittest.mock import patch from src.utils import payloads @@ -8,49 +9,52 @@ class TestPayloads(unittest.TestCase): - """ + def create_patch(self, name): + patcher = patch(name) + thing = patcher.start() + self.addCleanup(patcher.stop) + return thing + def test__make_payload(self): payload = payloads._make_payload(1) print(payload) print(b"a") - self.assertEqual(payload, '0x2') - + self.assertEqual(payload, '0x9') def test__make_payload_error(self): with self.assertRaises(ValueError): - 
payloads._make_payload(0) <<- todo añadir error - """ + payloads._make_payload(0) def test__make_uniform_dist(self): payload, size = payloads._make_uniform_dist(1, 10) - self.assertEqual(payload, '0xd8') - self.assertEqual(size, 2) - - # def test__make_uniform_dist_error(self): <<- todo + self.assertEqual(payload, '0xc386bbc4') + self.assertEqual(size, 8) - """ def test__make_gaussian_dist(self): - payload, size = payloads._make_gaussian_dist(1, 10) <<- path as it does not use random - self.assertEqual(payload, '0x2265') - self.assertEqual(size, 2) - """ + mock_rtnorm = self.create_patch('src.utils.rtnorm.rtnorm') + mock_rtnorm.return_value = 6 + + payload, size = payloads._make_gaussian_dist(1, 10) + self.assertEqual(payload, '0x2265b1') + self.assertEqual(size, 6) def test_make_payload_dist_same(self): payload, size = payloads.make_payload_dist('test', 1, 1) - self.assertEqual(payload, '0xd8') - self.assertEqual(size, 2) + self.assertEqual(payload, '0x1') + self.assertEqual(size, 1) def test_make_payload_dist_uniform(self): payload, size = payloads.make_payload_dist('uniform', 1, 10) - self.assertEqual(payload, '0xcd') - self.assertEqual(size, 2) + self.assertEqual(payload, '0xc9e9c6') + self.assertEqual(size, 6) - """ def test_make_payload_dist_gaussian(self): - payload, size = payloads.make_payload_dist('gaussian', 1, 3) <<- same as test before - self.assertEqual(payload, '0x2265') - self.assertEqual(size, 2) - """ + mock__make_gaussian_dist = self.create_patch('src.utils.payloads._make_gaussian_dist') + mock__make_gaussian_dist.return_value = '0x213', 3 + + payload, size = payloads.make_payload_dist('gaussian', 1, 10) + self.assertEqual(payload, '0x213') + self.assertEqual(size, 3) def test_make_payload_dist_error(self): with self.assertRaises(ValueError): From a774ecec84ab4321a2249290b7e7e4ac48998f2b Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Tue, 7 Mar 2023 20:08:55 +0100 Subject: [PATCH 082/112] Added tests for wls.py --- 
wls-module/src/tests/__init__.py | 0 wls-module/src/tests/test_wls.py | 213 +++++++++++++++++++++++++++++++ 2 files changed, 213 insertions(+) create mode 100644 wls-module/src/tests/__init__.py create mode 100644 wls-module/src/tests/test_wls.py diff --git a/wls-module/src/tests/__init__.py b/wls-module/src/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/wls-module/src/tests/test_wls.py b/wls-module/src/tests/test_wls.py new file mode 100644 index 0000000..093b142 --- /dev/null +++ b/wls-module/src/tests/test_wls.py @@ -0,0 +1,213 @@ +import unittest +import random +from unittest.mock import mock_open, patch + +from src import wls + +random.seed(0) + + +class TestWLS(unittest.TestCase): + + def create_patch(self, name): + patcher = patch(name) + thing = patcher.start() + self.addCleanup(patcher.stop) + return thing + + def test_parse_cli(self): + args_parsed = wls.parse_cli(["-cfg", "test1", "-t", "test2"]) + self.assertEqual(args_parsed.config_file, "test1") + self.assertEqual(args_parsed.topology_file, "test2") + + def test_parse_cli_only_cfg(self): + args_parsed = wls.parse_cli(["-cfg", "test1"]) + self.assertEqual(args_parsed.config_file, "test1") + self.assertEqual(args_parsed.topology_file, wls.G_DEFAULT_TOPOLOGY_FILE) + + def test_parse_cli_only_t(self): + args_parsed = wls.parse_cli(["-t", "test2"]) + self.assertEqual(args_parsed.config_file, wls.G_DEFAULT_CONFIG_FILE) + self.assertEqual(args_parsed.topology_file, "test2") + + def test_parse_cli_no_args(self): + args_parsed = wls.parse_cli([]) + self.assertEqual(args_parsed.config_file, wls.G_DEFAULT_CONFIG_FILE) + self.assertEqual(args_parsed.topology_file, wls.G_DEFAULT_TOPOLOGY_FILE) + + def test_parse_cli_error(self): + with self.assertRaises(SystemExit): + wls.parse_cli(["-cfg", "test1", "-t", "test2", "-error"]) + + def test__load_topics_nwaku(self): + m = mock_open(read_data='asd') + with patch('builtins.open', m) as mocked_open: + mock_tomllib_load = 
self.create_patch('tomllib.load') + mock_tomllib_load.return_value = {"topics": 'test1 test2'} + + node_info = {"image": "nim-waku", "node_config": "asd"} + node = {} + node["test"] = {} + + wls._load_topics(node_info, node, "test") + self.assertEqual(node["test"]["topics"], ["test1", "test2"]) + + def test__load_topics_gowaku(self): + m = mock_open(read_data='asd') + with patch('builtins.open', m) as mocked_open: + mock_tomllib_load = self.create_patch('tomllib.load') + mock_tomllib_load.return_value = {"topics": ['test1', 'test2']} + + node_info = {"image": "go-waku", "node_config": "asd"} + node = {} + node["test"] = {} + + wls._load_topics(node_info, node, "test") + self.assertEqual(node["test"]["topics"], ["test1", "test2"]) + + def test__load_topics_error(self): + m = mock_open(read_data='asd') + with patch('builtins.open', m) as mocked_open: + mock_tomllib_load = self.create_patch('tomllib.load') + mock_tomllib_load.return_value = {"topics": 'test1 test2'} + + node_info = {"image": "error", "node_config": "asd"} + node = {} + node["test"] = {} + + with self.assertRaises(Exception): + wls._load_topics(node_info, node, "test") + + def test_load_topics_into_topology(self): + m = mock_open(read_data='asd') + with patch('builtins.open', m) as mocked_open: + mock_tomllib_load = self.create_patch('tomllib.load') + mock_tomllib_load.return_value = {"topics": 'test1 test2'} + + topology = {"nodes": {"test": {"image": "nim-waku", "node_config": "asd"}}} + wls.load_topics_into_topology(topology) + self.assertEqual(topology["nodes"]["test"]["topics"], ["test1", "test2"]) + + def test_load_topics_into_topology_error(self): + m = mock_open(read_data='asd') + with patch('builtins.open', m) as mocked_open: + mock_tomllib_load = self.create_patch('tomllib.load') + mock_tomllib_load.return_value = {"topics": 'test1 test2'} + + topology = {"nodes": {"test": {"image": "error", "node_config": "asd"}}} + with self.assertRaises(SystemExit): + 
wls.load_topics_into_topology(topology) + + def test_get_random_emitters_all(self): + topology = {"nodes": {"test1": 1, "test2": 2}} + config = {"emitters_fraction": 1} + emitters = wls.get_random_emitters(topology, config) + + self.assertEqual(emitters, {"test1": 1, "test2": 2}) + + def test_get_random_emitters_half(self): + topology = {"nodes": {"test1": 1, "test2": 2}} + config = {"emitters_fraction": 0.5} + emitters = wls.get_random_emitters(topology, config) + + self.assertEqual(emitters, {"test2": 2}) + + def test_get_random_emitters_error(self): + topology = {"nodes": {"test1": 1, "test2": 2}} + config = {"emitters_fraction": 0} + with self.assertRaises(SystemExit): + wls.get_random_emitters(topology, config) + + def test_get_random_emitters_error2(self): + topology = {"nodes": {"test1": 1, "test2": 2}} + config = {"emitters_fraction": 2} + with self.assertRaises(SystemExit): + wls.get_random_emitters(topology, config) + + def test__is_simulation_finished(self): + # time.time returns the number of seconds passed since epoch + mock_time = self.create_patch('time.time') + # we assume we are at time 10 + mock_time.return_value = 10 + + # we want the simulation last for 5 seconds + wls_config = {'simulation_time': 5} + + # if simulation started at time 3, 10-5 >= 3 -> true + finished = wls._is_simulation_finished(3, wls_config, {}) + self.assertTrue(finished) + + def test__is_simulation_finished_false(self): + # time.time returns the number of seconds passed since epoch + mock_time = self.create_patch('time.time') + # we assume we are at time 10 + mock_time.return_value = 10 + + # we want the simulation last for 5 seconds + wls_config = {'simulation_time': 5} + + # if simulation started at time 7, 10-7 >= 7 -> false + finished = wls._is_simulation_finished(7, wls_config, {}) + self.assertFalse(finished) + + def test__time_to_send_text_message_true(self): + mock_time = self.create_patch('time.time') + mock_time.return_value = 5 + + # We want to send messages 
every second, last message time was 1, + # and we are at time 5-> true + next_message = wls._time_to_send_next_message(1, 1) + self.assertTrue(next_message) + + def test__time_to_send_text_message_false(self): + mock_time = self.create_patch('time.time') + mock_time.return_value = 5 + + # We want to send messages every 10 seconds, last message time was 1, + # and we are at time 5-> false + next_message = wls._time_to_send_next_message(1, 10) + self.assertFalse(next_message) + + def test__select_emitter_and_topic(self): + emitters = {"test1": {"ip_address": 1, "ports": {"rpc_test1": (2, "asd")}, + "topics": ["test1a", "test1b"]}, + "test2": {"ip_address": 5, "ports": {"rpc_test2": (6, "tcp")}, + "topics": ["test2a", "test2b"]}, + "test3": {"ip_address": 10, "ports": {"rpc_test3": (11, "tcp")}, + "topics": ["test3a", "test3b"]}} + + emitter_address, topic = wls._select_emitter_with_topic(emitters) + + self.assertEqual(emitter_address, "http://5:6/") + self.assertEqual(topic, "test2b") + + def test__inject_message(self): + mock_dist = self.create_patch('src.utils.payloads.make_payload_dist') + mock_dist.return_value = "payload", 2 + mock_send_message = self.create_patch('src.utils.waku_messaging.send_msg_to_node') + mock_send_message.return_value = {'result': True}, None, "asd", 1 + + messages_dict = {} + wls_config = {"dist_type": "dist", "min_packet_size": 1, "max_packet_size": 10} + wls._inject_message("1.1.1.1", "test", messages_dict, wls_config) + self.assertIn("688787d8ff144c502c7f5cffaafe2cc588d86079f9de88304c26b0cb99ce91c6", + messages_dict.keys()) + hash_dict = messages_dict["688787d8ff144c502c7f5cffaafe2cc588d86079f9de88304c26b0cb99ce91c6"] + self.assertEqual(hash_dict['ts'], 1) + self.assertEqual(hash_dict['injection_point'], "1.1.1.1") + self.assertEqual(hash_dict['nonce'], 0) + self.assertEqual(hash_dict['topic'], "test") + self.assertEqual(hash_dict['payload'], "payload") + self.assertEqual(hash_dict['payload_size'], 2) + + def 
test__inject_message_error(self): + mock_dist = self.create_patch('src.utils.payloads.make_payload_dist') + mock_dist.return_value = "payload", 2 + mock_send_message = self.create_patch('src.utils.waku_messaging.send_msg_to_node') + mock_send_message.return_value = {'result': True}, None, "asd", 1 + + messages_dict = {"688787d8ff144c502c7f5cffaafe2cc588d86079f9de88304c26b0cb99ce91c6": "asd"} + wls_config = {"dist_type": "dist", "min_packet_size": 1, "max_packet_size": 10} + wls._inject_message("1.1.1.1", "test", messages_dict, wls_config) + self.assertEqual(len(messages_dict), 1) + From 60998586c03b0844791d37eb199ea45acf2f794b Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Wed, 8 Mar 2023 18:19:19 +0100 Subject: [PATCH 083/112] Bug: Ports shift values are now used --- src/node_builders/types/gowaku_builder.star | 15 +++++++++------ src/node_builders/types/nwaku_builder.star | 12 +++++++----- src/node_builders/types/waku_builder.star | 21 ++++++++++++--------- 3 files changed, 28 insertions(+), 20 deletions(-) diff --git a/src/node_builders/types/gowaku_builder.star b/src/node_builders/types/gowaku_builder.star index c5f3799..26827bc 100644 --- a/src/node_builders/types/gowaku_builder.star +++ b/src/node_builders/types/gowaku_builder.star @@ -5,10 +5,11 @@ vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") waku_builder = import_module(vars.WAKU_BUILDER_MODULE) -def prepare_gowaku_service(gowakunode_name, all_services, config_files, artifact_ids, service_id): - prepared_ports = waku_builder.prepare_waku_ports_in_service(gowakunode_name) - prepared_files = waku_builder.prepare_waku_config_files_in_service(gowakunode_name, artifact_ids) - prepared_cmd = _prepare_gowaku_cmd_in_service(gowakunode_name, config_files) +def prepare_gowaku_service(gowakunode_names, all_services, config_files, artifact_ids, service_id, + network_topology): + prepared_ports = waku_builder.prepare_waku_ports_in_service(gowakunode_names, network_topology) + 
prepared_files = waku_builder.prepare_waku_config_files_in_service(gowakunode_names, artifact_ids) + prepared_cmd = _prepare_gowaku_cmd_in_service(gowakunode_names, config_files, network_topology) add_service_config = ServiceConfig( image=vars.GOWAKU_IMAGE, @@ -21,15 +22,17 @@ def prepare_gowaku_service(gowakunode_name, all_services, config_files, artifact all_services[service_id] = add_service_config -def _prepare_gowaku_cmd_in_service(gowakunode_names, config_files): +def _prepare_gowaku_cmd_in_service(gowakunode_names, config_files, network_topology): prepared_cmd = "" for i in range(len(gowakunode_names)): prepared_cmd += vars.GOWAKU_ENTRYPOINT + " " prepared_cmd += vars.WAKUNODE_CONFIGURATION_FILE_FLAG + \ vars.CONTAINER_NODE_CONFIG_FILE_LOCATION + \ gowakunode_names[i] + "/" + config_files[i] + " " - prepared_cmd += vars.WAKUNODE_PORT_SHIFT_FLAG + str(i) + prepared_cmd += vars.WAKUNODE_PORT_SHIFT_FLAG + \ + str(network_topology[vars.GENNET_NODES_KEY][gowakunode_names[i]][vars.GENNET_PORT_SHIFT_KEY]) if i != len(gowakunode_names) - 1: prepared_cmd += " & " + return [prepared_cmd] \ No newline at end of file diff --git a/src/node_builders/types/nwaku_builder.star b/src/node_builders/types/nwaku_builder.star index e0d622a..7aa006b 100644 --- a/src/node_builders/types/nwaku_builder.star +++ b/src/node_builders/types/nwaku_builder.star @@ -5,10 +5,11 @@ vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") waku_builder = import_module(vars.WAKU_BUILDER_MODULE) -def prepare_nwaku_service(nwakunode_names, all_services, config_files, artifact_ids, service_id): - prepared_ports = waku_builder.prepare_waku_ports_in_service(nwakunode_names) +def prepare_nwaku_service(nwakunode_names, all_services, config_files, artifact_ids, service_id, + network_topology): + prepared_ports = waku_builder.prepare_waku_ports_in_service(nwakunode_names, network_topology) prepared_files = waku_builder.prepare_waku_config_files_in_service(nwakunode_names, 
artifact_ids) - prepared_cmd = _prepare_nwaku_cmd_in_service(nwakunode_names, config_files) + prepared_cmd = _prepare_nwaku_cmd_in_service(nwakunode_names, config_files, network_topology) add_service_config = ServiceConfig( image=vars.NWAKU_IMAGE, @@ -21,14 +22,15 @@ def prepare_nwaku_service(nwakunode_names, all_services, config_files, artifact_ all_services[service_id] = add_service_config -def _prepare_nwaku_cmd_in_service(nwakunode_names, config_files): +def _prepare_nwaku_cmd_in_service(nwakunode_names, config_files, network_topology): prepared_cmd = "" for i in range(len(nwakunode_names)): prepared_cmd += vars.NWAKU_ENTRYPOINT + " " prepared_cmd += vars.WAKUNODE_CONFIGURATION_FILE_FLAG + \ vars.CONTAINER_NODE_CONFIG_FILE_LOCATION + \ nwakunode_names[i] + "/" + config_files[i] + " " - prepared_cmd += vars.WAKUNODE_PORT_SHIFT_FLAG + str(i) + prepared_cmd += vars.WAKUNODE_PORT_SHIFT_FLAG + \ + str(network_topology[vars.GENNET_NODES_KEY][nwakunode_names[i]][vars.GENNET_PORT_SHIFT_KEY]) if i != len(nwakunode_names) - 1: prepared_cmd += " & " diff --git a/src/node_builders/types/waku_builder.star b/src/node_builders/types/waku_builder.star index 6e256c4..a686aaa 100644 --- a/src/node_builders/types/waku_builder.star +++ b/src/node_builders/types/waku_builder.star @@ -2,19 +2,22 @@ vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") -def prepare_waku_ports_in_service(wakunode_names): +def prepare_waku_ports_in_service(node_names, network_topology): prepared_ports = {} - for i in range(len(wakunode_names)): - prepared_ports[vars.RPC_PORT_ID + "_" + wakunode_names[i]] = \ - PortSpec(number=vars.WAKU_RPC_PORT_NUMBER + i, + + for node_name in node_names: + node_info = network_topology[vars.GENNET_NODES_KEY][node_name] + + prepared_ports[vars.RPC_PORT_ID + "_" + node_name] = \ + PortSpec(number=vars.WAKU_RPC_PORT_NUMBER + node_info[vars.GENNET_PORT_SHIFT_KEY], transport_protocol=vars.WAKU_RPC_PORT_PROTOCOL) - 
prepared_ports[vars.PROMETHEUS_PORT_ID + "_" + wakunode_names[i]] = \ - PortSpec(number=vars.PROMETHEUS_PORT_NUMBER + i, + prepared_ports[vars.PROMETHEUS_PORT_ID + "_" + node_name] = \ + PortSpec(number=vars.PROMETHEUS_PORT_NUMBER + node_info[vars.GENNET_PORT_SHIFT_KEY], transport_protocol=vars.PROMETHEUS_PORT_PROTOCOL) - prepared_ports[vars.WAKU_LIBP2P_PORT_ID + "_" + wakunode_names[i]] = \ - PortSpec(number=vars.WAKU_LIBP2P_PORT + i, + prepared_ports[vars.WAKU_LIBP2P_PORT_ID + "_" + node_name] = \ + PortSpec(number=vars.WAKU_LIBP2P_PORT + node_info[vars.GENNET_PORT_SHIFT_KEY], transport_protocol=vars.WAKU_LIBP2P_PORT_PROTOCOL) return prepared_ports @@ -28,7 +31,7 @@ def prepare_waku_config_files_in_service(node_names, artifact_ids): return prepared_files -def _add_waku_ports_info_to_topology(network_topology, all_services_information, node_info, node_id): +def add_waku_ports_info_to_topology(network_topology, all_services_information, node_info, node_id): waku_rpc_port_id = vars.RPC_PORT_ID + "_" + node_id libp2p_port_id = vars.WAKU_LIBP2P_PORT_ID + "_" + node_id prometheus_port_id = vars.PROMETHEUS_PORT_ID + "_" + node_id From d6cc337403e09911ba689539fa08750bc377588a Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Wed, 8 Mar 2023 18:20:04 +0100 Subject: [PATCH 084/112] Moved dispatchers to other file --- src/node_builders/dispatchers.star | 41 ++++++++++++++++++++++ src/node_builders/node_builders.star | 51 ++++------------------------ src/system_variables.star | 10 ++++-- 3 files changed, 56 insertions(+), 46 deletions(-) create mode 100644 src/node_builders/dispatchers.star diff --git a/src/node_builders/dispatchers.star b/src/node_builders/dispatchers.star new file mode 100644 index 0000000..99f6212 --- /dev/null +++ b/src/node_builders/dispatchers.star @@ -0,0 +1,41 @@ +# System Imports +vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") + +# Module Imports +waku = import_module(vars.WAKU_MODULE) +nomos = 
import_module(vars.NOMOS_MODULE) +waku_builder = import_module(vars.WAKU_BUILDER_MODULE) +nwaku_builder = import_module(vars.NWAKU_BUILDER_MODULE) +gowaku_builder = import_module(vars.GOWAKU_BUILDER_MODULE) +nomos_builder = import_module(vars.NOMOS_BUILDER_MODULE) + + +service_builder_dispatcher = { + vars.GENNET_GOWAKU_IMAGE_VALUE: gowaku_builder.prepare_gowaku_service, + vars.GENNET_NWAKU_IMAGE_VALUE: nwaku_builder.prepare_nwaku_service, + vars.GENNET_NOMOS_IMAGE_VALUE: nomos_builder.prepare_nomos_service +} + +service_info_dispatcher = { + vars.GENNET_GOWAKU_IMAGE_VALUE: waku.get_wakunode_peer_id, + vars.GENNET_NWAKU_IMAGE_VALUE: waku.get_wakunode_peer_id, + vars.GENNET_NOMOS_IMAGE_VALUE: nomos.get_nomos_peer_id +} + +service_multiaddr_dispatcher = { + vars.GENNET_GOWAKU_IMAGE_VALUE: waku.create_node_multiaddress, + vars.GENNET_NWAKU_IMAGE_VALUE: waku.create_node_multiaddress, + vars.GENNET_NOMOS_IMAGE_VALUE: nomos.create_node_multiaddress +} + +service_connect_dispatcher = { + vars.GENNET_GOWAKU_IMAGE_VALUE: waku.connect_wakunode_to_peers, + vars.GENNET_NWAKU_IMAGE_VALUE: waku.connect_wakunode_to_peers, + vars.GENNET_NOMOS_IMAGE_VALUE: nomos.connect_nomos_to_peers +} + +ports_dispatcher = { + vars.GENNET_GOWAKU_IMAGE_VALUE: waku_builder.add_waku_ports_info_to_topology, + vars.GENNET_NWAKU_IMAGE_VALUE: waku_builder.add_waku_ports_info_to_topology, + vars.GENNET_NOMOS_IMAGE_VALUE: nomos_builder.add_nomos_ports_info_to_topology +} \ No newline at end of file diff --git a/src/node_builders/node_builders.star b/src/node_builders/node_builders.star index 1063b6a..f8246de 100644 --- a/src/node_builders/node_builders.star +++ b/src/node_builders/node_builders.star @@ -2,44 +2,8 @@ vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") # Module Imports -waku = import_module(vars.WAKU_MODULE) -nomos = import_module(vars.NOMOS_MODULE) files = import_module(vars.FILE_HELPERS_MODULE) -waku_builder = import_module(vars.WAKU_BUILDER_MODULE) 
-nwaku_builder = import_module(vars.NWAKU_BUILDER_MODULE) -gowaku_builder = import_module(vars.GOWAKU_BUILDER_MODULE) -nomos_builder = import_module(vars.NOMOS_BUILDER_MODULE) - - -service_builder_dispatcher = { - vars.GENNET_GOWAKU_IMAGE_VALUE: gowaku_builder.prepare_gowaku_service, - vars.GENNET_NWAKU_IMAGE_VALUE: nwaku_builder.prepare_nwaku_service, - vars.GENNET_NOMOS_IMAGE_VALUE: nomos_builder.prepare_nomos_service -} - -service_info_dispatcher = { - vars.GENNET_GOWAKU_IMAGE_VALUE: waku.get_wakunode_peer_id, - vars.GENNET_NWAKU_IMAGE_VALUE: waku.get_wakunode_peer_id, - vars.GENNET_NOMOS_IMAGE_VALUE: nomos.get_nomos_peer_id -} - -service_multiaddr_dispatcher = { - vars.GENNET_GOWAKU_IMAGE_VALUE: waku.create_node_multiaddress, - vars.GENNET_NWAKU_IMAGE_VALUE: waku.create_node_multiaddress, - vars.GENNET_NOMOS_IMAGE_VALUE: nomos.create_node_multiaddress -} - -service_connect_dispatcher = { - vars.GENNET_GOWAKU_IMAGE_VALUE: waku.connect_wakunode_to_peers, - vars.GENNET_NWAKU_IMAGE_VALUE: waku.connect_wakunode_to_peers, - vars.GENNET_NOMOS_IMAGE_VALUE: nomos.connect_nomos_to_peers -} - -ports_dispatcher = { - vars.GENNET_GOWAKU_IMAGE_VALUE: waku_builder._add_waku_ports_info_to_topology, - vars.GENNET_NWAKU_IMAGE_VALUE: waku_builder._add_waku_ports_info_to_topology, - vars.GENNET_NOMOS_IMAGE_VALUE: nomos_builder._add_nomos_ports_info_to_topology -} +dispatchers = import_module(vars.DISPATCHERS_MODULE) def instantiate_services(plan, network_topology, testing): """ @@ -61,14 +25,13 @@ def instantiate_services(plan, network_topology, testing): }, "node_1": {...} } - } """ all_services_configuration = {} for service_id, nodes_in_service in network_topology[vars.GENNET_ALL_CONTAINERS_KEY].items(): image = network_topology[vars.GENNET_NODES_KEY][nodes_in_service[0]][vars.GENNET_IMAGE_KEY] - service_builder = service_builder_dispatcher[image] + service_builder = dispatchers.service_builder_dispatcher[image] # Get all config file names needed config_file_names = 
[network_topology[vars.GENNET_NODES_KEY][node][vars.GENNET_CONFIG_KEY] @@ -81,7 +44,7 @@ def instantiate_services(plan, network_topology, testing): ] service_builder(nodes_in_service, all_services_configuration, config_file_names, - config_files_artifact_ids, service_id) + config_files_artifact_ids, service_id, network_topology) all_services_information = plan.add_services( configs=all_services_configuration @@ -97,8 +60,8 @@ def interconnect_nodes(plan, topology_information, interconnection_batch): for node_id in nodes_in_topology.keys(): image = nodes_in_topology[node_id][vars.GENNET_IMAGE_KEY] peers = nodes_in_topology[node_id][vars.GENNET_STATIC_NODES_KEY] - create_node_multiaddress = service_multiaddr_dispatcher[image] - connect_node_to_peers = service_connect_dispatcher[image] + create_node_multiaddress = dispatchers.service_multiaddr_dispatcher[image] + connect_node_to_peers = dispatchers.service_connect_dispatcher[image] for i in range(0, len(peers), interconnection_batch): peer_ids = [create_node_multiaddress(peer, nodes_in_topology[peer]) @@ -114,7 +77,7 @@ def _add_service_info_to_topology(plan, all_services_information, network_topolo node_rpc_port_id = vars.RPC_PORT_ID + "_" + node_id image = network_topology[vars.GENNET_NODES_KEY][node_id][vars.GENNET_IMAGE_KEY] - peer_id_getter = service_info_dispatcher[image] + peer_id_getter = dispatchers.service_info_dispatcher[image] node_peer_id = peer_id_getter(plan, node_info[vars.GENNET_NODE_CONTAINER_KEY], node_rpc_port_id) @@ -123,5 +86,5 @@ def _add_service_info_to_topology(plan, all_services_information, network_topolo network_topology[vars.GENNET_NODES_KEY][node_id][vars.IP_KEY] = \ all_services_information[node_info[vars.GENNET_NODE_CONTAINER_KEY]].ip_address - ports_adder = ports_dispatcher[node_info[vars.GENNET_IMAGE_KEY]] + ports_adder = dispatchers.ports_dispatcher[node_info[vars.GENNET_IMAGE_KEY]] ports_adder(network_topology, all_services_information, node_info, node_id) diff --git 
a/src/system_variables.star b/src/system_variables.star index 26a9d5d..46a50cc 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -1,5 +1,5 @@ # Waku Configuration -NWAKU_IMAGE = "statusteam/nim-waku:nwaku-trace" +NWAKU_IMAGE = "statusteam/nim-waku:nwaku-trace2" GOWAKU_IMAGE = "gowaku" RPC_PORT_ID = "rpc" @@ -74,6 +74,7 @@ CONTAINER_DATASOURCES_FILE_NAME_GRAFANA = "datasources.yaml" # Gennet topology Keys GENNET_NODES_KEY = "nodes" +GENNET_PORT_SHIFT_KEY = "port_shift" GENNET_ALL_CONTAINERS_KEY = "containers" GENNET_IMAGE_KEY = "image" GENNET_CONFIG_KEY = "node_config" @@ -110,6 +111,7 @@ GET_PEERS_METHOD = "get_waku_v2_admin_v1_peers" # Import locations WAKU_MODULE = "github.com/logos-co/wakurtosis/src/waku.star" NODE_BUILDERS_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/node_builders.star" +DISPATCHERS_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/dispatchers.star" WAKU_BUILDER_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/types/waku_builder.star" NWAKU_BUILDER_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/types/nwaku_builder.star" GOWAKU_BUILDER_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/types/gowaku_builder.star" @@ -123,10 +125,14 @@ WLS_MODULE = "github.com/logos-co/wakurtosis/src/wls.star" CALL_PROTOCOLS = "github.com/logos-co/wakurtosis/src/call_protocols.star" NOMOS_MODULE = "github.com/logos-co/wakurtosis/src/nomos.star" + TEST_ARGUMENTS_MODULE = "github.com/logos-co/wakurtosis/src/tests/test_arguments_parser.star" TEST_FILES_MODULE = "github.com/logos-co/wakurtosis/src/tests/test_file_helpers.star" -TEST_NODE_BUILDERS_MODULE = "github.com/logos-co/wakurtosis/src/tests/test_node_builders.star" TEST_WAKU_MODULE = "github.com/logos-co/wakurtosis/src/tests/test_waku_methods.star" +TEST_NODE_BUILDERS_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/tests/test_node_builders.star" +TEST_WAKU_BUILDER_MODULE = 
"github.com/logos-co/wakurtosis/src/node_builders/types/tests/test_waku_builder.star" +TEST_GOWAKU_BUILDER_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/types/tests/test_gowaku_builder.star" +TEST_NWAKU_BUILDER_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/types/tests/test_nwaku_builder.star" # Default main starlark arguments TOPOLOGIES_LOCATION = "github.com/logos-co/wakurtosis/config/topology_generated/" From 2686ed839b608032b04c63c52d02225056e90ed7 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Wed, 8 Mar 2023 18:21:18 +0100 Subject: [PATCH 085/112] Made nomos method public by sintax --- src/node_builders/types/nomos_builder.star | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/node_builders/types/nomos_builder.star b/src/node_builders/types/nomos_builder.star index fc2ac3f..d51eb9d 100644 --- a/src/node_builders/types/nomos_builder.star +++ b/src/node_builders/types/nomos_builder.star @@ -57,7 +57,7 @@ def _prepare_nomos_config_files_in_service(node_names, artifact_ids): return prepared_files -def _add_nomos_ports_info_to_topology(network_topology, all_services_information, node_info, node_id): +def add_nomos_ports_info_to_topology(network_topology, all_services_information, node_info, node_id): nomos_rpc_port_id = vars.RPC_PORT_ID + "_" + node_id libp2p_port_id = vars.NOMOS_LIBP2P_PORT_ID + "_" + node_id prometheus_port_id = vars.PROMETHEUS_PORT_ID + "_" + node_id From fd4556445ae7ee0d72ad3fe05b86a8cb37ef840f Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Wed, 8 Mar 2023 18:21:38 +0100 Subject: [PATCH 086/112] Removed unnused method from wls.star --- src/wls.star | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/src/wls.star b/src/wls.star index 3e9a732..1bc9b79 100644 --- a/src/wls.star +++ b/src/wls.star @@ -13,28 +13,6 @@ def upload_config(plan, config_file): return config_artifact -def create_targets(plan, services): - - # Get private ip and ports of all nodes - 
template_data = files.generate_template_node_targets(services, vars.RPC_PORT_ID, "targets") - - # Template - template = """ - {{.targets}} - """ - - artifact_id = plan.render_templates( - config={ - vars.CONTAINER_TARGETS_FILE_NAME_WLS: struct( - template=template, - data=template_data, - ) - }, - name="wls_targets" - ) - - return artifact_id - def create_new_topology_information(plan, network_topology): template = """ {{.information}} From 0e67be8e1c49f32d482e0ce7640256b4a78ec4e6 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Wed, 8 Mar 2023 18:22:01 +0100 Subject: [PATCH 087/112] Fixed get peers method for multinode approach --- src/waku.star | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/waku.star b/src/waku.star index 79f598e..3e1f316 100644 --- a/src/waku.star +++ b/src/waku.star @@ -49,11 +49,12 @@ def make_service_wait(plan, service_name, time): plan.exec(exec_recipe) -def get_waku_peers(plan, waku_service_name): +def get_waku_peers(plan, waku_service_container, node_name): extract = {"peers": '.result | length'} + port_name = vars.RPC_PORT_ID +"_" + node_name - response = call_protocols.send_json_rpc(plan, waku_service_name, vars.RPC_PORT_ID, - vars.GET_PEERS_METHOD, "", extract) + response = call_protocols.send_json_rpc(plan, waku_service_container, port_name, + vars.GET_PEERS_METHOD, "", extract) plan.assert(value=response["code"], assertion="==", target_value=200) From 2e4f8e42a8a4e2c7ebee3f4d2648c744c24c52e1 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Wed, 8 Mar 2023 18:22:37 +0100 Subject: [PATCH 088/112] Fixed test_file_helpers.star because new gennet information --- src/tests/test_file_helpers.star | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/src/tests/test_file_helpers.star b/src/tests/test_file_helpers.star index 898cf88..e367ae3 100644 --- a/src/tests/test_file_helpers.star +++ b/src/tests/test_file_helpers.star @@ -20,23 +20,22 @@ def 
test_get_toml_configuration_artifact_same_config_false(plan): def test_generate_template_node_targets_single(plan): - service_struct = struct(ip_address="1.1.1.1", ports={"http": PortSpec(number=80)}) - services_example={"test1":{"service_info": service_struct}} + network_topology={"nodes": {"test1":{"ip_address": "1.1.1.1", + "ports": { "rpc_test1": (80, 'tcp')}}}} - template_data = files.generate_template_node_targets(services_example, "http") + template_data = files.generate_template_node_targets(network_topology, "rpc", "targets") plan.assert(value=template_data["targets"], assertion="==", target_value='["1.1.1.1:80"]') def test_generate_template_node_targets_multiple(plan): - service_struct_1 = struct(ip_address="1.1.1.1", ports={"http": PortSpec(number=80)}) - service_struct_2 = struct(ip_address="2.2.2.2", ports={"http": PortSpec(number=88)}) - services_example={"test1":{"service_info": service_struct_1}, - "test2":{"service_info": service_struct_2}} + network_topology={"nodes": {"test1":{"ip_address": "1.1.1.1", "ports": { "rpc_test1": (80, 'tcp')}}, + "test2":{"ip_address": "2.2.2.2", "ports": { "rpc_test2": (10, 'tcp')}}}} - template_data = files.generate_template_node_targets(services_example, "http") + template_data = files.generate_template_node_targets(network_topology, "rpc", "targets") - plan.assert(value=template_data["targets"], assertion="==",target_value='["1.1.1.1:80","2.2.2.2:88"]') + plan.assert(value=template_data["targets"], assertion="==", + target_value='["1.1.1.1:80","2.2.2.2:10"]') def test_generate_template_prometheus_url(plan): prometheus_service_struct = struct(ip_address="1.2.3.4", From 462b3cbb871b993f0b3441715a0a9d51c5364b15 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Wed, 8 Mar 2023 18:22:59 +0100 Subject: [PATCH 089/112] Added new data and modified old one for tests --- config/test_files/nwaku_2.toml | 5 ++ config/test_files/nwaku_3.toml | 5 ++ config/test_files/test_network_data.json | 66 +++++++++++++++++----- 
config/test_files/test_network_data_2.json | 42 +++++++++----- 4 files changed, 92 insertions(+), 26 deletions(-) create mode 100644 config/test_files/nwaku_2.toml create mode 100644 config/test_files/nwaku_3.toml diff --git a/config/test_files/nwaku_2.toml b/config/test_files/nwaku_2.toml new file mode 100644 index 0000000..baf275d --- /dev/null +++ b/config/test_files/nwaku_2.toml @@ -0,0 +1,5 @@ +topics="test" +rpc-admin=true +keep-alive=true +metrics-server=true +nodekey="80fb8f9b71c808bc186bc8c6639a12446e667d031e8ac4896efad603e21728b4" \ No newline at end of file diff --git a/config/test_files/nwaku_3.toml b/config/test_files/nwaku_3.toml new file mode 100644 index 0000000..36373c7 --- /dev/null +++ b/config/test_files/nwaku_3.toml @@ -0,0 +1,5 @@ +topics="test" +rpc-admin=true +keep-alive=true +metrics-server=true +nodekey="80fb8f9b71c808bc186bc8c6639a12446e667d031e8ac4896efad603e21728b5" \ No newline at end of file diff --git a/config/test_files/test_network_data.json b/config/test_files/test_network_data.json index bc87274..b13eeee 100644 --- a/config/test_files/test_network_data.json +++ b/config/test_files/test_network_data.json @@ -1,18 +1,58 @@ { - "nwaku_0": { - "static_nodes": [ - "nwaku_1" + "containers": { + "container_0": [ + "nwaku_0", + "nwaku_2" ], - "subnetwork": "subnetwork_0", - "image": "nim-waku", - "configuration": "nwaku_0.toml" + "container_1": [ + "nwaku_1", + "nwaku_3" + ] }, - "nwaku_1": { - "static_nodes": [ - "nwaku_0" - ], - "subnetwork": "subnetwork_0", - "image": "nim-waku", - "configuration": "nwaku_1.toml" + "nodes": { + "nwaku_0": { + "static_nodes": [ + "nwaku_1" + ], + "subnetwork": "subnetwork_0", + "image": "nim-waku", + "node_config": "nwaku_0.toml", + "node_log": "node_0.log", + "port_shift": 0, + "container_id": "container_0" + }, + "nwaku_1": { + "static_nodes": [ + "nwaku_0" + ], + "subnetwork": "subnetwork_0", + "image": "nim-waku", + "node_config": "nwaku_1.toml", + "node_log": "node_1.log", + "port_shift": 0, + 
"container_id": "container_1" + }, + "nwaku_2": { + "static_nodes": [ + "nwaku_3" + ], + "subnetwork": "subnetwork_0", + "image": "nim-waku", + "node_config": "nwaku_2.toml", + "node_log": "node_1.log", + "port_shift": 1, + "container_id": "container_0" + }, + "nwaku_3": { + "static_nodes": [ + "nwaku_2" + ], + "subnetwork": "subnetwork_0", + "image": "nim-waku", + "node_config": "nwaku_3.toml", + "node_log": "node_3.log", + "port_shift": 1, + "container_id": "container_1" + } } } \ No newline at end of file diff --git a/config/test_files/test_network_data_2.json b/config/test_files/test_network_data_2.json index b6dbba0..8f01a21 100644 --- a/config/test_files/test_network_data_2.json +++ b/config/test_files/test_network_data_2.json @@ -1,18 +1,34 @@ { - "nwaku_0_2": { - "static_nodes": [ - "nwaku_1_2" - ], - "subnetwork": "subnetwork_0", - "image": "nim-waku", - "configuration": "nwaku_0_2.toml" - }, - "nwaku_1_2": { - "static_nodes": [ + "containers": { + "cid1": [ "nwaku_0_2" ], - "subnetwork": "subnetwork_0", - "image": "nim-waku", - "configuration": "nwaku_1_2.toml" + "cid2": [ + "nwaku_1_2" + ] + }, + "nodes": { + "nwaku_0_2": { + "static_nodes": [ + "nwaku_1_2" + ], + "subnetwork": "subnetwork_0", + "image": "nim-waku", + "node_config": "nwaku_0_2.toml", + "node_log": "node_0_2.log", + "port_shift": 0, + "container_id": "cid1" + }, + "nwaku_1_2": { + "static_nodes": [ + "nwaku_0_2" + ], + "subnetwork": "subnetwork_0", + "image": "nim-waku", + "node_config": "nwaku_1_2.toml", + "node_log": "node_1_2.log", + "port_shift": 0, + "container_id": "cid2" + } } } \ No newline at end of file From 21202c01440fc133c3347136ed6a9f75a2b77f2e Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Wed, 8 Mar 2023 18:23:56 +0100 Subject: [PATCH 090/112] Moved and changed test_node_builders.star --- .../tests/test_node_builders.star | 33 +++++++ src/tests/test_node_builders.star | 92 ------------------- 2 files changed, 33 insertions(+), 92 deletions(-) create mode 100644 
src/node_builders/tests/test_node_builders.star delete mode 100644 src/tests/test_node_builders.star diff --git a/src/node_builders/tests/test_node_builders.star b/src/node_builders/tests/test_node_builders.star new file mode 100644 index 0000000..e1bef31 --- /dev/null +++ b/src/node_builders/tests/test_node_builders.star @@ -0,0 +1,33 @@ +# System Imports +vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") + +# Module Imports +node_builders = import_module(vars.NODE_BUILDERS_MODULE) +waku = import_module(vars.WAKU_MODULE) + + +def test_instantiate_services(plan): + topology = read_file(src=vars.TEST_FILES_LOCATION + + vars.DEFAULT_TOPOLOGY_FILE_DEFAULT_ARGUMENT_VALUE) + topology = json.decode(topology) + + node_builders.instantiate_services(plan, topology, True) + + for node_info in topology["nodes"].values(): + plan.assert(value="peer_id", assertion="IN", target_value=node_info.keys()) + plan.assert (value="ip_address", assertion="IN", target_value=node_info.keys()) + plan.assert (value="ports", assertion="IN", target_value=node_info.keys()) + + node_builders.interconnect_nodes(plan, topology, 1) + _test_node_neighbours(plan, topology) + + for node_id in topology["containers"].keys(): + plan.remove_service(node_id) + + +def _test_node_neighbours(plan, topology): + for node_name, node_info in topology["nodes"].items(): + peers = waku.get_waku_peers(plan, node_info["container_id"], node_name) + plan.assert(value=peers, assertion="==", target_value=1) + + diff --git a/src/tests/test_node_builders.star b/src/tests/test_node_builders.star deleted file mode 100644 index be3ec5a..0000000 --- a/src/tests/test_node_builders.star +++ /dev/null @@ -1,92 +0,0 @@ -# System Imports -vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") - -# Module Imports -node_builders = import_module(vars.NODE_BUILDERS_MODULE) -waku = import_module(vars.WAKU_MODULE) - - -def test_prepare_nwaku_service(plan): - test_dict = {} - 
node_builders.prepare_nwaku_service("test", test_dict, "test.toml", "id_1") - - # hasattr doesn't work in dicts? - plan.assert(value=str(test_dict.get("test")), - assertion="!=", target_value="None") - plan.assert(value=test_dict["test"].image, - assertion="==", target_value=vars.NWAKU_IMAGE) - plan.assert(value=str(test_dict["test"].ports[vars.WAKU_RPC_PORT_ID].number), - assertion="==", target_value=str(vars.WAKU_RPC_PORT_NUMBER)) - plan.assert(value=str(test_dict["test"].ports[vars.PROMETHEUS_PORT_ID].number), - assertion="==", target_value=str(vars.PROMETHEUS_PORT_NUMBER)) - plan.assert(value=str(test_dict["test"].ports[vars.WAKU_LIBP2P_PORT_ID].number), - assertion="==", target_value=str(vars.WAKU_LIBP2P_PORT)) - plan.assert(value=test_dict["test"].files[vars.CONTAINER_NODE_CONFIG_FILE_LOCATION], - assertion="==", target_value="id_1") - # Only way to assert lists? - for i in range(len(test_dict["test"].entrypoint)): - plan.assert(value=test_dict["test"].entrypoint[i], - assertion="==", target_value=vars.NWAKU_ENTRYPOINT[i]) - plan.assert(value=test_dict["test"].cmd[0], - assertion="==", target_value=vars.NODE_CONFIGURATION_FILE_FLAG + - vars.CONTAINER_NODE_CONFIG_FILE_LOCATION + - "test.toml") - - - -def test_prepare_gowaku_service(plan): - test_dict = {} - node_builders.prepare_gowaku_service("test", test_dict, "test.toml", "id_2") - - # hasattr doesn't work in dicts? 
- plan.assert(value=str(test_dict.get("test")), - assertion="!=", target_value="None") - plan.assert(value=test_dict["test"].image, - assertion="==", target_value=vars.GOWAKU_IMAGE) - plan.assert(value=str(test_dict["test"].ports[vars.WAKU_RPC_PORT_ID].number), - assertion="==", target_value=str(vars.WAKU_RPC_PORT_NUMBER)) - plan.assert(value=str(test_dict["test"].ports[vars.PROMETHEUS_PORT_ID].number), - assertion="==", target_value=str(vars.PROMETHEUS_PORT_NUMBER)) - plan.assert(value=str(test_dict["test"].ports[vars.WAKU_LIBP2P_PORT_ID].number), - assertion="==", target_value=str(vars.WAKU_LIBP2P_PORT)) - plan.assert(value=test_dict["test"].files[vars.CONTAINER_NODE_CONFIG_FILE_LOCATION], - assertion="==", target_value="id_2") - # Only way to assert lists? - for i in range(len(test_dict["test"].entrypoint)): - plan.assert(value=test_dict["test"].entrypoint[i], - assertion="==", target_value=vars.GOWAKU_ENTRYPOINT[i]) - plan.assert(value=test_dict["test"].cmd[0], - assertion="==", target_value=vars.NODE_CONFIGURATION_FILE_FLAG + - vars.CONTAINER_NODE_CONFIG_FILE_LOCATION + "test.toml") - - -def test_instantiate_services(plan): - topology = read_file(src=vars.TEST_FILES_LOCATION + - vars.DEFAULT_TOPOLOGY_FILE_DEFAULT_ARGUMENT_VALUE) - topology = json.decode(topology) - - node_test_services = node_builders.instantiate_services(plan, topology, True) - - waku.interconnect_waku_nodes(plan, topology, node_test_services) - _test_node_neighbours(plan, node_test_services) - _test__add_waku_service_information(plan, node_test_services) - - for node_id in node_test_services.keys(): - plan.remove_service(node_id) - - -def _test_node_neighbours(plan, topology_information): - for node_name in topology_information.keys(): - peers = waku.get_waku_peers(plan, node_name) - plan.assert(value=peers, assertion="==", target_value=1) - - -def _test__add_waku_service_information(plan, node_test_services): - # Already done in instantiate_services, so here just checking data is correct - 
- plan.assert(value=str(len(node_test_services)), assertion="==", target_value="2") - plan.assert(value=str(node_test_services.get("nwaku_0")), - assertion="!=", target_value="None") - plan.assert(value=str(node_test_services.get("nwaku_1")), - assertion="!=", target_value="None") - From 96979724709cb468de6c0ea417b7390f055ae65b Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Wed, 8 Mar 2023 18:24:23 +0100 Subject: [PATCH 091/112] Fixed tests for test_waku_methods.star --- src/tests/test_waku_methods.star | 74 +++++++++++--------------------- 1 file changed, 26 insertions(+), 48 deletions(-) diff --git a/src/tests/test_waku_methods.star b/src/tests/test_waku_methods.star index e4f6138..1de61b7 100644 --- a/src/tests/test_waku_methods.star +++ b/src/tests/test_waku_methods.star @@ -4,6 +4,7 @@ vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") # Module Imports waku = import_module(vars.WAKU_MODULE) node_builders = import_module(vars.NODE_BUILDERS_MODULE) +call_protocols = import_module(vars.CALL_PROTOCOLS) # We have to encapsulate all tests into one function, so we can use the same service for all tests, @@ -16,50 +17,49 @@ def test_waku_methods(plan): topology = read_file(src=topology_for_test_file) topology = json.decode(topology) - services_info = node_builders.instantiate_services(plan, topology, True) + node_builders.instantiate_services(plan, topology, True) expected_ids = { "nwaku_0_2": "16Uiu2HAm7ZPmRY3ECVz7fAJQdxEDrBw3ToneYgUryKDJPtz25R2n", "nwaku_1_2": "16Uiu2HAmV7KPdL24S9Lztu6orfWuHypA9F6NUR4GkBDvWg8U4B5Z" } - for test_node in services_info.keys(): - test_send_json_rpc(plan, test_node) - test_get_wakunode_peer_id(plan, test_node, expected_ids) - test_connect_wakunode_to_peers(plan, test_node) - test_post_waku_v2_relay_v1_message(plan, test_node) + for test_node, test_node_info in topology["nodes"].items(): + test_send_json_rpc(plan, test_node, test_node_info) + test_get_wakunode_peer_id(plan, test_node, test_node_info, 
expected_ids) - test_create_waku_id(plan) + test_create_node_multiaddress(plan) test__merge_peer_ids(plan) - test_get_waku_peers(plan, topology) - test_interconnect_waku_nodes(plan, topology, services_info) - test_get_waku_peers_after(plan, topology) + test_get_waku_peers(plan, topology, 0) + waku.interconnect_waku_nodes(plan, topology, 1) + test_get_waku_peers(plan, topology, 1) - for service_name in services_info.keys(): + for service_name in topology["containers"].keys(): plan.remove_service(service_name) -def test_send_json_rpc(plan, service_name): +def test_send_json_rpc(plan, test_node, test_node_info): waku_message = '{"payload": "0x1a2b3c4d5e6f", "timestamp": 1626813243}' params = "test, " + waku_message + service_id = test_node_info[vars.GENNET_NODE_CONTAINER_KEY] # Automatically waits for 200 - waku.send_json_rpc(plan, service_name, vars.RPC_PORT_ID, + call_protocols.send_json_rpc(plan, service_id, vars.RPC_PORT_ID+"_"+test_node, vars.POST_RELAY_MESSAGE_METHOD, params) -def test_get_wakunode_peer_id(plan, service_name, expected_ids): - peer_id = waku.get_wakunode_peer_id(plan, service_name, vars.RPC_PORT_ID) +def test_get_wakunode_peer_id(plan, test_node, test_node_info, expected_ids): + service_id = test_node_info[vars.GENNET_NODE_CONTAINER_KEY] + peer_id = waku.get_wakunode_peer_id(plan, service_id, vars.RPC_PORT_ID+"_"+test_node) - plan.assert(value=peer_id, assertion="==", - target_value=expected_ids[service_name]) + plan.assert(value=peer_id, assertion="==", target_value=expected_ids[test_node]) -def test_create_waku_id(plan): - service_struct = struct(ip_address="1.1.1.1", - ports={vars.WAKU_LIBP2P_PORT_ID: PortSpec(number=1234)}) - services_example = {"service_info": service_struct, "peer_id": "ASDFGHJKL"} +def test_create_node_multiaddress(plan): + node_id = "test" + node_information = {"ip_address": "1.1.1.1", "ports": {"libp2p_test": (1234, 'tcp')}, + "peer_id": "ASDFGHJKL"} - waku_id = waku.create_node_multiaddress(services_example) + waku_id = 
waku.create_node_multiaddress(node_id, node_information) plan.assert(value=waku_id, assertion="==", target_value='"/ip4/1.1.1.1/tcp/1234/p2p/ASDFGHJKL"') @@ -74,30 +74,8 @@ def test__merge_peer_ids(plan): target_value="[/ip4/1.1.1.1/tcp/1234/p2p/ASDFGHJKL,/ip4/2.2.2.2/tcp/1234/p2p/QWERTYUIOP]") -def test_connect_wakunode_to_peers(plan, service_name): - # It will print an error but 200 code - waku.connect_wakunode_to_peers(plan, service_name, vars.RPC_PORT_ID, ["asd"]) +def test_get_waku_peers(plan, test_topology, expected): + for test_node, test_node_info in test_topology["nodes"].items(): + num_peers = waku.get_waku_peers(plan, test_node_info["container_id"], test_node) -def test_post_waku_v2_relay_v1_message(plan, service_name): - waku.post_waku_v2_relay_v1_message_test(plan, service_name, "test") - - -def test_get_waku_peers(plan, test_topology): - for test_node in test_topology.keys(): - num_peers = waku.get_waku_peers(plan, test_node) - - plan.assert(value=num_peers, assertion="==", target_value=0) - -def test_get_waku_peers_after(plan, test_topology): - for test_node in test_topology.keys(): - num_peers = waku.get_waku_peers(plan, test_node) - - plan.assert(value=num_peers, assertion="==", target_value=1) - -def test_interconnect_waku_nodes(plan, test_topology, node_test_services): - - waku.interconnect_waku_nodes(plan, test_topology, node_test_services) - - for service_name in node_test_services: - neighbours = waku.get_waku_peers(plan, service_name) - plan.assert(value=neighbours, assertion="==", target_value=1) + plan.assert(value=num_peers, assertion="==", target_value=expected) From 1932de5a145c20a3b76ae6db0df3debd5144562c Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Wed, 8 Mar 2023 18:24:49 +0100 Subject: [PATCH 092/112] Added tests for gowaku_builder.star --- .../types/tests/test_gowaku_builder.star | 58 +++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 src/node_builders/types/tests/test_gowaku_builder.star diff --git 
a/src/node_builders/types/tests/test_gowaku_builder.star b/src/node_builders/types/tests/test_gowaku_builder.star new file mode 100644 index 0000000..264c331 --- /dev/null +++ b/src/node_builders/types/tests/test_gowaku_builder.star @@ -0,0 +1,58 @@ +# System Imports +vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") + +# Module Imports +gowaku_builder = import_module(vars.GOWAKU_BUILDER_MODULE) + + +def test_prepare_gowaku_service(plan): + test_dict = {} + topology = {"nodes": {"test1": {vars.GENNET_PORT_SHIFT_KEY: 0}, + "test2": {vars.GENNET_PORT_SHIFT_KEY: 1}}} + + gowaku_builder.prepare_gowaku_service(["test1", "test2"], test_dict, + ["test1.toml", "test2.toml"], + ["a1", "a2"], + "id_1", topology) + + # hasattr doesn't work in dicts? + plan.assert(value=str(test_dict.get("id_1")), + assertion="!=", target_value="None") + + plan.assert(value=test_dict["id_1"].image, + assertion="==", target_value=vars.GOWAKU_IMAGE) + + for node in ["test1", "test2"]: + plan.assert(value=str(test_dict["id_1"].ports[vars.RPC_PORT_ID+"_"+node].number), + assertion="==", target_value = str(vars.WAKU_RPC_PORT_NUMBER + + topology["nodes"][node][vars.GENNET_PORT_SHIFT_KEY])) + plan.assert(value=str(test_dict["id_1"].ports[vars.PROMETHEUS_PORT_ID+"_"+node].number), + assertion="==", target_value=str(vars.PROMETHEUS_PORT_NUMBER + + topology["nodes"][node][vars.GENNET_PORT_SHIFT_KEY])) + plan.assert(value=str(test_dict["id_1"].ports[vars.WAKU_LIBP2P_PORT_ID+"_"+node].number), + assertion="==", target_value=str(vars.WAKU_LIBP2P_PORT + + topology["nodes"][node][vars.GENNET_PORT_SHIFT_KEY])) + + for node, file in zip(["test1", "test2"], ["a1", "a2"]): + plan.assert(value=test_dict["id_1"].files[vars.CONTAINER_NODE_CONFIG_FILE_LOCATION+node], + assertion="==", target_value=file) + + for i in range(len(test_dict["id_1"].entrypoint)): + plan.assert(value=test_dict["id_1"].entrypoint[i], assertion="==", + target_value=vars.GENERAL_ENTRYPOINT[i]) + + +def 
test__prepare_gowaku_cmd_in_service(plan): + + topology = {"nodes": {"a": {"port_shift": 0}, "b": {"port_shift": 1}}} + result = gowaku_builder._prepare_gowaku_cmd_in_service(["a", "b"], ["c", "d"], topology) + + plan.assert(value=result[0], + assertion="==", + target_value=vars.GOWAKU_ENTRYPOINT+" "+vars.WAKUNODE_CONFIGURATION_FILE_FLAG+ + vars.CONTAINER_NODE_CONFIG_FILE_LOCATION+"a"+"/"+"c"+" "+ + vars.WAKUNODE_PORT_SHIFT_FLAG+"0"+" & "+ + vars.GOWAKU_ENTRYPOINT+" "+vars.WAKUNODE_CONFIGURATION_FILE_FLAG+ + vars.CONTAINER_NODE_CONFIG_FILE_LOCATION+"b"+"/"+"d"+" "+ + vars.WAKUNODE_PORT_SHIFT_FLAG+"1" + ) From a155da00fb4e9c61b81ca454dd222b9e23aaf4a8 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Wed, 8 Mar 2023 18:24:55 +0100 Subject: [PATCH 093/112] Added tests for waku_builder.star --- .../types/tests/test_waku_builder.star | 51 +++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 src/node_builders/types/tests/test_waku_builder.star diff --git a/src/node_builders/types/tests/test_waku_builder.star b/src/node_builders/types/tests/test_waku_builder.star new file mode 100644 index 0000000..00abdce --- /dev/null +++ b/src/node_builders/types/tests/test_waku_builder.star @@ -0,0 +1,51 @@ +# System Imports +vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") + +# Module Imports +waku_builders = import_module(vars.WAKU_BUILDER_MODULE) + + +def test_prepare_waku_ports_in_service(plan): + topology = {"nodes":{"test1": {vars.GENNET_PORT_SHIFT_KEY : 0}, + "test2": {vars.GENNET_PORT_SHIFT_KEY : 1}}} + ports = waku_builders.prepare_waku_ports_in_service(["test1", "test2"], topology) + + for node_name in ["test1", "test2"]: + plan.assert(value=str(ports[vars.RPC_PORT_ID+"_"+node_name].number), + assertion="==", target_value = str(vars.WAKU_RPC_PORT_NUMBER + + topology["nodes"][node_name][vars.GENNET_PORT_SHIFT_KEY])) + plan.assert(value=str(ports[vars.PROMETHEUS_PORT_ID+"_"+node_name].number), + assertion="==", 
target_value=str(vars.PROMETHEUS_PORT_NUMBER + + topology["nodes"][node_name][vars.GENNET_PORT_SHIFT_KEY])) + plan.assert(value=str(ports[vars.WAKU_LIBP2P_PORT_ID+"_"+node_name].number), + assertion="==", target_value=str(vars.WAKU_LIBP2P_PORT + + topology["nodes"][node_name][vars.GENNET_PORT_SHIFT_KEY])) + +def test_prepare_waku_config_files_in_service(plan): + names = ["test1", "test2"] + artifact_ids = ["a1", "a2"] + + files = waku_builders.prepare_waku_config_files_in_service(names, artifact_ids) + + for name, artif_id in zip(names, artifact_ids): + plan.assert(value=files[vars.CONTAINER_NODE_CONFIG_FILE_LOCATION+name], + assertion="==", target_value=artif_id) + +def test_add_waku_ports_info_to_topology(plan): + network_topology = {"nodes": {"test1": {}, "test2": {}}} + service_struct_1 = struct(ports={vars.RPC_PORT_ID+"_test1": PortSpec(number=1), + vars.WAKU_LIBP2P_PORT_ID+"_test1": PortSpec(number=2), + vars.PROMETHEUS_PORT_ID+"_test1": PortSpec(number=3)}) + + node_info1 = {vars.GENNET_NODE_CONTAINER_KEY: "cid1"} + + services = {"cid1": service_struct_1} + + waku_builders.add_waku_ports_info_to_topology(network_topology, services, node_info1, "test1") + + plan.assert(value=str(network_topology["nodes"]["test1"]["ports"][vars.RPC_PORT_ID+"_test1"][0]), + assertion="==", target_value=str(1)) + plan.assert(value=str(network_topology["nodes"]["test1"]["ports"][vars.WAKU_LIBP2P_PORT_ID+"_test1"][0]), + assertion="==", target_value=str(2)) + plan.assert(value=str(network_topology["nodes"]["test1"]["ports"][vars.PROMETHEUS_PORT_ID+"_test1"][0]), + assertion="==", target_value=str(3)) From de6c75cec7279f756677cc1443c45a8446c8d280 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Wed, 8 Mar 2023 18:24:59 +0100 Subject: [PATCH 094/112] Added tests for nwaku_builder.star --- .../types/tests/test_nwaku_builder.star | 57 +++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 src/node_builders/types/tests/test_nwaku_builder.star diff --git 
a/src/node_builders/types/tests/test_nwaku_builder.star b/src/node_builders/types/tests/test_nwaku_builder.star new file mode 100644 index 0000000..7083db7 --- /dev/null +++ b/src/node_builders/types/tests/test_nwaku_builder.star @@ -0,0 +1,57 @@ +# System Imports +vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") + +# Module Imports +nwaku_builder = import_module(vars.NWAKU_BUILDER_MODULE) + +def test_prepare_nwaku_service(plan): + test_dict = {} + topology = {"nodes": {"test1": {vars.GENNET_PORT_SHIFT_KEY: 0}, + "test2": {vars.GENNET_PORT_SHIFT_KEY: 1}}} + + nwaku_builder.prepare_nwaku_service(["test1", "test2"], test_dict, + ["test1.toml", "test2.toml"], + ["a1", "a2"], + "id_1", topology) + + # hasattr doesn't work in dicts? + plan.assert(value=str(test_dict.get("id_1")), + assertion="!=", target_value="None") + + plan.assert(value=test_dict["id_1"].image, + assertion="==", target_value=vars.NWAKU_IMAGE) + + for node in ["test1", "test2"]: + plan.assert(value=str(test_dict["id_1"].ports[vars.RPC_PORT_ID+"_"+node].number), + assertion="==", target_value = str(vars.WAKU_RPC_PORT_NUMBER + + topology["nodes"][node][vars.GENNET_PORT_SHIFT_KEY])) + plan.assert(value=str(test_dict["id_1"].ports[vars.PROMETHEUS_PORT_ID+"_"+node].number), + assertion="==", target_value=str(vars.PROMETHEUS_PORT_NUMBER + + topology["nodes"][node][vars.GENNET_PORT_SHIFT_KEY])) + plan.assert(value=str(test_dict["id_1"].ports[vars.WAKU_LIBP2P_PORT_ID+"_"+node].number), + assertion="==", target_value=str(vars.WAKU_LIBP2P_PORT + + topology["nodes"][node][vars.GENNET_PORT_SHIFT_KEY])) + + for node, file in zip(["test1", "test2"], ["a1", "a2"]): + plan.assert(value=test_dict["id_1"].files[vars.CONTAINER_NODE_CONFIG_FILE_LOCATION+node], + assertion="==", target_value=file) + + for i in range(len(test_dict["id_1"].entrypoint)): + plan.assert(value=test_dict["id_1"].entrypoint[i], assertion="==", + target_value=vars.GENERAL_ENTRYPOINT[i]) + + +def 
test__prepare_nwaku_cmd_in_service(plan): + + topology = {"nodes": {"a": {"port_shift": 0}, "b": {"port_shift": 1}}} + result = nwaku_builder._prepare_nwaku_cmd_in_service(["a", "b"], ["c", "d"], topology) + + plan.assert(value=result[0], + assertion="==", + target_value=vars.NWAKU_ENTRYPOINT+" "+vars.WAKUNODE_CONFIGURATION_FILE_FLAG+ + vars.CONTAINER_NODE_CONFIG_FILE_LOCATION+"a"+"/"+"c"+" "+ + vars.WAKUNODE_PORT_SHIFT_FLAG+"0"+" & "+ + vars.NWAKU_ENTRYPOINT+" "+vars.WAKUNODE_CONFIGURATION_FILE_FLAG+ + vars.CONTAINER_NODE_CONFIG_FILE_LOCATION+"b"+"/"+"d"+" "+ + vars.WAKUNODE_PORT_SHIFT_FLAG+"1" + ) From da8cfe837764b98bdfcb145578b4cfc9052db9be Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Wed, 8 Mar 2023 18:25:21 +0100 Subject: [PATCH 095/112] Added new tests to main test file and one extra data file for tests --- config/test_files/nwaku_0.toml | 2 +- tests.star | 27 +++++++++++++++++++++++---- 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/config/test_files/nwaku_0.toml b/config/test_files/nwaku_0.toml index 6dd3341..b5208a8 100644 --- a/config/test_files/nwaku_0.toml +++ b/config/test_files/nwaku_0.toml @@ -1,4 +1,4 @@ -topics="test" +topics="test asd" rpc-admin=true keep-alive=true metrics-server=true diff --git a/tests.star b/tests.star index aa9a99c..d391815 100644 --- a/tests.star +++ b/tests.star @@ -4,8 +4,12 @@ vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") # Module Imports args_parser_test = import_module(vars.TEST_ARGUMENTS_MODULE) file_helpers_test = import_module(vars.TEST_FILES_MODULE) -node_builders_test = import_module(vars.TEST_NODE_BUILDERS_MODULE) waku_test = import_module(vars.TEST_WAKU_MODULE) +node_builders_test = import_module(vars.TEST_NODE_BUILDERS_MODULE) +waku_builder_test = import_module(vars.TEST_WAKU_BUILDER_MODULE) +gowaku_builder_test = import_module(vars.TEST_GOWAKU_BUILDER_MODULE) +nwaku_builder_test = import_module(vars.TEST_NWAKU_BUILDER_MODULE) + def run(plan, args): @@ 
-19,8 +23,23 @@ def run(plan, args): file_helpers_test.test_generate_template_prometheus_url(plan) file_helpers_test.test_prepare_artifact_files_grafana(plan) - node_builders_test.test_prepare_nwaku_service(plan) - node_builders_test.test_prepare_gowaku_service(plan) + waku_test.test_waku_methods(plan) + node_builders_test.test_instantiate_services(plan) - waku_test.test_waku_methods(plan) \ No newline at end of file + waku_builder_test.test_prepare_waku_ports_in_service(plan) + + waku_builder_test.test_prepare_waku_config_files_in_service(plan) + waku_builder_test.test_add_waku_ports_info_to_topology(plan) + + + gowaku_builder_test.test_prepare_gowaku_service(plan) + gowaku_builder_test.test__prepare_gowaku_cmd_in_service(plan) + + nwaku_builder_test.test_prepare_nwaku_service(plan) + nwaku_builder_test.test__prepare_nwaku_cmd_in_service(plan) + + + + + From f4fb0bdd45bc5a60d211138b20a5997393f3834f Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Thu, 9 Mar 2023 16:43:31 +0100 Subject: [PATCH 096/112] Removed unnecessary catches --- wls-module/src/utils/files.py | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/wls-module/src/utils/files.py b/wls-module/src/utils/files.py index 1ef6dce..f77dbe3 100644 --- a/wls-module/src/utils/files.py +++ b/wls-module/src/utils/files.py @@ -1,6 +1,5 @@ # Python Imports import json -import sys # Project Imports from src.utils import wls_logger @@ -8,24 +7,16 @@ def load_config_file(config_file): """ Load config file """ - try: - with open(config_file, 'r') as f: - config = json.load(f) - except Exception as e: - wls_logger.G_LOGGER.error('%s: %s' % (e.__doc__, e)) - sys.exit() + with open(config_file, 'r') as f: + config = json.load(f) return config def load_topology(topology_file): """ Load topology """ - try: - with open(topology_file, 'r') as read_file: - topology = json.load(read_file) - except Exception as e: - wls_logger.G_LOGGER.error('%s: %s' % (e.__doc__, e)) - sys.exit() + with 
open(topology_file, 'r') as read_file: + topology = json.load(read_file) wls_logger.G_LOGGER.debug(topology) wls_logger.G_LOGGER.info('Topology loaded') From 01551df10479ace2f4a5ccb9e6b09ee090c17ef9 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Thu, 9 Mar 2023 16:44:36 +0100 Subject: [PATCH 097/112] Fixed CMD starlark command to wls --- src/wls.star | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/wls.star b/src/wls.star index 1bc9b79..e93ece6 100644 --- a/src/wls.star +++ b/src/wls.star @@ -34,14 +34,15 @@ def create_new_topology_information(plan, network_topology): def create_cmd(config_file): + cmd = [] config_file_name = config_file.split("/")[-1] - config_file = vars.WLS_CONFIG_FILE_FLAG + " " + \ - vars.WLS_CONFIG_PATH + config_file_name - topology_file = vars.WLS_TOPOLOGY_FILE_FLAG + " " + \ - vars.WLS_TOPOLOGY_PATH + vars.CONTAINER_TOPOLOGY_FILE_NAME_WLS + cmd.append(vars.WLS_CONFIG_FILE_FLAG) + cmd.append(vars.WLS_CONFIG_PATH + config_file_name) + cmd.append(vars.WLS_TOPOLOGY_FILE_FLAG) + cmd.append(vars.WLS_TOPOLOGY_PATH + vars.CONTAINER_TOPOLOGY_FILE_NAME_WLS) - return config_file + " " + topology_file + return cmd def init(plan, network_topology, config_file): From 1ae60e839511eea0f8ac9b2f1b7b8c2af392aa33 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Thu, 9 Mar 2023 16:44:56 +0100 Subject: [PATCH 098/112] Removed unnecessary init py in wls-module --- wls-module/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 wls-module/__init__.py diff --git a/wls-module/__init__.py b/wls-module/__init__.py deleted file mode 100644 index e69de29..0000000 From fcd814d8e8762ccf6c2e3e0dbcc878db57427412 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Thu, 9 Mar 2023 16:45:17 +0100 Subject: [PATCH 099/112] Fixed pythonpath in wls dockerfile --- wls-module/Dockerfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/wls-module/Dockerfile b/wls-module/Dockerfile index 
4a03a60..12f8353 100644 --- a/wls-module/Dockerfile +++ b/wls-module/Dockerfile @@ -25,5 +25,7 @@ COPY . . # Deploy the virtualenv in production image ENV PATH="/opt/venv/bin:$PATH" +ENV PYTHONPATH "${PYTHONPATH}:/wls/src/" + # Set the entrypoint -ENTRYPOINT ["python", "wls.py"] \ No newline at end of file +ENTRYPOINT ["python", "src/wls.py"] \ No newline at end of file From e9237955528b7574c1c621e983f16b811120c57b Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Thu, 9 Mar 2023 16:45:35 +0100 Subject: [PATCH 100/112] Fixed paths for wls in system_variables.star --- src/system_variables.star | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/system_variables.star b/src/system_variables.star index 46a50cc..6d8fcb0 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -92,12 +92,11 @@ PORTS_KEY = "ports" WLS_IMAGE = "wls:0.0.1" WLS_SERVICE_NAME = "wls" WLS_CONFIG_PATH = "/wls/config/" -WLS_TARGETS_PATH = "/wls/targets" -WLS_TOMLS_PATH = "/wls/tomls" -WLS_TOPOLOGY_PATH = "/wls/network_topology" -WLS_CONFIG_FILE_FLAG = "--config-file" -WLS_TOPOLOGY_FILE_FLAG = "--topology-file" - +WLS_TARGETS_PATH = "/wls/targets/" +WLS_TOMLS_PATH = "/wls/tomls/" +WLS_TOPOLOGY_PATH = "/wls/network_topology/" +WLS_CONFIG_FILE_FLAG = "--config_file" +WLS_TOPOLOGY_FILE_FLAG = "--topology_file" CONTAINER_WLS_CONFIGURATION_FILE_NAME = "config.json" CONTAINER_TOPOLOGY_FILE_NAME_WLS = "network_data.json" From ca738873617152df30fe2d5fe62a8913e22f553a Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Thu, 9 Mar 2023 16:46:11 +0100 Subject: [PATCH 101/112] Fixed imports in wls tests --- wls-module/src/tests/test_wls.py | 12 ++++++++---- wls-module/src/utils/tests/test_files.py | 10 ++++++---- wls-module/src/utils/tests/test_payloads.py | 10 +++++++--- wls-module/src/utils/wls_logger.py | 1 + 4 files changed, 22 insertions(+), 11 deletions(-) diff --git a/wls-module/src/tests/test_wls.py b/wls-module/src/tests/test_wls.py index 
093b142..a6e0b19 100644 --- a/wls-module/src/tests/test_wls.py +++ b/wls-module/src/tests/test_wls.py @@ -1,14 +1,18 @@ +# Python Imports import unittest import random from unittest.mock import mock_open, patch +# Project Imports from src import wls -random.seed(0) - class TestWLS(unittest.TestCase): + @classmethod + def setUpClass(cls): + random.seed(1) + def create_patch(self, name): patcher = patch(name) thing = patcher.start() @@ -178,8 +182,8 @@ def test__select_emitter_and_topic(self): emitter_address, topic = wls._select_emitter_with_topic(emitters) - self.assertEqual(emitter_address, "http://5:6/") - self.assertEqual(topic, "test2b") + self.assertEqual(emitter_address, "http://1:2/") + self.assertEqual(topic, "test1a") def test__inject_message(self): mock_dist = self.create_patch('src.utils.payloads.make_payload_dist') diff --git a/wls-module/src/utils/tests/test_files.py b/wls-module/src/utils/tests/test_files.py index 5073231..17ef394 100644 --- a/wls-module/src/utils/tests/test_files.py +++ b/wls-module/src/utils/tests/test_files.py @@ -1,29 +1,31 @@ +# Python Imports import json import unittest import os +# Project Imports from src.utils import files class TestFiles(unittest.TestCase): def test_load_config_file(self): - config = files.load_config_file("test_files/test_config.json") + config = files.load_config_file("src/utils/tests/test_files/test_config.json") self.assertEqual(config["general"]["prng_seed"], 1234) self.assertEqual(config["kurtosis"]["enclave_name"], "test") def test_config_file_error(self): with self.assertRaises(FileNotFoundError): - files.load_config_file("test_files/test_config_error.json") + files.load_config_file("src/utils/tests/test_files/test_config_error.json") def test_load_topology(self): - test_topology = files.load_topology("test_files/test_topology.json") + test_topology = files.load_topology("src/utils/tests/test_files/test_topology.json") self.assertEqual(test_topology["containers"]["containers_0"][0], "node_0") 
self.assertEqual(test_topology["nodes"]["node_0"]["image"], "nim-waku") def test_load_topology_error(self): with self.assertRaises(FileNotFoundError): - files.load_topology("test_files/test_topology_error.json") + files.load_topology("src/utils/tests/test_files/test_topology_error.json") def test_save_messages_to_json(self): msgs_dict = {"test": "test"} diff --git a/wls-module/src/utils/tests/test_payloads.py b/wls-module/src/utils/tests/test_payloads.py index c9a0d2e..5565527 100644 --- a/wls-module/src/utils/tests/test_payloads.py +++ b/wls-module/src/utils/tests/test_payloads.py @@ -1,14 +1,18 @@ +# Python Imports import unittest -import random from unittest.mock import patch +import random +# Project Imports from src.utils import payloads -random.seed(1) - class TestPayloads(unittest.TestCase): + @classmethod + def setUpClass(cls): + random.seed(1) + def create_patch(self, name): patcher = patch(name) thing = patcher.start() diff --git a/wls-module/src/utils/wls_logger.py b/wls-module/src/utils/wls_logger.py index 1d19b9d..20a4e2f 100644 --- a/wls-module/src/utils/wls_logger.py +++ b/wls-module/src/utils/wls_logger.py @@ -1,3 +1,4 @@ +# Python Imports import sys import logging From fe268b84b3b50a8b32e6eff03236f7e508cbe557 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Thu, 9 Mar 2023 16:46:27 +0100 Subject: [PATCH 102/112] Fixed imports in wls main --- wls-module/src/wls.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/wls-module/src/wls.py b/wls-module/src/wls.py index 7c46e59..1c0a299 100644 --- a/wls-module/src/wls.py +++ b/wls-module/src/wls.py @@ -7,10 +7,10 @@ import tomllib # Project Imports -from .utils import wls_logger -from .utils import waku_messaging -from .utils import payloads -from .utils import files +from src.utils import wls_logger +from src.utils import waku_messaging +from src.utils import payloads +from src.utils import files """ Globals """ G_DEFAULT_CONFIG_FILE = 'config.json' From 
23c612153daaaab59e50d480b90d79b45eda6cf9 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Thu, 9 Mar 2023 16:46:47 +0100 Subject: [PATCH 103/112] Added test for uniform mode in wls and fixed bug --- .../src/utils/tests/test_waku_messaging.py | 16 ++++++++++++---- wls-module/src/utils/waku_messaging.py | 2 +- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/wls-module/src/utils/tests/test_waku_messaging.py b/wls-module/src/utils/tests/test_waku_messaging.py index dab36d3..eebc4fd 100644 --- a/wls-module/src/utils/tests/test_waku_messaging.py +++ b/wls-module/src/utils/tests/test_waku_messaging.py @@ -1,15 +1,19 @@ +# Python Imports import unittest -import random import json +import random from unittest.mock import patch +# Project Imports from src.utils import waku_messaging -random.seed(1) - class TestPayloads(unittest.TestCase): + @classmethod + def setUpClass(cls): + random.seed(1) + def create_patch(self, name): patcher = patch(name) thing = patcher.start() @@ -20,7 +24,7 @@ def test__poisson_interval(self): test_1 = waku_messaging._poisson_interval(1) test_5 = waku_messaging._poisson_interval(5) test_10 = waku_messaging._poisson_interval(10) - self.assertEqual(test_1, 0.1442910641095092) + self.assertEqual(test_1, 0.1442910641095092) self.assertEqual(test_5, 0.3760312530841251) self.assertEqual(test_10, 0.1442968925346663) @@ -75,6 +79,10 @@ def test_get_next_time_to_msg_uniform(self): test = waku_messaging.get_next_time_to_msg('uniform', 1, 1) self.assertEqual(test, 1) + def test_get_next_time_to_msg_uniform_2(self): + test = waku_messaging.get_next_time_to_msg('uniform', 2, 10) + self.assertEqual(test, 0.5) + def test_get_next_time_to_msg_invalid(self): with self.assertRaises(SystemExit) as cm: waku_messaging.get_next_time_to_msg('test', 1, 1) diff --git a/wls-module/src/utils/waku_messaging.py b/wls-module/src/utils/waku_messaging.py index b6f314e..f52d4d5 100644 --- a/wls-module/src/utils/waku_messaging.py +++ 
b/wls-module/src/utils/waku_messaging.py @@ -77,7 +77,7 @@ def get_next_time_to_msg(inter_msg_type, msg_rate, simulation_time): return _poisson_interval(msg_rate) if inter_msg_type == 'uniform': - return simulation_time / msg_rate + return simulation_time / (msg_rate * simulation_time) wls_logger.G_LOGGER.error(f'{inter_msg_type} is not a valid inter_msg_type. Aborting.') sys.exit(1) From 5c171eb1548e92d7f7cda87bc6bcc86a56db491e Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Fri, 10 Mar 2023 19:12:50 +0100 Subject: [PATCH 104/112] Added tests for wls.star --- src/system_variables.star | 5 +++++ src/tests/test_wls.star | 38 ++++++++++++++++++++++++++++++++++++++ src/wls.star | 15 ++++++++------- tests.star | 11 +++++++---- 4 files changed, 58 insertions(+), 11 deletions(-) diff --git a/src/system_variables.star b/src/system_variables.star index 6d8fcb0..a4f5384 100644 --- a/src/system_variables.star +++ b/src/system_variables.star @@ -97,6 +97,10 @@ WLS_TOMLS_PATH = "/wls/tomls/" WLS_TOPOLOGY_PATH = "/wls/network_topology/" WLS_CONFIG_FILE_FLAG = "--config_file" WLS_TOPOLOGY_FILE_FLAG = "--topology_file" +WLS_CONFIG_ARTIFACT_NAME = "config_file" +WLS_TOPOLOGY_ARTIFACT_NAME = "wls_topology" +WLS_TOMLS_ARTIFACT_NAME = "tomls_artifact" + CONTAINER_WLS_CONFIGURATION_FILE_NAME = "config.json" CONTAINER_TOPOLOGY_FILE_NAME_WLS = "network_data.json" @@ -128,6 +132,7 @@ NOMOS_MODULE = "github.com/logos-co/wakurtosis/src/nomos.star" TEST_ARGUMENTS_MODULE = "github.com/logos-co/wakurtosis/src/tests/test_arguments_parser.star" TEST_FILES_MODULE = "github.com/logos-co/wakurtosis/src/tests/test_file_helpers.star" TEST_WAKU_MODULE = "github.com/logos-co/wakurtosis/src/tests/test_waku_methods.star" +TEST_WLS_MODULE = "github.com/logos-co/wakurtosis/src/tests/test_wls.star" TEST_NODE_BUILDERS_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/tests/test_node_builders.star" TEST_WAKU_BUILDER_MODULE = 
"github.com/logos-co/wakurtosis/src/node_builders/types/tests/test_waku_builder.star" TEST_GOWAKU_BUILDER_MODULE = "github.com/logos-co/wakurtosis/src/node_builders/types/tests/test_gowaku_builder.star" diff --git a/src/tests/test_wls.star b/src/tests/test_wls.star index e69de29..8ebc3f4 100644 --- a/src/tests/test_wls.star +++ b/src/tests/test_wls.star @@ -0,0 +1,38 @@ +# System Imports +vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") + +# Project Imports +wls = import_module(vars.WLS_MODULE) + + +def test_upload_config(plan): + test_config = vars.TEST_FILES_LOCATION + "test_config_file.json" + test = wls.upload_config(plan, test_config, "test_config") + + plan.assert(value=test, assertion="==", target_value="test_config") + + +def test_create_new_topology_information(plan): + test_topology = {} + test = wls.create_new_topology_information(plan, test_topology, "test_topology") + + plan.assert(value=test, assertion="==", target_value="test_topology") + +def test_create_cmd(plan): + config_file = "test.json" + test = wls.create_cmd(config_file) + result = [vars.WLS_CONFIG_FILE_FLAG, vars.WLS_CONFIG_PATH + config_file, + vars.WLS_TOPOLOGY_FILE_FLAG, vars.WLS_TOPOLOGY_PATH + vars.CONTAINER_TOPOLOGY_FILE_NAME_WLS] + + for i in range(len(result)): + plan.assert(value=test[i], assertion="==", target_value=result[i]) + +def test_init(plan): + test_config = vars.TEST_FILES_LOCATION + "test_config_file.json" + test = wls.upload_config(plan, test_config, "test_config_2") + + test_topology = {} + + test_wls_service = wls.init(plan, test_topology, test_config) + + plan.remove_service(vars.WLS_SERVICE_NAME) \ No newline at end of file diff --git a/src/wls.star b/src/wls.star index e93ece6..6cb58ae 100644 --- a/src/wls.star +++ b/src/wls.star @@ -5,15 +5,15 @@ vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") files = import_module(vars.FILE_HELPERS_MODULE) templates = import_module(vars.TEMPLATES_MODULE) -def 
upload_config(plan, config_file): +def upload_config(plan, config_file, artifact_name): config_artifact = plan.upload_files( src=config_file, - name="config_file" + name=artifact_name ) return config_artifact -def create_new_topology_information(plan, network_topology): +def create_new_topology_information(plan, network_topology, network_artifact_name): template = """ {{.information}} """ @@ -27,7 +27,7 @@ def create_new_topology_information(plan, network_topology): data=info, ) }, - name="wls_topology" + name=network_artifact_name ) return artifact_id @@ -47,15 +47,16 @@ def create_cmd(config_file): def init(plan, network_topology, config_file): # Generate simulation config - config_artifact = upload_config(plan, config_file) + config_artifact = upload_config(plan, config_file, vars.WLS_CONFIG_ARTIFACT_NAME) tomls_artifact = plan.upload_files( src = vars.NODE_CONFIG_FILE_LOCATION, - name = "tomls_artifact", + name = vars.WLS_TOMLS_ARTIFACT_NAME, ) # Get complete network topology information - wls_topology = create_new_topology_information(plan, network_topology) + wls_topology = create_new_topology_information(plan, network_topology, + vars.WLS_TOPOLOGY_ARTIFACT_NAME) wls_cmd = create_cmd(config_file) diff --git a/tests.star b/tests.star index d391815..ff433e7 100644 --- a/tests.star +++ b/tests.star @@ -5,6 +5,7 @@ vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star") args_parser_test = import_module(vars.TEST_ARGUMENTS_MODULE) file_helpers_test = import_module(vars.TEST_FILES_MODULE) waku_test = import_module(vars.TEST_WAKU_MODULE) +wls_test = import_module(vars.TEST_WLS_MODULE) node_builders_test = import_module(vars.TEST_NODE_BUILDERS_MODULE) waku_builder_test = import_module(vars.TEST_WAKU_BUILDER_MODULE) gowaku_builder_test = import_module(vars.TEST_GOWAKU_BUILDER_MODULE) @@ -12,7 +13,9 @@ nwaku_builder_test = import_module(vars.TEST_NWAKU_BUILDER_MODULE) + def run(plan, args): + args_parser_test.test_load_config_args_default(plan) 
args_parser_test.test_load_config_args_given(plan) @@ -39,7 +42,7 @@ def run(plan, args): nwaku_builder_test.test_prepare_nwaku_service(plan) nwaku_builder_test.test__prepare_nwaku_cmd_in_service(plan) - - - - + wls_test.test_upload_config(plan) + wls_test.test_create_new_topology_information(plan) + wls_test.test_create_cmd(plan) + wls_test.test_init(plan) From f63c3b49a006a02f016e1d06de3fa971244025c3 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Fri, 10 Mar 2023 19:12:58 +0100 Subject: [PATCH 105/112] Cleaning --- src/node_builders/tests/test_node_builders.star | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/node_builders/tests/test_node_builders.star b/src/node_builders/tests/test_node_builders.star index e1bef31..80fdbb3 100644 --- a/src/node_builders/tests/test_node_builders.star +++ b/src/node_builders/tests/test_node_builders.star @@ -15,8 +15,8 @@ def test_instantiate_services(plan): for node_info in topology["nodes"].values(): plan.assert(value="peer_id", assertion="IN", target_value=node_info.keys()) - plan.assert (value="ip_address", assertion="IN", target_value=node_info.keys()) - plan.assert (value="ports", assertion="IN", target_value=node_info.keys()) + plan.assert(value="ip_address", assertion="IN", target_value=node_info.keys()) + plan.assert(value="ports", assertion="IN", target_value=node_info.keys()) node_builders.interconnect_nodes(plan, topology, 1) _test_node_neighbours(plan, topology) From 5f45ae4a061dc39cb082e1dcc1c8c532d06ca2ce Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Rendo Date: Fri, 10 Mar 2023 19:24:20 +0100 Subject: [PATCH 106/112] Deleted old wls --- wls-module/wls.py | 339 ---------------------------------------------- 1 file changed, 339 deletions(-) delete mode 100644 wls-module/wls.py diff --git a/wls-module/wls.py b/wls-module/wls.py deleted file mode 100644 index e515504..0000000 --- a/wls-module/wls.py +++ /dev/null @@ -1,339 +0,0 @@ -""" Dependencies """ -import argparse -import hashlib 
-import json -import logging -import random -import sys -import time -import tomllib -import requests - -# Project Imports -import rtnorm - -""" Globals """ -G_APP_NAME = 'WLS' -G_LOG_LEVEL = 'DEBUG' -G_DEFAULT_CONFIG_FILE = './config/config.json' -G_DEFAULT_TOPOLOGY_FILE = './network_topology/network_data.json' -G_LOGGER = logging.getLogger(G_APP_NAME) -handler = logging.StreamHandler(sys.stdout) - - -# Custom logging formatter -class CustomFormatter(logging.Formatter): - # Set different formats for every logging level - time_name_stamp = "[%(asctime)s.%(msecs)03d] [" + G_APP_NAME + "]" - FORMATS = { - logging.ERROR: time_name_stamp + " ERROR in %(module)s.py %(funcName)s() %(lineno)d - %(msg)s", - logging.WARNING: time_name_stamp + " WARNING - %(msg)s", - logging.CRITICAL: time_name_stamp + " CRITICAL in %(module)s.py %(funcName)s() %(lineno)d - %(msg)s", - logging.INFO: time_name_stamp + " %(msg)s", - logging.DEBUG: time_name_stamp + " %(funcName)s() %(msg)s", - 'DEFAULT': time_name_stamp + " %(msg)s", - } - - def format(self, record): - log_fmt = self.FORMATS.get(record.levelno, self.FORMATS['DEFAULT']) - formatter = logging.Formatter(log_fmt, '%d-%m-%Y %H:%M:%S') - return formatter.format(record) - - -def send_waku_msg(node_address, topic, payload, nonce=1): - # waku_msg = { - # 'nonce' : nonce, - # 'timestamp' : time.time_ns(), - # 'payload' : payload} - - my_payload = { - 'nonce' : nonce, - 'ts' : time.time_ns(), - 'payload' : payload - } - - waku_msg = { - 'payload' : json.dumps(my_payload).encode('utf-8').hex() - } - - data = { - 'jsonrpc': '2.0', - 'method': 'post_waku_v2_relay_v1_message', - 'id': 1, - 'params' : [topic, waku_msg]} - - G_LOGGER.debug('Waku RPC: %s from %s Topic: %s' %(data['method'], node_address, topic)) - - s_time = time.time() - - json_data = json.dumps(data) - - response = requests.post(node_address, data=json_data, headers={'content-type': 'application/json'}) - - elapsed_ms =(time.time() - s_time) * 1000 - - response_obj = 
response.json() - - G_LOGGER.debug('Response from %s: %s [%.4f ms.]' %(node_address, response_obj, elapsed_ms)) - - return response_obj, elapsed_ms, json.dumps(waku_msg), my_payload['ts'] - - -# Generate a random interval using a Poisson distribution -def poisson_interval(rate): - return random.expovariate(rate) - - -def make_payload(size): - payload = hex(random.getrandbits(4*size)) - G_LOGGER.debug('Payload of size %d bytes: %s' %(size, payload)) - return payload - - -def make_payload_dist(dist_type, min_size, max_size): - - # Check if min and max packet sizes are the same - if min_size == max_size: - G_LOGGER.warning('Packet size is constant: min_size=max_size=%d' %min_size) - return make_payload(min_size), min_size - - # Payload sizes are even integers uniformly distributed in [min_size, max_size] - if dist_type == 'uniform': - size = int(random.uniform(min_size, max_size)) - - # Reject non even sizes - while(size % 2) != 0: - size = int(random.uniform(min_size, max_size)) - - return make_payload(size), size - - # Payload sizes are even integers ~"normally" distributed in [min_size, max_size] - if dist_type == 'gaussian': - σ = (max_size - min_size) / 5. - μ = (max_size - min_size) / 2. - size = int(rtnorm.rtnorm(min_size, max_size, sigma=σ, mu=μ, size=1)) - - # Reject non even sizes - while(size % 2) != 0: - size = int(rtnorm.rtnorm(min_size, max_size, sigma=σ, mu=μ, size=1)) - - return make_payload(size), size - - G_LOGGER.error('Unknown distribution type %s') - - return '0x00', 0 - - -def get_next_time_to_msg(inter_msg_type, msg_rate, simulation_time): - - if inter_msg_type == 'poisson': - return poisson_interval(msg_rate) - - if inter_msg_type == 'uniform': - return simulation_time / msg_rate - - G_LOGGER.error('%s is not a valid inter_msg_type. Aborting.' 
%inter_msg_type) - sys.exit() - - -def innit_logging(): - """ Init Logging """ - handler.setFormatter(CustomFormatter()) - G_LOGGER.addHandler(handler) - G_LOGGER.info('Started') - - -def configure_logging(wls_config, config_file): - G_LOGGER.setLevel(wls_config['debug_level']) - handler.setLevel(wls_config['debug_level']) - G_LOGGER.debug(wls_config) - G_LOGGER.info('Configuration loaded from %s' %config_file) - - -def parse_cli(): - """ Parse command line args. """ - parser = argparse.ArgumentParser() - parser.add_argument("-cfg", "--config_file", help="Config file", action="store_true", - default=G_DEFAULT_CONFIG_FILE) - parser.add_argument("-t", "--topology_file", help="Topology file", action="store_true", - default=G_DEFAULT_TOPOLOGY_FILE) - - args = parser.parse_args() - - return args - - -def load_config_file(config_file): - """ Load config file """ - try: - with open(config_file, 'r') as f: - config = json.load(f) - except Exception as e: - G_LOGGER.error('%s: %s' % (e.__doc__, e)) - sys.exit() - - return config - - -def load_topology(topology_file): - """ Load topology """ - try: - with open(topology_file, 'r') as read_file: - topology = json.load(read_file) - except Exception as e: - G_LOGGER.error('%s: %s' % (e.__doc__, e)) - sys.exit() - - if len(topology) == 0: - G_LOGGER.error('Cannot find valid topology. 
Aborting.') - sys.exit(1) - - G_LOGGER.debug(topology) - G_LOGGER.info('%d topology loaded' % len(topology)) - - return topology - - -def load_topics_into_topology(topology): - """ Load Topics """ - nodes = topology["nodes"] - for node, node_info in nodes.items(): - try: - with open("tomls/" + node_info["node_config"], mode='rb') as read_file: - toml_config = tomllib.load(read_file) - if node_info["image"] == "nim-waku": - topics = list(toml_config["topics"].split(" ")) - elif node_info["image"] == "go-waku": - topics = toml_config["topics"] - - # Load topics into topology for easier access - nodes[node]["topics"] = topics - except Exception as e: - G_LOGGER.error('%s: %s' % (e.__doc__, e)) - sys.exit() - - G_LOGGER.info('Loaded nodes topics from toml files') - - -def get_random_emitters(topology, wls_config): - nodes = topology["nodes"] - """ Define the subset of emitters """ - num_emitters = int(len(nodes) * wls_config["emitters_fraction"]) - - if num_emitters == 0: - G_LOGGER.error( - 'The number of emitters must be greater than zero. Try increasing the fraction of emitters.') - sys.exit() - - random_emitters = dict(random.sample(list(nodes.items()), num_emitters)) - G_LOGGER.info('Selected %d emitters out of %d total nodes' % (len(random_emitters), len(nodes))) - - return random_emitters - - -def start_traffic_inyection(wls_config, random_emitters): - """ Start simulation """ - s_time = time.time() - last_msg_time = 0 - next_time_to_msg = 0 - msgs_dict = {} - - G_LOGGER.info('Starting a simulation of %d seconds ...' % wls_config['simulation_time']) - - while True: - # Check end condition - elapsed_s = time.time() - s_time - - if elapsed_s >= wls_config['simulation_time']: - G_LOGGER.info( - 'Simulation ended. Sent %d messages in %ds.' % (len(msgs_dict), elapsed_s)) - break - - # Send message - # BUG: There is a constant discrepancy. 
The average number of messages sent by time interval is slightly less than expected - msg_elapsed = time.time() - last_msg_time - if msg_elapsed <= next_time_to_msg: - continue - - G_LOGGER.debug('Time Δ: %.6f ms.' % ((msg_elapsed - next_time_to_msg) * 1000.0)) - - # Pick an emitter at random from the emitters list - random_emitter, random_emitter_info = random.choice(list(random_emitters.items())) - - emitter_address = f"http://{random_emitter_info['ip_address']}:{random_emitter_info['ports']['rpc_' + random_emitter][0]}/" - emitter_topics = random_emitter_info["topics"] - - # Pick a topic at random from the topics supported by the emitter - emitter_topic = random.choice(emitter_topics) - - G_LOGGER.info('Injecting message of topic %s to network through Waku node %s ...' % ( - emitter_topic, emitter_address)) - - payload, size = make_payload_dist(dist_type=wls_config['dist_type'].lower(), - min_size=wls_config['min_packet_size'], - max_size=wls_config['max_packet_size']) - response, elapsed, waku_msg, ts = send_waku_msg(emitter_address, topic=emitter_topic, - payload=payload, nonce=len(msgs_dict)) - - if response['result']: - msg_hash = hashlib.sha256(waku_msg.encode('utf-8')).hexdigest() - if msg_hash in msgs_dict: - G_LOGGER.error('Hash collision. %s already exists in dictionary' % msg_hash) - continue - msgs_dict[msg_hash] = {'ts': ts, 'injection_point': emitter_address, - 'nonce': len(msgs_dict), 'topic': emitter_topic, - 'payload': payload, 'payload_size': size} - - # Compute the time to next message - next_time_to_msg = get_next_time_to_msg(wls_config['inter_msg_type'], - wls_config['message_rate'], - wls_config['simulation_time']) - G_LOGGER.debug('Next message will happen in %d ms.' 
% (next_time_to_msg * 1000.0)) - - last_msg_time = time.time() - - elapsed_s = time.time() - s_time - - return msgs_dict - - -def save_messages(msgs_dict): - # Save messages for further analysis - with open('./messages.json', 'w') as f: - f.write(json.dumps(msgs_dict, indent=4)) - - """ We are done """ - G_LOGGER.info('Ended') - - -def main(): - innit_logging() - - args = parse_cli() - - config_file = args.config_file - topology_file = args.topology_file - - config = load_config_file(config_file) - - # Set loglevel from config - wls_config = config['wls'] - - configure_logging(wls_config, config_file) - - # Set RPNG seed from config - random.seed(config['general']['prng_seed']) - - topology = load_topology(topology_file) - - load_topics_into_topology(topology) - - random_emitters = get_random_emitters(topology, wls_config) - - msgs_dict = start_traffic_inyection(wls_config, random_emitters) - - save_messages(msgs_dict) - - -if __name__ == "__main__": - main() From 4e64bfb811edee302cc35a20f0fb403f664da9b9 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Mon, 13 Mar 2023 16:40:49 +0100 Subject: [PATCH 107/112] Added new test file for wls.star --- config/test_files/test_config_file.json | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 config/test_files/test_config_file.json diff --git a/config/test_files/test_config_file.json b/config/test_files/test_config_file.json new file mode 100644 index 0000000..20a1a90 --- /dev/null +++ b/config/test_files/test_config_file.json @@ -0,0 +1,14 @@ +{ + "general":{ + "prng_seed" : 67 + }, + "kurtosis": { + "enclave_name": "wakurtosis", + }, + "gennet": { + "num_nodes": 5 + }, + "wls": { + "simulation_time": 60 + } +} From 09ee68e44cdffb4877333b004ba94931c718b011 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Mon, 13 Mar 2023 16:41:02 +0100 Subject: [PATCH 108/112] Changed wls payload to base64 --- wls-module/src/utils/payloads.py | 21 ++++++++++++--- wls-module/src/utils/tests/test_payloads.py | 
30 ++++++++++++--------- wls-module/src/utils/waku_messaging.py | 5 ++-- 3 files changed, 38 insertions(+), 18 deletions(-) diff --git a/wls-module/src/utils/payloads.py b/wls-module/src/utils/payloads.py index 65fbeff..a875ad2 100644 --- a/wls-module/src/utils/payloads.py +++ b/wls-module/src/utils/payloads.py @@ -1,12 +1,13 @@ # Python Imports import random +import base64 # Project Imports from src.utils import wls_logger from src.utils import rtnorm -def _make_payload(bytes_size): +def _make_hex_payload(bytes_size): # Multiplied by 4 because each character in a string is one byte, so in a hex # we cannot go to two characters, this means we can only use 4 bits per byte. # We send half of the information but with the correct size, and as this is for testing purposes @@ -20,6 +21,18 @@ def _make_payload(bytes_size): return payload +def _make_base64_payload(bytes_size): + # Note this is effective payload, it does not match with base64EncodedSize + if bytes_size == 0: + raise ValueError('Payload size cannot be 0') + + random_bytes = bytes(random.choices(range(256), k=bytes_size)) + base64_bytes = base64.b64encode(random_bytes) + base64_string = base64_bytes.decode('utf-8') + + return base64_string + + def _make_uniform_dist(min_size, max_size): size = int(random.uniform(min_size, max_size)) @@ -27,7 +40,7 @@ def _make_uniform_dist(min_size, max_size): while (size % 2) != 0: size = int(random.uniform(min_size, max_size)) - return _make_payload(size), size + return _make_base64_payload(size), size def _make_gaussian_dist(min_size, max_size): @@ -39,14 +52,14 @@ def _make_gaussian_dist(min_size, max_size): while (size % 2) != 0: size = int(rtnorm.rtnorm(min_size, max_size, sigma=σ, mu=μ, size=1)) - return _make_payload(size), size + return _make_base64_payload(size), size def make_payload_dist(dist_type, min_size, max_size): # Check if min and max packet sizes are the same if min_size == max_size: wls_logger.G_LOGGER.warning(f"Packet size is constant: 
min_size=max_size={min_size}") - return _make_payload(min_size), min_size + return _make_base64_payload(min_size), min_size # Payload sizes are even integers uniformly distributed in [min_size, max_size] if dist_type == 'uniform': diff --git a/wls-module/src/utils/tests/test_payloads.py b/wls-module/src/utils/tests/test_payloads.py index 5565527..e654e1f 100644 --- a/wls-module/src/utils/tests/test_payloads.py +++ b/wls-module/src/utils/tests/test_payloads.py @@ -10,7 +10,7 @@ class TestPayloads(unittest.TestCase): @classmethod - def setUpClass(cls): + def setUp(cls): random.seed(1) def create_patch(self, name): @@ -19,38 +19,44 @@ def create_patch(self, name): self.addCleanup(patcher.stop) return thing - def test__make_payload(self): - payload = payloads._make_payload(1) + def test__make_hex_payload(self): + payload = payloads._make_hex_payload(1) print(payload) print(b"a") - self.assertEqual(payload, '0x9') + self.assertEqual(payload, '0x2') - def test__make_payload_error(self): + def test__make_hex_payload_error(self): with self.assertRaises(ValueError): - payloads._make_payload(0) + payloads._make_hex_payload(0) + + def test__make_base64_payload_error(self): + payload = payloads._make_base64_payload(1) + print(payload) + print(b"a") + self.assertEqual(payload, 'Ig==') def test__make_uniform_dist(self): payload, size = payloads._make_uniform_dist(1, 10) - self.assertEqual(payload, '0xc386bbc4') - self.assertEqual(size, 8) + self.assertEqual(payload, '2MM=') + self.assertEqual(size, 2) def test__make_gaussian_dist(self): mock_rtnorm = self.create_patch('src.utils.rtnorm.rtnorm') mock_rtnorm.return_value = 6 payload, size = payloads._make_gaussian_dist(1, 10) - self.assertEqual(payload, '0x2265b1') + self.assertEqual(payload, 'ItjDQX5z') self.assertEqual(size, 6) def test_make_payload_dist_same(self): payload, size = payloads.make_payload_dist('test', 1, 1) - self.assertEqual(payload, '0x1') + self.assertEqual(payload, 'Ig==') self.assertEqual(size, 1) def 
test_make_payload_dist_uniform(self): payload, size = payloads.make_payload_dist('uniform', 1, 10) - self.assertEqual(payload, '0xc9e9c6') - self.assertEqual(size, 6) + self.assertEqual(payload, '2MM=') + self.assertEqual(size, 2) def test_make_payload_dist_gaussian(self): mock__make_gaussian_dist = self.create_patch('src.utils.payloads._make_gaussian_dist') diff --git a/wls-module/src/utils/waku_messaging.py b/wls-module/src/utils/waku_messaging.py index f52d4d5..989a697 100644 --- a/wls-module/src/utils/waku_messaging.py +++ b/wls-module/src/utils/waku_messaging.py @@ -4,6 +4,7 @@ import requests import sys import random +import base64 # Project Imports from src.utils import wls_logger @@ -26,7 +27,7 @@ def _get_waku_payload(nonce, payload): def _create_waku_msg(payload): waku_msg = { - 'payload': json.dumps(payload).encode('utf-8').hex() + 'payload': base64.b64encode(json.dumps(payload).encode('utf-8')).decode('utf-8') } return waku_msg @@ -64,7 +65,7 @@ def _send_waku_rpc(data, node_address): def send_msg_to_node(node_address, topic, payload, nonce=1): my_payload = _get_waku_payload(nonce, payload) - waku_msg = _create_waku_msg(payload) + waku_msg = _create_waku_msg(my_payload) data = _create_waku_rpc_data(topic, waku_msg, node_address) response_obj, elapsed_ms = _send_waku_rpc(data, node_address) From caaf3f0741309ccfd4e6f93ff6ef0c703782c852 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Mon, 13 Mar 2023 16:41:40 +0100 Subject: [PATCH 109/112] Fixed test --- wls-module/src/utils/tests/test_waku_messaging.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wls-module/src/utils/tests/test_waku_messaging.py b/wls-module/src/utils/tests/test_waku_messaging.py index eebc4fd..45cf312 100644 --- a/wls-module/src/utils/tests/test_waku_messaging.py +++ b/wls-module/src/utils/tests/test_waku_messaging.py @@ -37,7 +37,7 @@ def test__get_waku_payload(self): def test__create_waku_msg(self): test_payload = waku_messaging._create_waku_msg('test') - 
self.assertEqual(test_payload, {'payload': '227465737422'}) + self.assertEqual(test_payload, {'payload': 'InRlc3Qi'}) def test__create_waku_rpc_data(self): test_data = waku_messaging._create_waku_rpc_data('test', 'test', 'test') From 2b52e688736d26943d0fa9d317079d7253933645 Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Thu, 16 Mar 2023 15:41:43 +0100 Subject: [PATCH 110/112] Added missing "_" separators into system variables --- src/nomos.star | 2 +- src/waku.star | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/nomos.star b/src/nomos.star index b17e169..aafb2f3 100644 --- a/src/nomos.star +++ b/src/nomos.star @@ -18,7 +18,7 @@ def get_nomos_peer_id(plan, service_name, port_id): def create_node_multiaddress(node_id, node_information): ip = node_information[vars.IP_KEY] - port = node_information[vars.PORTS_KEY][vars.NOMOS_LIBP2P_PORT_ID + "_" + node_id][0] + port = node_information[vars.PORTS_KEY][vars.NOMOS_LIBP2P_PORT_ID + vars.ID_STR_SEPARATOR + node_id][0] nomos_node_id = node_information[vars.PEER_ID_KEY] return '"/ip4/' + str(ip) + '/tcp/' + str(port) + '/p2p/' + nomos_node_id + '"' diff --git a/src/waku.star b/src/waku.star index 9ed68c4..78d6b5f 100644 --- a/src/waku.star +++ b/src/waku.star @@ -51,7 +51,7 @@ def make_service_wait(plan, service_name, time): def get_waku_peers(plan, waku_service_container, node_name): extract = {"peers": '.result | length'} - port_name = vars.RPC_PORT_ID +"_" + node_name + port_name = vars.RPC_PORT_ID + vars.ID_STR_SEPARATOR + node_name response = call_protocols.send_json_rpc(plan, waku_service_container, port_name, vars.GET_PEERS_METHOD, "", extract) From 8de3cd6968a8deeeee5b431099187ec31efb38be Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Thu, 16 Mar 2023 15:51:12 +0100 Subject: [PATCH 111/112] Changed port name to match with kubernetes changes in wls --- wls-module/src/wls.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wls-module/src/wls.py 
b/wls-module/src/wls.py index 1c0a299..a6bd3c1 100644 --- a/wls-module/src/wls.py +++ b/wls-module/src/wls.py @@ -102,7 +102,7 @@ def _select_emitter_with_topic(random_emitters): # Pick an emitter at random from the emitters list random_emitter, random_emitter_info = random.choice(list(random_emitters.items())) emitter_address = f"http://{random_emitter_info['ip_address']}:" \ - f"{random_emitter_info['ports']['rpc_' + random_emitter][0]}/" + f"{random_emitter_info['ports']['rpc-' + random_emitter][0]}/" emitter_topics = random_emitter_info["topics"] # Pick a topic at random from the topics supported by the emitter emitter_topic = random.choice(emitter_topics) From 8f34f0cdc38d71adf88455c3b5c208ae34c6172b Mon Sep 17 00:00:00 2001 From: Alberto Soutullo Date: Thu, 16 Mar 2023 17:30:44 +0100 Subject: [PATCH 112/112] Updated kurtosis version to 0.67.3 --- README.md | 2 +- build.sh | 2 +- run.sh | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index d9d1107..8fef382 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ More info about Kurtosis: https://docs.kurtosis.com/ #### Before using this repository note that: -- **You are using Kurtosis version 0.67.1**. This is important, as they are working on it and changes can be huge depending on different versions. You can find all Kurtosis versions [here](https://github.com/kurtosis-tech/kurtosis-cli-release-artifacts/releases). +- **You are using Kurtosis version 0.67.3**. This is important, as they are working on it and changes can be huge depending on different versions. You can find all Kurtosis versions [here](https://github.com/kurtosis-tech/kurtosis-cli-release-artifacts/releases). - The topology files that will be used by default are defined in `config/topology_generated/`. This topology is created with the [gennet](gennet-module/Readme.md) module. - Kurtosis can set up services in a parallel manner, defined in the `config.json` file (see below). 
- Only `kurtosis` and `docker` are needed to run this. diff --git a/build.sh b/build.sh index 3f2c1c5..0b262fc 100755 --- a/build.sh +++ b/build.sh @@ -5,7 +5,7 @@ sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin apt-get install -y jq # Install the suitable kurtosis-cli -kurtosis_version=0.67.1 +kurtosis_version=0.67.3 echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list sudo apt update sudo apt-mark unhold kurtosis-cli diff --git a/run.sh b/run.sh index 67f7849..aad40ac 100755 --- a/run.sh +++ b/run.sh @@ -93,11 +93,11 @@ echo "Output of kurtosis run command written in kurtosisrun_log.txt" ### Wait for WLS to finish # Get the container prefix/uffix for the WLS service -enclave_prefix="$(kurtosis --cli-log-level $loglevel enclave inspect --full-uuids $enclave_name | grep UUID: | awk '{print $2}')" -cid_suffix="$(kurtosis --cli-log-level $loglevel enclave inspect --full-uuids $enclave_name | grep $wls_service_name | cut -f 1 -d ' ')" +service_name="$(kurtosis --cli-log-level $loglevel enclave inspect $enclave_name | grep $wls_service_name | awk '{print $2}')" +service_uuid="$(kurtosis --cli-log-level $loglevel enclave inspect --full-uuids $enclave_name | grep $wls_service_name | awk '{print $1}')" # Construct the fully qualified container name that kurtosis has created -cid="$enclave_prefix--user-service--$cid_suffix" +cid="$service_name--$service_uuid" # Wait for the container to halt; this will block echo -e "Waiting for simulation to finish ..."