This repository has been archived by the owner on Sep 1, 2024. It is now read-only.

Message size/us133166/add message size parameter #78

Merged: 9 commits, Jan 31, 2024
api/README.md (5 changes: 4 additions & 1 deletion)
@@ -31,8 +31,11 @@ python3 -m src.main
 curl --location 'http://localhost:3020/qujata-api/analyze' \
 --header 'Content-Type: application/json' \
 --data '{
+    "experimentName": "name",
+    "description" : "test description",
     "algorithms": ["kyber512"],
-    "iterationsCount": 5
+    "iterationsCount": [5],
+    "messageSizes": [10]
 }'
 ```
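On success the endpoint responds with the id of the test suite that was created (see `analyze_service.py` below), so an illustrative response would be:

```
{ "test_suite_id": 1 }
```

The id value here is made up; it is whatever the database assigns.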

api/src/api/analyze_api.py (4 changes: 4 additions & 0 deletions)
@@ -41,6 +41,10 @@ def __validate(data):
     for iterations in data['iterationsCount']:
         if iterations <= 0:
             raise ApiException('The number of iterations should be greater than 0', INVALID_DATA_MESSAGE, HTTP_STATUS_BAD_REQUEST)
+    if 'messageSizes' in data:
+        for message_size in data['messageSizes']:
+            if message_size < 0:
+                raise ApiException('The message size should be greater than -1', INVALID_DATA_MESSAGE, HTTP_STATUS_BAD_REQUEST)
     if process_is_running:
         raise ApiException('The previous test is still running. Please try again in few minutes', 'Current test is still running', HTTP_STATUS_LOCKED)
     for algorithm in data['algorithms']:
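As a quick sanity check of the new rule in isolation, here is a minimal self-contained sketch; a plain `ValueError` stands in for `ApiException`, and the loop mirrors the diff:

```
def validate_message_sizes(data: dict) -> None:
    # 'messageSizes' is optional; requests without it remain valid.
    if 'messageSizes' in data:
        for message_size in data['messageSizes']:
            # 0 is allowed (an empty payload); only negatives are rejected.
            if message_size < 0:
                raise ValueError('The message size should be greater than -1')

validate_message_sizes({'messageSizes': [10]})    # passes
validate_message_sizes({})                        # passes: key is optional
# validate_message_sizes({'messageSizes': [-1]})  # raises ValueError
```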
api/src/services/analyze_service.py (29 changes: 13 additions & 16 deletions)
@@ -9,13 +9,7 @@
 from flask import jsonify, current_app
 import src.services.test_suites_service as test_suites_service
 import src.services.metrics_service as metrics_service
-from src.models.env_info import EnvInfo
-from src.models.test_suite import TestSuite
-from src.models.test_run import TestRun
-from src.enums.status import Status
-from src.exceptions.exceptions import ApiException
-
 
 # constants
 WAIT_MS = 15
@@ -26,14 +20,16 @@ def analyze(data):
     start_time = int(datetime.timestamp(datetime.now() - timedelta(seconds=60)) * 1000)
     iterations_count = data['iterationsCount']
     algorithms = data['algorithms']
+    message_sizes = data['messageSizes'] if 'messageSizes' in data else [0]
     first_run = True
     for algorithm in algorithms:
         for iterations in iterations_count:
-            if not first_run:
-                time.sleep(WAIT_MS)
-            else:
-                first_run = False
-            __create_test_run(algorithm, iterations, test_suite.id)
+            for message_size in message_sizes:
+                if not first_run:
+                    time.sleep(WAIT_MS)
+                else:
+                    first_run = False
+                __create_test_run(algorithm, iterations, message_size, test_suite.id)
 
     # end time is now + 90 sec, to show the graph after the test for sure finished running
     end_time = int(datetime.timestamp(datetime.now() + timedelta(seconds=90)) * 1000)
@@ -45,20 +41,21 @@
     return jsonify({'test_suite_id': test_suite.id})
 
 
-def __create_test_run(algorithm, iterations, test_suite_id):
+def __create_test_run(algorithm, iterations, message_size, test_suite_id):
     start_time = datetime.now()
     metrics_service.start_collecting()
-    status, status_message = __run(algorithm, iterations)
+    status, status_message = __run(algorithm, iterations, message_size)
     metrics_service.stop_collecting()
     end_time = datetime.now()
-    test_suites_service.create_test_run(start_time, end_time, algorithm, iterations, test_suite_id, status, status_message, *metrics_service.get_metrics())
+    test_suites_service.create_test_run(start_time, end_time, algorithm, iterations, message_size, test_suite_id, status, status_message, *metrics_service.get_metrics())
 
 
-def __run(algorithm, iterations):
+def __run(algorithm, iterations, message_size):
     logging.debug('Running test for algorithm: %s ', algorithm)
     payload = {
         'algorithm': algorithm,
-        'iterationsCount': iterations
+        'iterationsCount': iterations,
+        'messageSize': message_size
     }
     headers = { 'Content-Type': 'application/json' }
     response = requests.post(current_app.configurations.curl_url + "/curl", headers=headers, json=payload, timeout=int(current_app.configurations.request_timeout))
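Note that `analyze()` now schedules one test run per (algorithm, iterations, message size) combination, pausing `WAIT_MS` between consecutive runs. A small sketch of the resulting fan-out (input values are made up):

```
from itertools import product

algorithms = ['kyber512', 'frodo640aes']
iterations_count = [1000, 2000]
message_sizes = [10, 1024]

# Equivalent to the triple-nested loop in analyze(): one run per combination.
runs = list(product(algorithms, iterations_count, message_sizes))
print(len(runs))  # 8 = 2 algorithms * 2 iteration counts * 2 message sizes
```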
api/src/services/cadvisor_service.py (3 changes: 1 addition & 2 deletions)
@@ -1,7 +1,6 @@
 import src.services.k8s_service as k8s_service
 import requests
 import pandas as pd
-import logging
 from src.enums.environment import Environment
 
 DOCKER_METRICS_URL = "{}/api/v1.3/docker/{}"
@@ -28,7 +27,7 @@ def get_metrics_url(service_name):
     elif __environment == Environment.KUBERNETES.value:
         return __build_k8s_metrics_url(service_name)
     else:
-        raise RuntimeError("Invalid Environemnt: " + __environment)
+        raise RuntimeError("Invalid Environment: " + __environment)
 
 
 def __build_docker_metrics_url(service_name):
api/src/services/metrics_service.py (2 changes: 0 additions & 2 deletions)
@@ -1,5 +1,3 @@
 from flask import current_app
-from src.models.test_run_metric import TestRunMetric
 from src.utils.metrics_collector import MetricsCollector
-import logging
 
api/src/services/test_suites_service.py (4 changes: 2 additions & 2 deletions)
@@ -36,15 +36,15 @@ def create_test_suite(data):
     current_app.database_manager.create(test_suite)
     return test_suite
 
-def create_test_run(start_time, end_time, algorithm, iterations, test_suite_id, status, status_message, client_metrics, server_metrics):
+def create_test_run(start_time, end_time, algorithm, iterations, message_size, test_suite_id, status, status_message, client_metrics, server_metrics):
     test_run = TestRun(
         start_time=start_time,
         end_time=end_time,
         algorithm=algorithm,
         iterations=iterations,
         status=status,
         status_message=status_message,
-        # message_size=1024,
+        message_size=message_size,
         test_suite_id=test_suite_id
     )
     current_app.database_manager.create(test_run)
api/src/utils/test_suite_serializer.py (5 changes: 3 additions & 2 deletions)
@@ -5,11 +5,11 @@ def serialize(test_suite):
         "id": test_suite.id,
         "name": test_suite.name,
         "description": test_suite.description,
-        "codeRelease": test_suite.code_release,
+        "code_release": test_suite.code_release,
         "start_time": test_suite.start_time,
         "end_time": test_suite.end_time,
         "environment_info": __get_environment_info(test_suite.env_info),
-        "testRuns": __get_test_runs_metrics(test_suite.test_runs)
+        "test_runs": __get_test_runs_metrics(test_suite.test_runs)
     }
     return response_data

@@ -35,6 +35,7 @@ def __get_test_runs_metrics(test_runs):
             "id": test_run.id,
             "algorithm": test_run.algorithm,
             "iterations": test_run.iterations,
+            "message_size": test_run.message_size,
             "results": {
                 "averageCPU": round(cpu_avg, 2),
                 "averageMemory": int(memory_avg),
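With the renamed keys and the new field, a serialized suite now has roughly this shape (values are illustrative; the nulled environment fields match the test fixture below):

```
{
  "id": 1,
  "name": "name",
  "description": "description",
  "code_release": "1.1.0",
  "start_time": null,
  "end_time": null,
  "environment_info": {"cpu": null, "cpuArchitecture": null, "cpuClockSpeed": null,
                       "cpuCores": null, "nodeSize": null, "operatingSystem": null,
                       "resourceName": null},
  "test_runs": [
    {"id": 1, "algorithm": "kyber512", "iterations": 1000, "message_size": 100,
     "results": {"averageCPU": 9.0, "averageMemory": 14}}
  ]
}
```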
api/tests/test_analyze_api.py (43 changes: 33 additions & 10 deletions)
@@ -39,7 +39,8 @@ def test_analyze(self, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
             "algorithms":["kyber512"],
             "iterationsCount": [1000, 2000],
             "experimentName": "name",
-            "description": "name"
+            "description": "name",
+            "messageSizes": [100]
         }
         # Mock the requests.post call
         with patch(POST_REQUEST) as mock_post:
@@ -78,7 +79,8 @@ def test_analyze_return_general_error(self, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
             "algorithms":["kyber512"],
             "iterationsCount": [1000],
             "experimentName": "name",
-            "description": "name"
+            "description": "name",
+            "messageSizes": [100]
 
         }
         # Mock the requests.post call to raise an exception
@@ -98,7 +100,8 @@ def test_analyze_with_invalid_iterations_count(self, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
             "algorithms": ["kyber512"],
             "iterationsCount": [-1],
             "experimentName": "name",
-            "description": "name"
+            "description": "name",
+            "messageSizes": [100]
         }
         response = self.client.post(PATH,
                                     data=json.dumps(input_data),
@@ -108,13 +111,30 @@ def test_analyze_with_invalid_iterations_count(self, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
         self.assertEqual(response_json["error"], INVALID_DATA_PROVIDED)
         self.assertEqual(response_json["message"], "The number of iterations should be greater than 0")
 
+    def test_analyze_with_invalid_message_sizes(self, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
+        input_data = {
+            "algorithms": ["kyber512"],
+            "iterationsCount": [10],
+            "experimentName": "name",
+            "description": "name",
+            "messageSizes": [-1]
+        }
+        response = self.client.post(PATH,
+                                    data=json.dumps(input_data),
+                                    content_type=CONTENT_TYPE)
+        self.assertEqual(response.status_code, 400)
+        response_json = json.loads(response.data)
+        self.assertEqual(response_json["error"], INVALID_DATA_PROVIDED)
+        self.assertEqual(response_json["message"], "The message size should be greater than -1")
+
 
     def test_analyze_with_invalid_algorithm(self, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
         input_data = {
             "algorithms":["invalid_algorithm"],
             "iterationsCount": [1000],
             "experimentName": "name",
-            "description": "name"
+            "description": "name",
+            "messageSizes": [100]
         }
         response = self.client.post(PATH,
                                     data=json.dumps(input_data),
@@ -144,7 +164,8 @@ def test_analyze_with_curl_failure(self, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
             "algorithms":["kyber512"],
             "iterationsCount": [1000],
             "experimentName": "name",
-            "description": "name"
+            "description": "name",
+            "messageSizes": [100]
         }
         # Mock the requests.post call
         with patch(POST_REQUEST) as mock_post:
@@ -160,13 +181,13 @@ def test_analyze_with_curl_failure(self, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
             self.assertEqual(actual_test_run[0].status_message, '{"result": "failed"}')
 
 
-
     def test_analyze_with_missing_env_info(self, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
         input_data = {
-            "algorithms":["kyber512"],
+            "algorithms": ["kyber512"],
             "iterationsCount": [1000],
             "experimentName": "name",
-            "description": "name"
+            "description": "name",
+            "messageSizes": [100]
         }
         self.app.database_manager.get_latest.return_value = None
         response = self.client.post(PATH,
@@ -185,7 +206,8 @@ def test_analyze_with_423(self, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
             "algorithms":["kyber512"],
             "iterationsCount": [1000],
             "experimentName": "name",
-            "description": "name"
+            "description": "name",
+            "messageSizes": [100]
         }
         analyze_api.process_is_running = True
         # Mock the requests.post call
@@ -203,7 +225,8 @@ def test_analyze_sleep_between_tests(self, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
             "algorithms":["kyber512","frodo640aes"],
             "iterationsCount": [1000],
             "experimentName": "name",
-            "description": "name"
+            "description": "name",
+            "messageSizes": [100]
         }
         with patch(GET_REQUEST) as mock_get:
             mock_get.return_value.status_code = 200
api/tests/test_tests_api.py (2 changes: 1 addition & 1 deletion)
@@ -70,7 +70,7 @@ def test_get_test_suite(self):
         self.app.database_manager.get_by_id.return_value = test_suite
         response = self.client.get(TEST_SUITES_GET_URL)
         result = json.loads(response.data)
-        expected = {'codeRelease': '1.1.0', 'description': 'description', 'end_time': None, 'environment_info': {'cpu': None, 'cpuArchitecture': None, 'cpuClockSpeed': None, 'cpuCores': None, 'nodeSize': None, 'operatingSystem': None, 'resourceName': None}, 'id': None, 'name': 'name', 'start_time': None, 'testRuns': [{'algorithm': None, 'id': 1, 'iterations': None, 'results': {'averageCPU': 9.0, 'averageMemory': 14}}]}
+        expected = {'code_release': '1.1.0', 'description': 'description', 'end_time': None, 'environment_info': {'cpu': None, 'cpuArchitecture': None, 'cpuClockSpeed': None, 'cpuCores': None, 'nodeSize': None, 'operatingSystem': None, 'resourceName': None}, 'id': None, 'name': 'name', 'start_time': None, 'test_runs': [{'algorithm': None, 'id': 1, 'iterations': None, 'message_size': None, 'results': {'averageCPU': 9.0, 'averageMemory': 14}}]}
         self.assertEqual(result, expected)
 
     def test_get_test_suite_return_not_found(self):
curl/package-lock.json (39 changes: 31 additions & 8 deletions; generated file, diff not rendered)
curl/package.json (7 changes: 4 additions & 3 deletions)
@@ -19,13 +19,14 @@
     "test:debug": "node --inspect-brk -r tsconfig-paths/register -r ts-node/register node_modules/.bin/jest --runInBand"
   },
   "dependencies": {
-    "@nestjs/common": "^9.0.0",
-    "@nestjs/config": "3.0.0",
+    "@nestjs/common": "^9.4.3",
+    "@nestjs/config": "^3.0.0",
     "@nestjs/core": "^9.0.0",
     "@nestjs/mapped-types": "*",
     "@nestjs/platform-express": "^9.0.0",
     "class-transformer": "^0.5.1",
     "class-validator": "^0.14.0",
+    "crypto": "^1.0.1",
     "reflect-metadata": "^0.1.13",
     "rxjs": "^7.2.0"
   },
@@ -35,7 +36,7 @@
     "@nestjs/testing": "^9.4.3",
     "@types/express": "^4.17.13",
     "@types/jest": "29.2.4",
-    "@types/node": "18.11.18",
+    "@types/node": "^18.11.18",
     "@types/supertest": "^2.0.11",
     "@typescript-eslint/eslint-plugin": "^5.0.0",
     "@typescript-eslint/parser": "^5.0.0",
curl/scripts/run-curl-loop.sh (5 changes: 3 additions & 2 deletions)
@@ -1,10 +1,11 @@
 #!/bin/bash
 
-# This script expects four arguments
+# This script expects five arguments
 nginx_host="$1"
 nginx_port="$2"
 iteration_count="$3"
 algorithm="$4"
+payload="$5"
 num_processes=$(($(getconf _NPROCESSORS_ONLN) * 2))
 
-seq ${iteration_count} | xargs -P $num_processes -n 1 -I % curl https://${nginx_host}:${nginx_port} -k --curves ${algorithm} -so /dev/null
+seq ${iteration_count} | xargs -P $num_processes -n 1 -I % curl https://${nginx_host}:${nginx_port} -k --curves ${algorithm} -XPOST -d "$payload" -H "Content-Type: text/plain" -o /dev/null
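For reference, a hypothetical invocation with the new fifth argument (host, port, and payload here are made up):

```
# 1000 POST requests of a 10-byte payload over a kyber512-negotiated connection.
./run-curl-loop.sh nginx.local 4433 1000 kyber512 "aaaaaaaaaa"
```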