This repository has been archived by the owner on Sep 1, 2024. It is now read-only.

Message size/us133166/add message size parameter #78

Merged on Jan 31, 2024 (9 commits). Changes shown from 6 commits.
2 changes: 1 addition & 1 deletion api/main.py
@@ -5,7 +5,7 @@
 from src.api.tests_api import api as tests_blueprint
 import src.services.k8s_service as k8s_service
 import src.services.cadvisor_service as cadvisor_service
-from src.enums.environemnt import Environment
+from src.enums.environment import Environment
 from config.settings import load_config
 from flask_cors import CORS
 from src.utils.database_manager import DatabaseManager
4 changes: 4 additions & 0 deletions api/src/api/analyze_api.py
@@ -41,6 +41,10 @@ def __validate(data):
     for iterations in data['iterationsCount']:
         if iterations <= 0:
             raise ApiException('The number of iterations should be greater than 0', INVALID_DATA_MESSAGE, HTTP_STATUS_BAD_REQUEST)
+    if 'messageSizes' in data:
+        for message_size in data['messageSizes']:
+            if message_size < 0:
+                raise ApiException('The message size should be greater than -1', INVALID_DATA_MESSAGE, HTTP_STATUS_BAD_REQUEST)
     if process_is_running:
         raise ApiException('The previous test is still running. Please try again in few minutes', 'Current test is still running', HTTP_STATUS_LOCKED)
     for algorithm in data['algorithms']:
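For reference, a request that exercises the new validation might look like the sketch below. The field names come from the tests later in this diff; the endpoint URL is a placeholder, and a negative entry in messageSizes yields HTTP 400.

import requests

payload = {
    "algorithms": ["kyber512"],
    "iterationsCount": [1000],
    "messageSizes": [0, 100, 1024],  # each entry must be >= 0
    "experimentName": "demo",
    "description": "demo",
}
# Placeholder URL; point this at your deployment of the analyze API
response = requests.post("http://localhost:5000/analyze", json=payload)
print(response.status_code, response.json())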
31 changes: 15 additions & 16 deletions api/src/services/analyze_service.py
@@ -9,13 +9,7 @@
 from flask import jsonify, current_app
 import src.services.test_suites_service as test_suites_service
 import src.services.metrics_service as metrics_service
-from src.models.env_info import EnvInfo
-from src.models.test_suite import TestSuite
-from src.models.test_run import TestRun
-from src.enums.status import Status
-from src.exceptions.exceptions import ApiException
-


 # constants
 WAIT_MS = 15
@@ -26,14 +20,18 @@ def analyze(data):
     start_time = int(datetime.timestamp(datetime.now() - timedelta(seconds=60)) * 1000)
     iterations_count = data['iterationsCount']
     algorithms = data['algorithms']
+    message_sizes = [0]
+    if 'messageSizes' in data:
+        message_sizes = data['messageSizes']
     first_run = True
     for algorithm in algorithms:
         for iterations in iterations_count:
-            if not first_run:
-                time.sleep(WAIT_MS)
-            else:
-                first_run = False
-            __create_test_run(algorithm, iterations, test_suite.id)
+            for message_size in message_sizes:
+                if not first_run:
+                    time.sleep(WAIT_MS)
+                else:
+                    first_run = False
+                __create_test_run(algorithm, iterations, message_size, test_suite.id)

     # end time is now + 90 sec, to show the graph after the test for sure finished running
     end_time = int(datetime.timestamp(datetime.now() + timedelta(seconds=90)) * 1000)
@@ -45,20 +43,21 @@
     return jsonify({'test_suite_id': test_suite.id})


-def __create_test_run(algorithm, iterations, test_suite_id):
+def __create_test_run(algorithm, iterations, message_size, test_suite_id):
     start_time = datetime.now()
     metrics_service.start_collecting()
-    status, status_message = __run(algorithm, iterations)
+    status, status_message = __run(algorithm, iterations, message_size)
     metrics_service.stop_collecting()
     end_time = datetime.now()
-    test_suites_service.create_test_run(start_time, end_time, algorithm, iterations, test_suite_id, status, status_message, *metrics_service.get_metrics())
+    test_suites_service.create_test_run(start_time, end_time, algorithm, iterations, message_size, test_suite_id, status, status_message, *metrics_service.get_metrics())


-def __run(algorithm, iterations):
+def __run(algorithm, iterations, message_size):
     logging.debug('Running test for algorithm: %s ', algorithm)
     payload = {
         'algorithm': algorithm,
-        'iterationsCount': iterations
+        'iterationsCount': iterations,
+        'messageSize': message_size
     }
     headers = { 'Content-Type': 'application/json' }
     response = requests.post(current_app.configurations.curl_url + "/curl", headers=headers, json=payload, timeout=int(current_app.configurations.request_timeout))
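The new inner loop multiplies the test matrix: each algorithm now runs at every iteration count for every message size, sleeping WAIT_MS between consecutive runs (skipped on the first), and message_sizes defaults to [0] when the request omits messageSizes. A minimal sketch of the resulting schedule:

# Sketch of the run schedule implied by the nested loops above
algorithms = ["kyber512", "frodo640aes"]
iterations_count = [1000, 2000]
message_sizes = [0, 1024]

schedule = [
    (algorithm, iterations, message_size)
    for algorithm in algorithms
    for iterations in iterations_count
    for message_size in message_sizes
]
print(len(schedule))  # 2 * 2 * 2 = 8 test runs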
3 changes: 1 addition & 2 deletions api/src/services/cadvisor_service.py
@@ -1,7 +1,6 @@
 import src.services.k8s_service as k8s_service
 import requests
-import pandas as pd
 import logging
 from src.enums.environment import Environment

 DOCKER_METRICS_URL = "{}/api/v1.3/docker/{}"
@@ -28,7 +27,7 @@ def get_metrics_url(service_name):
     elif __environment == Environment.KUBERNETES.value:
         return __build_k8s_metrics_url(service_name)
     else:
-        raise RuntimeError("Invalid Environemnt: " + __environment)
+        raise RuntimeError("Invalid Environment: " + __environment)


 def __build_docker_metrics_url(service_name):
2 changes: 0 additions & 2 deletions api/src/services/metrics_service.py
@@ -1,5 +1,3 @@
-from flask import current_app
-from src.models.test_run_metric import TestRunMetric
 from src.utils.metrics_collector import MetricsCollector
 import logging

4 changes: 2 additions & 2 deletions api/src/services/test_suites_service.py
@@ -36,15 +36,15 @@ def create_test_suite(data):
     current_app.database_manager.create(test_suite)
     return test_suite

-def create_test_run(start_time, end_time, algorithm, iterations, test_suite_id, status, status_message, client_metrics, server_metrics):
+def create_test_run(start_time, end_time, algorithm, iterations, message_size, test_suite_id, status, status_message, client_metrics, server_metrics):
     test_run = TestRun(
         start_time=start_time,
         end_time=end_time,
         algorithm=algorithm,
         iterations=iterations,
         status=status,
         status_message=status_message,
-        # message_size=1024,
+        message_size=message_size,
         test_suite_id=test_suite_id
     )
     current_app.database_manager.create(test_run)
43 changes: 33 additions & 10 deletions api/tests/test_analyze_api.py
@@ -39,7 +39,8 @@ def test_analyze(self, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
"algorithms":["kyber512"],
"iterationsCount": [1000, 2000],
"experimentName": "name",
"description": "name"
"description": "name",
"messageSizes": [100]
}
# Mock the requests.post call
with patch(POST_REQUEST) as mock_post:
@@ -78,7 +79,8 @@ def test_analyze_return_general_error(self, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
"algorithms":["kyber512"],
"iterationsCount": [1000],
"experimentName": "name",
"description": "name"
"description": "name",
"messageSizes": [100]

}
# Mock the requests.post call to raise an exception
@@ -98,7 +100,8 @@ def test_analyze_with_invalid_iterations_count(self, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
"algorithms": ["kyber512"],
"iterationsCount": [-1],
"experimentName": "name",
"description": "name"
"description": "name",
"messageSizes": [100]
}
response = self.client.post(PATH,
data=json.dumps(input_data),
@@ -108,13 +111,30 @@
self.assertEqual(response_json["error"], INVALID_DATA_PROVIDED)
self.assertEqual(response_json["message"], "The number of iterations should be greater than 0")

def test_analyze_with_invalid_message_sizes(self, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
input_data = {
"algorithms": ["kyber512"],
"iterationsCount": [10],
"experimentName": "name",
"description": "name",
"messageSizes": [-1]
}
response = self.client.post(PATH,
data=json.dumps(input_data),
content_type=CONTENT_TYPE)
self.assertEqual(response.status_code, 400)
response_json = json.loads(response.data)
self.assertEqual(response_json["error"], INVALID_DATA_PROVIDED)
self.assertEqual(response_json["message"], "The message size should be greater than -1")


     def test_analyze_with_invalid_algorithm(self, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
         input_data = {
             "algorithms":["invalid_algorithm"],
             "iterationsCount": [1000],
             "experimentName": "name",
-            "description": "name"
+            "description": "name",
+            "messageSizes": [100]
         }
         response = self.client.post(PATH,
                                     data=json.dumps(input_data),
@@ -144,7 +164,8 @@ def test_analyze_with_curl_failure(self, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
"algorithms":["kyber512"],
"iterationsCount": [1000],
"experimentName": "name",
"description": "name"
"description": "name",
"messageSizes": [100]
}
# Mock the requests.post call
with patch(POST_REQUEST) as mock_post:
@@ -160,13 +181,13 @@ def test_analyze_with_curl_failure(self, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
             self.assertEqual(actual_test_run[0].status_message, '{"result": "failed"}')


-
     def test_analyze_with_missing_env_info(self, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
         input_data = {
-            "algorithms":["kyber512"],
+            "algorithms": ["kyber512"],
             "iterationsCount": [1000],
             "experimentName": "name",
-            "description": "name"
+            "description": "name",
+            "messageSizes": [100]
         }
         self.app.database_manager.get_latest.return_value = None
         response = self.client.post(PATH,
@@ -185,7 +206,8 @@ def test_analyze_with_423(self, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
"algorithms":["kyber512"],
"iterationsCount": [1000],
"experimentName": "name",
"description": "name"
"description": "name",
"messageSizes": [100]
}
analyze_api.process_is_running = True
# Mock the requests.post call
@@ -203,7 +225,8 @@ def test_analyze_sleep_between_tests(self, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
"algorithms":["kyber512","frodo640aes"],
"iterationsCount": [1000],
"experimentName": "name",
"description": "name"
"description": "name",
"messageSizes": [100]
}
with patch(GET_REQUEST) as mock_get:
mock_get.return_value.status_code = 200
39 changes: 31 additions & 8 deletions curl/package-lock.json

Some generated files are not rendered by default.

7 changes: 4 additions & 3 deletions curl/package.json
@@ -19,13 +19,14 @@
"test:debug": "node --inspect-brk -r tsconfig-paths/register -r ts-node/register node_modules/.bin/jest --runInBand"
},
"dependencies": {
"@nestjs/common": "^9.0.0",
"@nestjs/config": "3.0.0",
"@nestjs/common": "^9.4.3",
"@nestjs/config": "^3.0.0",
"@nestjs/core": "^9.0.0",
"@nestjs/mapped-types": "*",
"@nestjs/platform-express": "^9.0.0",
"class-transformer": "^0.5.1",
"class-validator": "^0.14.0",
"crypto": "^1.0.1",
"reflect-metadata": "^0.1.13",
"rxjs": "^7.2.0"
},
@@ -35,7 +36,7 @@
"@nestjs/testing": "^9.4.3",
"@types/express": "^4.17.13",
"@types/jest": "29.2.4",
"@types/node": "18.11.18",
"@types/node": "^18.11.18",
"@types/supertest": "^2.0.11",
"@typescript-eslint/eslint-plugin": "^5.0.0",
"@typescript-eslint/parser": "^5.0.0",
5 changes: 3 additions & 2 deletions curl/scripts/run-curl-loop.sh
@@ -1,10 +1,11 @@
 #!/bin/bash

-# This script expects four arguments
+# This script expects five arguments
 nginx_host="$1"
 nginx_port="$2"
 iteration_count="$3"
 algorithm="$4"
+payload="$5"
 num_processes=$(($(getconf _NPROCESSORS_ONLN) * 2))

-seq ${iteration_count} | xargs -P $num_processes -n 1 -I % curl https://${nginx_host}:${nginx_port} -k --curves ${algorithm} -so /dev/null
+seq ${iteration_count} | xargs -P $num_processes -n 1 -I % curl https://${nginx_host}:${nginx_port} -k --curves ${algorithm} -XPOST -d "$payload" -H "Content-Type: text/plain" -o /dev/null
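For illustration, the five positional arguments line up as in the sketch below. This is a hedged example: in this repository the script is invoked by the curl service rather than from Python, the host, port, and values shown are made up, and building the body as "A" * message_size is just one plausible way to get a payload of the requested size.

import subprocess

message_size = 100
payload = "A" * message_size  # assumed: a request body of message_size bytes

# Argument order matches the script: nginx_host, nginx_port, iteration_count, algorithm, payload
subprocess.run(
    ["./scripts/run-curl-loop.sh", "nginx", "8443", "1000", "kyber512", payload],
    check=True,
)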
3 changes: 3 additions & 0 deletions curl/src/curl/curl.controller.spec.ts
@@ -29,6 +29,7 @@ describe('CurlController', () => {
     const curlRequest: CurlRequest = {
       algorithm: 'kyber512',
       iterationsCount: 500,
+      messageSize: 10
     };
     const runSpy = jest.spyOn(curlService, 'run');
     await curlController.create(curlRequest);
@@ -38,6 +39,7 @@
     const curlRequest: CurlRequest = {
       algorithm: 'kyber512',
       iterationsCount: 500,
+      messageSize: 10
     };
     const expectedResult = undefined;
     jest.spyOn(curlService, 'run').mockResolvedValue(expectedResult);
@@ -48,6 +50,7 @@
     const curlRequest: CurlRequest = {
       algorithm: 'kyber512',
       iterationsCount: 500,
+      messageSize: 10
     };
     const error = new HttpException('Exception', 409);
     jest.spyOn(curlService, 'run').mockRejectedValue(error);