import subprocess
-from perfTestInput import phases, load_configs, ZINGG
+import json
import time
-from datetime import date, datetime
-from subprocess import PIPE
import os
+from datetime import datetime, date
+from perfTestInput import phases, load_configs, ZINGG

-#set working directory
-os.chdir(os.path.dirname("../"))
-
-ZINGG = ZINGG
-#phases to run: ftd, match
+ZINGG = ZINGG
phases_to_test = phases
+load = load_configs

now = datetime.now()
current_time = now.strftime("%H:%M:%S")

-#load to test: 65, 120k, 5m
-load = load_configs
+reportFile = "./perf_test/perf_test_report/loadTestReport.json"

-start_time = time.time()
+propertyFile = "./config/zingg.conf"
+PERFORMANCE_THRESHOLD = 1.05  # 5% increase threshold

-reportFile = os.path.abspath(os.curdir)+"/zingg/perf_test/perf_test_report/loadTestReport"

-def perf_test_small_all():
-    return "small_test_running_all"
+def load_results():
+    """Load previous test results if available."""
+    if os.path.exists(reportFile):
+        with open(reportFile, "r") as f:
+            try:
+                return json.load(f)
+            except json.JSONDecodeError:
+                return {}
+    return {}

-propertyFile = "./config/zingg.conf"

-def run_phase(phase, conf):
-    print("Running phase - " + phase)
-    return subprocess.call(ZINGG + " %s %s %s %s %s %s" % ("--phase", phase, "--conf", conf, "--properties-file", propertyFile), shell=True)
+def save_results(data):
+    """Save current test results to the report file."""
+    with open(reportFile, "w") as f:
+        json.dump(data, f, indent=4)

-def perf_test_small(phase):
-    return "small_test_running"
+
+def run_phase(phase, conf):
+    """Run a single test phase."""
+    print(f"Running phase - {phase}")
+    return subprocess.call(
+        f"{ZINGG} --phase {phase} --conf {conf} --properties-file {propertyFile}",
+        shell=True
+    )


def write_on_start():
-    f = open(reportFile, "w+")
-    f.write("******************************** perf test report, " + str(date.today()) + ", " + current_time + " ********************************\n\n");
-    f.write("------------ Test bed details ------------\n")
-    f.write("Load samples: ")
-    for load, config in load_configs.items():
-        f.write(str(load) + " ")
-    f.write("\n")
-    f.write("Phases: ")
-    for phase in phases:
-        f.write(phase + " ")
-    f.write("\n")
-    f.write("------------------------------------------\n\n")
-    f.close()
-
-def write_on_complete():
-    f = open(reportFile, "a+")
-    f.write("********************************************************************************************************\n\n\n\n\n\n")
-
-
-
-
-def write_success_stats(phase_time, load):
-    f = open(reportFile, "a+")
-    f.write("{:>50}".format("capturing for " + load) + "\n")
-    f.write("PHASE {:>65}".format("TIME_TAKEN_IN_MINUTES") + "\n")
-    for phase, time in phase_time.items():
-        f.write(success_message(phase, round(time/60, 1)) + "\n")
-    f.write("\n")
-    f.close()
-
-def write_failure_stats(phase_error):
-    f = open(reportFile, "a+")
-    for phase, error in phase_error.items():
-        f.write(error_message(phase, error) + "\n\n")
-    f.close()
+    """Initialize test report with metadata."""
+    test_data = {
+        "date": str(date.today()),
+        "time": current_time,
+        "load_samples": list(load_configs.keys()),
+        "phases": phases,
+        "results": {}
+    }
+    return test_data  # Return instead of saving immediately
+
+
+def compare_results(prev_results, new_results):
+    """Compare new results with previous ones and check for performance degradation."""
+
+    test_fail = False
+
+    for load_size, phase_times in new_results.items():
+        if load_size in prev_results:
+            for phase, new_time_seconds in phase_times.items():
+                prev_phase_data = prev_results[load_size].get(phase, {})
+
+                if "time_taken_minutes" in prev_phase_data:
+                    prev_time = prev_phase_data["time_taken_minutes"]
+                    new_time = round(new_time_seconds / 60, 2)  # Convert seconds to minutes
+
+                    if new_time > prev_time * PERFORMANCE_THRESHOLD:
+                        print(f"Performance degradation detected in phase {phase} (Load: {load_size})!")
+                        print(f"Previous time: {prev_time} min, New time: {new_time} min")
+                        test_fail = True
+
+    return test_fail


def perform_load_test():
+    """Execute the test and compare with previous results."""
    if not load_configs:
        print("No load configured to test, first set it!")
        return
    if not phases_to_test:
        print("No phase set for test, first set it!")
        return

-    for load, config in load_configs.items():
-        phase_time = {}
-        phase_error = {}
+    prev_results = load_results().get("results", {})
+
+    test_data = write_on_start()  # Initialize metadata
+
+    phase_time = {}
+    phase_error = {}
+
+    for load_size, config in load_configs.items():
+        phase_time[load_size] = {}
+        phase_error[load_size] = {}
+
        for phase in phases_to_test:
            try:
                t1 = time.time()
-                r = run_phase(phase, config)
+                result = run_phase(phase, config)
                t2 = time.time()
-                phase_time[phase] = t2 - t1
+                phase_time[load_size][phase] = t2 - t1
            except Exception as e:
-                phase_error[phase] = e
+                phase_error[load_size][phase] = e

+    # Compare against the previous run before writing the new report
+    test_fail = compare_results(prev_results, phase_time)

-        #write success data to file
-        if phase_time:
-            write_success_stats(phase_time, load)
-        #write failure data to file
-        if phase_error:
-            write_failure_stats(phase_error)
+    test_data["results"] = {}

+    for load_size, times in phase_time.items():
+        test_data["results"][load_size] = {
+            phase: {
+                "time_taken_minutes": round(duration / 60, 2),
+                "status": "success"
+            } for phase, duration in times.items()
+        }

-def success_message(phase, time):
-    return "{:<20} {:>50}".format(phase, str(time))
+    for load_size, errors in phase_error.items():
+        if load_size not in test_data["results"]:
+            test_data["results"][load_size] = {}

-def error_message(phase, error):
-    return phase + " failed with error " + str(error) + "\n"
+        for phase, error in errors.items():
+            test_data["results"][load_size][phase] = {
+                "error": str(error),
+                "status": "failure"
+            }

+    # Save results after successful test execution
+    save_results(test_data)
+
+    if test_fail:
+        exit(1)

def main():
-    write_on_start()
    perform_load_test()
-    write_on_complete()
+

if __name__ == "__main__":
    main()
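For context, the script imports phases, load_configs, and ZINGG from perfTestInput. A minimal sketch of what that module is expected to provide is below; the names come from the import, the sample phase and load labels come from the comments removed above ("ftd, match" and "65, 120k, 5m"), and every path shown is a placeholder rather than the repository's actual value.

# perfTestInput.py -- illustrative sketch only; paths and values are placeholders
ZINGG = "./scripts/zingg.sh"  # assumed launcher location, not confirmed by this diff
phases = ["ftd", "match"]  # phase names taken from the removed comment
load_configs = {
    "65": "./perf_test/config/config65.json",  # hypothetical config paths
    "120k": "./perf_test/config/config120k.json",
    "5m": "./perf_test/config/config5m.json",
}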
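With this change the free-form text report is replaced by loadTestReport.json. Based on write_on_start and the results assembly in perform_load_test, a saved report would be shaped roughly like this (date and timings are illustrative):

{
    "date": "2024-01-01",
    "time": "10:30:00",
    "load_samples": ["120k"],
    "phases": ["ftd", "match"],
    "results": {
        "120k": {
            "ftd": {"time_taken_minutes": 4.25, "status": "success"},
            "match": {"time_taken_minutes": 10.5, "status": "success"}
        }
    }
}

A failed phase appears under the same load key with "error" and "status": "failure" instead of a timing.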
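The regression gate in compare_results is multiplicative: with PERFORMANCE_THRESHOLD = 1.05, a phase that previously took 10.0 minutes only fails the run if the new measurement exceeds 10.5 minutes. A minimal standalone sketch of that check, with made-up timings:

PERFORMANCE_THRESHOLD = 1.05  # 5% regression budget, as defined in the diff
prev_time = 10.0              # minutes, read from the previous report (example value)
new_time = 10.62              # minutes, measured in the current run (example value)
degraded = new_time > prev_time * PERFORMANCE_THRESHOLD
print(degraded)               # True: 10.62 > 10.5, so perform_load_test would exit(1)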