diff --git a/utbot-python/samples/run_tests.py b/utbot-python/samples/run_tests.py
index 2be8fdc00f..7cb0bf4acd 100644
--- a/utbot-python/samples/run_tests.py
+++ b/utbot-python/samples/run_tests.py
@@ -61,6 +61,7 @@ def generate_tests(
         command += f" -m {','.join(method_names)}"
     print(command)
     code = os.system(command)
+    print(code)
     return code
 
 
@@ -81,9 +82,10 @@ def check_coverage(
 ):
     config = parse_config(config_file)
     report: typing.Dict[str, bool] = {}
-    for part in config['parts'][:2]:
-        for file in part['files'][:2]:
-            for group in file['groups'][:2]:
+    coverage: typing.Dict[str, typing.Tuple[float, float]] = {}
+    for part in config['parts']:
+        for file in part['files']:
+            for group in file['groups']:
                 expected_coverage = group.get('coverage', 0)
 
                 file_suffix = f"{part['path'].replace('/', '_')}_{file['name']}"
@@ -92,8 +94,9 @@ def check_coverage(
                     actual_coverage_json = json.loads(fin.readline())
                     actual_covered = sum(lines['end'] - lines['start'] + 1 for lines in actual_coverage_json['covered'])
                     actual_not_covered = sum(lines['end'] - lines['start'] + 1 for lines in actual_coverage_json['notCovered'])
-                    actual_coverage = round(actual_covered / (actual_not_covered + actual_covered)) * 100
+                    actual_coverage = round(actual_covered / (actual_not_covered + actual_covered) * 100)
+                coverage[file_suffix] = (actual_coverage, expected_coverage)
                 report[file_suffix] = actual_coverage >= expected_coverage
 
     if all(report.values()):
         return True
@@ -102,15 +105,15 @@ def check_coverage(
     print("-------------")
     for file, good_coverage in report.items():
         if not good_coverage:
-            print(file)
+            print(f"{file}: {coverage[file][0]}/{coverage[file][1]}")
     return False
 
 
 def main_test_generation(args):
     config = parse_config(args.config_file)
-    for part in config['parts'][:2]:
-        for file in part['files'][:2]:
-            for group in file['groups'][:2]:
+    for part in config['parts']:
+        for file in part['files']:
+            for group in file['groups']:
                 full_name = pathlib.PurePath(args.path_to_test_dir, part['path'], file['name'])
                 output_file = pathlib.PurePath(args.output_dir, f"utbot_tests_{part['path'].replace('/', '_')}_{file['name']}.py")
                 coverage_output_file = pathlib.PurePath(args.coverage_output_dir, f"coverage_{part['path'].replace('/', '_')}_{file['name']}.json")
diff --git a/utbot-python/samples/samples/structures/graph_matrix.py b/utbot-python/samples/samples/structures/graph_matrix.py
new file mode 100644
index 0000000000..785c295724
--- /dev/null
+++ b/utbot-python/samples/samples/structures/graph_matrix.py
@@ -0,0 +1,40 @@
+# An island in a matrix is a group of linked areas, all having the same value.
+# This code counts the number of islands in a given matrix, including diagonal
+# connections.
+
+
+class Matrix:  # Public class to implement a graph
+    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
+        self.ROW = row
+        self.COL = col
+        self.graph = graph
+
+    def __eq__(self, other):
+        return self.graph == other.graph
+
+    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
+        return (
+            0 <= i < self.ROW
+            and 0 <= j < self.COL
+            and not visited[i][j]
+            and self.graph[i][j]
+        )
+
+    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
+        # Checking all 8 elements surrounding nth element
+        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
+        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
+        visited[i][j] = True  # Make those cells visited
+        for k in range(8):
+            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
+                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)
+
+    def count_islands(self) -> int:  # And finally, count all islands.
+        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
+        count = 0
+        for i in range(self.ROW):
+            for j in range(self.COL):
+                if visited[i][j] is False and self.graph[i][j] == 1:
+                    self.diffs(i, j, visited)
+                    count += 1
+        return count
\ No newline at end of file
diff --git a/utbot-python/samples/samples/structures/multi_level_feedback_queue.py b/utbot-python/samples/samples/structures/multi_level_feedback_queue.py
new file mode 100644
index 0000000000..284c758b8f
--- /dev/null
+++ b/utbot-python/samples/samples/structures/multi_level_feedback_queue.py
@@ -0,0 +1,312 @@
+from collections import deque
+
+
+class Process:
+    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
+        self.process_name = process_name  # process name
+        self.arrival_time = arrival_time  # arrival time of the process
+        # completion time of finished process or last interrupted time
+        self.stop_time = arrival_time
+        self.burst_time = burst_time  # remaining burst time
+        self.waiting_time = 0  # total time the process waits in the ready queue
+        self.turnaround_time = 0  # time from arrival time to completion time
+
+
+class MLFQ:
+    """
+    MLFQ(Multi Level Feedback Queue)
+    https://en.wikipedia.org/wiki/Multilevel_feedback_queue
+    MLFQ has multiple queues, each with a different priority
+    In this MLFQ,
+    the first queue (0) through the second-to-last queue (N-2) use the Round Robin algorithm
+    and the last queue (N-1) uses the First Come, First Served algorithm
+    """
+
+    def __init__(
+        self,
+        number_of_queues: int,
+        time_slices: list[int],
+        queue: deque[Process],
+        current_time: int,
+    ) -> None:
+        # total number of mlfq's queues
+        self.number_of_queues = number_of_queues
+        # time slices of the queues that use the round robin algorithm
+        self.time_slices = time_slices
+        # unfinished processes are in this ready_queue
+        self.ready_queue = queue
+        # current time
+        self.current_time = current_time
+        # finished processes are in this sequence queue
+        self.finish_queue: deque[Process] = deque()
+
+    def calculate_sequence_of_finish_queue(self) -> list[str]:
+        """
+        This method returns the sequence of finished processes
+        >>> P1 = Process("P1", 0, 53)
+        >>> P2 = Process("P2", 0, 17)
+        >>> P3 = Process("P3", 0, 68)
+        >>> P4 = Process("P4", 0, 24)
+        >>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
+        >>> _ = mlfq.multi_level_feedback_queue()
+        >>> mlfq.calculate_sequence_of_finish_queue()
+        ['P2', 'P4', 'P1', 'P3']
+        """
+        sequence = []
+        for i in range(len(self.finish_queue)):
+            sequence.append(self.finish_queue[i].process_name)
+        return sequence
+
+    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
+        """
+        This method calculates the waiting time of processes
+        >>> P1 = Process("P1", 0, 53)
+        >>> P2 = Process("P2", 0, 17)
+        >>> P3 = Process("P3", 0, 68)
+        >>> P4 = Process("P4", 0, 24)
+        >>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
+        >>> _ = mlfq.multi_level_feedback_queue()
+        >>> mlfq.calculate_waiting_time([P1, P2, P3, P4])
+        [83, 17, 94, 101]
+        """
+        waiting_times = []
+        for i in range(len(queue)):
+            waiting_times.append(queue[i].waiting_time)
+        return waiting_times
+
+    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
+        """
+        This method calculates the turnaround time of processes
+        >>> P1 = Process("P1", 0, 53)
+        >>> P2 = Process("P2", 0, 17)
+        >>> P3 = Process("P3", 0, 68)
+        >>> P4 = Process("P4", 0, 24)
+        >>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
+        >>> _ = mlfq.multi_level_feedback_queue()
+        >>> mlfq.calculate_turnaround_time([P1, P2, P3, P4])
+        [136, 34, 162, 125]
+        """
+        turnaround_times = []
+        for i in range(len(queue)):
+            turnaround_times.append(queue[i].turnaround_time)
+        return turnaround_times
+
+    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
+        """
+        This method calculates the completion time of processes
+        >>> P1 = Process("P1", 0, 53)
+        >>> P2 = Process("P2", 0, 17)
+        >>> P3 = Process("P3", 0, 68)
+        >>> P4 = Process("P4", 0, 24)
+        >>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
+        >>> _ = mlfq.multi_level_feedback_queue()
+        >>> mlfq.calculate_completion_time([P1, P2, P3, P4])
+        [136, 34, 162, 125]
+        """
+        completion_times = []
+        for i in range(len(queue)):
+            completion_times.append(queue[i].stop_time)
+        return completion_times
+
+    def calculate_remaining_burst_time_of_processes(
+        self, queue: deque[Process]
+    ) -> list[int]:
+        """
+        This method calculates the remaining burst time of processes
+        >>> P1 = Process("P1", 0, 53)
+        >>> P2 = Process("P2", 0, 17)
+        >>> P3 = Process("P3", 0, 68)
+        >>> P4 = Process("P4", 0, 24)
+        >>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
+        >>> finish_queue, ready_queue = mlfq.round_robin(deque([P1, P2, P3, P4]), 17)
+        >>> mlfq.calculate_remaining_burst_time_of_processes(mlfq.finish_queue)
+        [0]
+        >>> mlfq.calculate_remaining_burst_time_of_processes(ready_queue)
+        [36, 51, 7]
+        >>> finish_queue, ready_queue = mlfq.round_robin(ready_queue, 25)
+        >>> mlfq.calculate_remaining_burst_time_of_processes(mlfq.finish_queue)
+        [0, 0]
+        >>> mlfq.calculate_remaining_burst_time_of_processes(ready_queue)
+        [11, 26]
+        """
+        return [q.burst_time for q in queue]
+
+    def update_waiting_time(self, process: Process) -> int:
+        """
+        This method updates waiting times of unfinished processes
+        >>> P1 = Process("P1", 0, 53)
+        >>> P2 = Process("P2", 0, 17)
+        >>> P3 = Process("P3", 0, 68)
+        >>> P4 = Process("P4", 0, 24)
+        >>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
+        >>> mlfq.current_time = 10
+        >>> P1.stop_time = 5
+        >>> mlfq.update_waiting_time(P1)
+        5
+        """
+        process.waiting_time += self.current_time - process.stop_time
+        return process.waiting_time
+
+    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
+        """
+        FCFS(First Come, First Served)
+        FCFS will be applied to MLFQ's last queue
+        The process that arrived first is finished first
+        >>> P1 = Process("P1", 0, 53)
+        >>> P2 = Process("P2", 0, 17)
+        >>> P3 = Process("P3", 0, 68)
+        >>> P4 = Process("P4", 0, 24)
+        >>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
+        >>> _ = mlfq.first_come_first_served(mlfq.ready_queue)
+        >>> mlfq.calculate_sequence_of_finish_queue()
+        ['P1', 'P2', 'P3', 'P4']
+        """
+        finished: deque[Process] = deque()  # sequence deque of finished processes
+        while len(ready_queue) != 0:
+            cp = ready_queue.popleft()  # current process
+
+            # if process's arrival time is later than current time, update current time
+            if self.current_time < cp.arrival_time:
+                self.current_time += cp.arrival_time
+
+            # update waiting time of current process
+            self.update_waiting_time(cp)
+            # update current time
+            self.current_time += cp.burst_time
+            # finish the process and set the process's burst-time 0
+            cp.burst_time = 0
+            # set the process's turnaround time because it is finished
+            cp.turnaround_time = self.current_time - cp.arrival_time
+            # set the completion time
+            cp.stop_time = self.current_time
+            # add the process to the finished queue
+            finished.append(cp)
+
+        self.finish_queue.extend(finished)  # add finished processes to finish queue
+        # FCFS will finish all remaining processes
+        return finished
+
+    def round_robin(
+        self, ready_queue: deque[Process], time_slice: int
+    ) -> tuple[deque[Process], deque[Process]]:
+        """
+        RR(Round Robin)
+        RR will be applied to all of MLFQ's queues except the last one
+        No process can use the CPU for longer than time_slice at a time
+        If a process uses up its time_slice, it goes back to the ready queue
+        >>> P1 = Process("P1", 0, 53)
+        >>> P2 = Process("P2", 0, 17)
+        >>> P3 = Process("P3", 0, 68)
+        >>> P4 = Process("P4", 0, 24)
+        >>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
+        >>> finish_queue, ready_queue = mlfq.round_robin(mlfq.ready_queue, 17)
+        >>> mlfq.calculate_sequence_of_finish_queue()
+        ['P2']
+        """
+        finished: deque[Process] = deque()  # sequence deque of terminated processes
+        # run just one cycle; unfinished processes go back to the queue
+        for _ in range(len(ready_queue)):
+            cp = ready_queue.popleft()  # current process
+
+            # if process's arrival time is later than current time, update current time
+            if self.current_time < cp.arrival_time:
+                self.current_time += cp.arrival_time
+
+            # update waiting time of unfinished processes
+            self.update_waiting_time(cp)
+            # if the burst time of process is bigger than time-slice
+            if cp.burst_time > time_slice:
+                # use CPU for only time-slice
+                self.current_time += time_slice
+                # update remaining burst time
+                cp.burst_time -= time_slice
+                # update end point time
+                cp.stop_time = self.current_time
+                # put the process at the back of the queue because it is not finished
+                ready_queue.append(cp)
+            else:
+                # use CPU for remaining burst time
+                self.current_time += cp.burst_time
+                # set burst time 0 because the process is finished
+                cp.burst_time = 0
+                # set the finish time
+                cp.stop_time = self.current_time
+                # update the process' turnaround time because it is finished
+                cp.turnaround_time = self.current_time - cp.arrival_time
+                # add the process to the finished queue
+                finished.append(cp)
+
+        self.finish_queue.extend(finished)  # add finished processes to finish queue
+        # return finished processes queue and remaining processes queue
+        return finished, ready_queue
+
+    def multi_level_feedback_queue(self) -> deque[Process]:
+        """
+        MLFQ(Multi Level Feedback Queue)
+        >>> P1 = Process("P1", 0, 53)
+        >>> P2 = Process("P2", 0, 17)
+        >>> P3 = Process("P3", 0, 68)
+        >>> P4 = Process("P4", 0, 24)
+        >>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
+        >>> finish_queue = mlfq.multi_level_feedback_queue()
+        >>> mlfq.calculate_sequence_of_finish_queue()
+        ['P2', 'P4', 'P1', 'P3']
+        """
+
+        # all queues except last one have round_robin algorithm
+        for i in range(self.number_of_queues - 1):
+            finished, self.ready_queue = self.round_robin(
+                self.ready_queue, self.time_slices[i]
+            )
+        # the last queue has first_come_first_served algorithm
+        self.first_come_first_served(self.ready_queue)
+
+        return self.finish_queue
+
+
+if __name__ == "__main__":
+    import doctest
+
+    P1 = Process("P1", 0, 53)
+    P2 = Process("P2", 0, 17)
+    P3 = Process("P3", 0, 68)
+    P4 = Process("P4", 0, 24)
+    number_of_queues = 3
+    time_slices = [17, 25]
+    queue = deque([P1, P2, P3, P4])
+
+    if len(time_slices) != number_of_queues - 1:
+        raise SystemExit(0)
+
+    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})
+
+    P1 = Process("P1", 0, 53)
+    P2 = Process("P2", 0, 17)
+    P3 = Process("P3", 0, 68)
+    P4 = Process("P4", 0, 24)
+    number_of_queues = 3
+    time_slices = [17, 25]
+    queue = deque([P1, P2, P3, P4])
+    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
+    finish_queue = mlfq.multi_level_feedback_queue()
+
+    # print total waiting times of processes(P1, P2, P3, P4)
+    print(
+        f"waiting time:\
+        \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"
+    )
+    # print completion times of processes(P1, P2, P3, P4)
+    print(
+        f"completion time:\
+        \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"
+    )
+    # print total turnaround times of processes(P1, P2, P3, P4)
+    print(
+        f"turnaround time:\
+        \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"
+    )
+    # print sequence of finished processes
+    print(
+        f"sequence of finished processes:\
+        {mlfq.calculate_sequence_of_finish_queue()}"
+    )
\ No newline at end of file
diff --git a/utbot-python/samples/test_configuration.json b/utbot-python/samples/test_configuration.json
index b51ab7c6cb..1746e07ba0 100644
--- a/utbot-python/samples/test_configuration.json
+++ b/utbot-python/samples/test_configuration.json
@@ -32,7 +32,7 @@
             "classes": null,
             "methods": null,
             "timeout": 10,
-            "coverage": 100
+            "coverage": 85
           }
         ]
       }
@@ -48,7 +48,7 @@
             "classes": null,
             "methods": null,
             "timeout": 10,
-            "coverage": 100
+            "coverage": 80
           }
         ]
       },
@@ -70,7 +70,7 @@
             "classes": ["Dictionary"],
             "methods": ["translate"],
             "timeout": 10,
-            "coverage": 100
+            "coverage": 89
           }
         ]
       },
@@ -195,8 +195,8 @@
           {
             "classes": null,
             "methods": null,
-            "timeout": 10,
-            "coverage": 100
+            "timeout": 60,
+            "coverage": 88
           }
         ]
       }
@@ -211,7 +211,7 @@
           {
             "classes": null,
             "methods": null,
-            "timeout": 10,
+            "timeout": 100,
            "coverage": 100
           }
         ]
       },
@@ -222,8 +222,8 @@
           {
            "classes": null,
            "methods": null,
-            "timeout": 10,
-            "coverage": 100
+            "timeout": 30,
+            "coverage": 72
           }
         ]
       },
@@ -233,8 +233,8 @@
           {
            "classes": null,
            "methods": null,
-            "timeout": 10,
-            "coverage": 100
+            "timeout": 30,
+            "coverage": 75
           }
         ]
       }
@@ -271,7 +271,7 @@
          {
            "classes": null,
            "methods": null,
-            "timeout": 150,
+            "timeout": 300,
            "coverage": 100
          }
        ]
      },
@@ -298,17 +298,6 @@
           }
         ]
       },
-      {
-        "name": "general",
-        "groups": [
-          {
-            "classes": null,
-            "methods": null,
-            "timeout": 300,
-            "coverage": 100
-          }
-        ]
-      },
       {
         "name": "long_function_coverage",
         "groups": [
@@ -348,7 +337,7 @@
             "classes": null,
             "methods": null,
             "timeout": 180,
-            "coverage": 100
+            "coverage": 86
           }
         ]
       }
@@ -433,7 +422,7 @@
           {
             "classes": null,
             "methods": null,
-            "timeout": 40,
+            "timeout": 60,
            "coverage": 100
           }
         ]
@@ -503,6 +492,17 @@
           }
         ]
       },
+      {
+        "name": "graph_matrix",
+        "groups": [
+          {
+            "classes": null,
+            "methods": null,
+            "timeout": 120,
+            "coverage": 100
+          }
+        ]
+      },
       {
         "name": "matrix",
         "groups": [
@@ -513,6 +513,17 @@
           {
             "classes": null,
             "methods": null,
            "timeout": 120,
            "coverage": 100
           }
         ]
+      },
+      {
+        "name": "multi_level_feedback_queue",
+        "groups": [
+          {
+            "classes": null,
+            "methods": null,
+            "timeout": 180,
+            "coverage": 100
+          }
+        ]
       }
     ]
   },
@@ -559,7 +570,7 @@
             "classes": ["LoggedVar"],
             "methods": null,
             "timeout": 30,
-            "coverage": 100
+            "coverage": 80
           }
         ]
       },
@@ -581,7 +592,7 @@
            "classes": null,
            "methods": null,
            "timeout": 30,
-            "coverage": 100
+            "coverage": 50
           }
         ]
       },
@@ -614,7 +625,7 @@
            "classes": null,
            "methods": null,
            "timeout": 40,
-            "coverage": 100
+            "coverage": 62
          }
        ]
      }
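Note on the check_coverage fix in run_tests.py above: the old expression rounded the covered/total ratio to 0 or 1 before multiplying by 100, so every measured coverage collapsed to either 0 or 100. A minimal sketch of the difference (plain Python, with illustrative numbers only):

covered, not_covered = 17, 3  # e.g. 17 of 20 lines hit

old = round(covered / (not_covered + covered)) * 100  # round(0.85) * 100 -> 100
new = round(covered / (not_covered + covered) * 100)  # round(85.0) -> 85

print(old, new)  # 100 85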
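A minimal usage sketch for the new graph_matrix sample (not part of the diff; the import path is an assumption based on the file location above). It shows the behaviour the header comment describes: diagonally touching cells belong to the same island.

from samples.structures.graph_matrix import Matrix  # hypothetical import path

grid = [
    [True, True, False, False],
    [False, True, False, False],
    [False, False, False, True],
]
# The three cells in the top-left corner are connected (one of them diagonally),
# and the bottom-right cell is isolated, so two islands are counted.
print(Matrix(3, 4, grid).count_islands())  # 2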