From c10bf27a16caf4ac386581758b0cdac6e4955b64 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20Montan=CC=83ana?=
Date: Mon, 22 May 2023 11:15:19 +0200
Subject: [PATCH] Fix tests

---
 benchmark/Manager.py                      | 147 +++++++++---------
 benchmark/tests/Report_test.py            |  21 +++
 benchmark/tests/scripts/Be_List_test.py   |   6 +
 .../test_files/be_list_compare_fault.test |   8 +
 4 files changed, 112 insertions(+), 70 deletions(-)
 create mode 100644 benchmark/tests/test_files/be_list_compare_fault.test

diff --git a/benchmark/Manager.py b/benchmark/Manager.py
index cf0f453..07293aa 100644
--- a/benchmark/Manager.py
+++ b/benchmark/Manager.py
@@ -15,48 +15,48 @@ def get_input(message="", is_test=False):
 class Manage:
     def __init__(self, summary):
         self.summary = summary
+        self.cmd = SimpleNamespace(
+            quit="q", relist="r", delete="d", hide="h", excel="e"
+        )
+
+    def process_file(self, num, command, path):
+        num = int(num)
+        name = self.summary.data_filtered[num]["file"]
+        file_name_result = os.path.join(path, name)
+        verb1, verb2 = (
+            ("delete", "Deleting")
+            if command == self.cmd.delete
+            else (
+                "hide",
+                "Hiding",
+            )
+        )
+        conf_message = (
+            TextColor.RED
+            + f"Are you sure to {verb1} {file_name_result} (y/n)? "
+        )
+        confirm = get_input(message=conf_message)
+        if confirm == "y":
+            print(TextColor.YELLOW + f"{verb2} {file_name_result}")
+            if command == self.cmd.delete:
+                os.unlink(file_name_result)
+            else:
+                os.rename(
+                    os.path.join(Folders.results, name),
+                    os.path.join(Folders.hidden_results, name),
+                )
+            self.summary.data_filtered.pop(num)
+        get_input(message="Press enter to continue")
+        self.summary.list_results()
 
     def manage_results(self):
         """Manage results showed in the summary
         return True if excel file is created False otherwise
         """
 
-        def process_file(num, command, path):
-            num = int(num)
-            name = self.summary.data_filtered[num]["file"]
-            file_name_result = os.path.join(path, name)
-            verb1, verb2 = (
-                ("delete", "Deleting")
-                if command == cmd.delete
-                else (
-                    "hide",
-                    "Hiding",
-                )
-            )
-            conf_message = (
-                TextColor.RED
-                + f"Are you sure to {verb1} {file_name_result} (y/n)? "
-            )
-            confirm = get_input(message=conf_message)
-            if confirm == "y":
-                print(TextColor.YELLOW + f"{verb2} {file_name_result}")
-                if command == cmd.delete:
-                    os.unlink(file_name_result)
-                else:
-                    os.rename(
-                        os.path.join(Folders.results, name),
-                        os.path.join(Folders.hidden_results, name),
-                    )
-                self.summary.data_filtered.pop(num)
-            get_input(message="Press enter to continue")
-            self.summary.list_results()
-
-        cmd = SimpleNamespace(
-            quit="q", relist="r", delete="d", hide="h", excel="e"
-        )
         message = (
             TextColor.ENDC
-            + f"Choose option {str(cmd).replace('namespace', '')}: "
+            + f"Choose option {str(self.cmd).replace('namespace', '')}: "
         )
         path = (
             Folders.hidden_results if self.summary.hidden else Folders.results
@@ -65,56 +65,63 @@ class Manage:
         max_value = len(self.summary.data_filtered)
         while True:
             match get_input(message=message).split():
-                case [cmd.relist]:
+                case [self.cmd.relist]:
                     self.summary.list_results()
-                case [cmd.quit]:
+                case [self.cmd.quit]:
                     if book is not None:
                         book.close()
                         return True
                     return False
-                case [cmd.hide, num] if num.isdigit() and int(num) < max_value:
+                case [self.cmd.hide, num] if num.isdigit() and int(
+                    num
+                ) < max_value:
                     if self.summary.hidden:
                         print("Already hidden")
                     else:
-                        process_file(num, path=path, command=cmd.hide)
-                case [cmd.delete, num] if num.isdigit() and int(
+                        self.process_file(
+                            num, path=path, command=self.cmd.hide
+                        )
+                case [self.cmd.delete, num] if num.isdigit() and int(
                     num
                 ) < max_value:
-                    process_file(num=num, path=path, command=cmd.delete)
-                case [cmd.excel, num] if num.isdigit() and int(
+                    self.process_file(
+                        num=num, path=path, command=self.cmd.delete
+                    )
+                case [self.cmd.excel, num] if num.isdigit() and int(
                     num
                 ) < max_value:
                     # Add to excel file result #num
-                    num = int(num)
-                    file_name_result = os.path.join(
-                        path, self.summary.data_filtered[num]["file"]
-                    )
-                    if book is None:
-                        file_name = os.path.join(
-                            Folders.excel, Files.be_list_excel
-                        )
-                        book = xlsxwriter.Workbook(
-                            file_name, {"nan_inf_to_errors": True}
-                        )
-                    excel = Excel(
-                        file_name=file_name_result,
-                        book=book,
-                        compare=self.summary.compare,
-                    )
-                    excel.report()
-                    print(f"Added {file_name_result} to {Files.be_list_excel}")
+                    book = self.add_to_excel(num, path, book)
                 case [num] if num.isdigit() and int(num) < max_value:
                     # Report the result #num
-                    num = int(num)
-                    file_name_result = os.path.join(
-                        path, self.summary.data_filtered[num]["file"]
-                    )
-                    try:
-                        rep = Report(
-                            file_name_result, compare=self.summary.compare
-                        )
-                        rep.report()
-                    except ValueError as e:
-                        print(e)
+                    self.report(num, path)
                 case _:
                     print("Invalid option. Try again!")
+
+    def report(self, num, path):
+        num = int(num)
+        file_name_result = os.path.join(
+            path, self.summary.data_filtered[num]["file"]
+        )
+        try:
+            rep = Report(file_name_result, compare=self.summary.compare)
+            rep.report()
+        except ValueError as e:
+            print(e)
+
+    def add_to_excel(self, num, path, book):
+        num = int(num)
+        file_name_result = os.path.join(
+            path, self.summary.data_filtered[num]["file"]
+        )
+        if book is None:
+            file_name = os.path.join(Folders.excel, Files.be_list_excel)
+            book = xlsxwriter.Workbook(file_name, {"nan_inf_to_errors": True})
+        excel = Excel(
+            file_name=file_name_result,
+            book=book,
+            compare=self.summary.compare,
+        )
+        excel.report()
+        print(f"Added {file_name_result} to {Files.be_list_excel}")
+        return book
diff --git a/benchmark/tests/Report_test.py b/benchmark/tests/Report_test.py
index 3390ca7..884082d 100644
--- a/benchmark/tests/Report_test.py
+++ b/benchmark/tests/Report_test.py
@@ -66,6 +66,27 @@ class ReportTest(TestBase):
         self.assertEqual(res, Symbols.better_best)
         res = report._compute_status("balloons", 1.0)
         self.assertEqual(res, Symbols.better_best)
+        report = Report(file_name=file_name)
+        with patch(self.output, new=StringIO()):
+            report.report()
+        res = report._compute_status("balloons", 0.99)
+        self.assertEqual(res, Symbols.upward_arrow)
+        report.margin = 0.9
+        res = report._compute_status("balloons", 0.99)
+        self.assertEqual(res, Symbols.cross)
+
+    def test_reportbase_compute_status(self):
+        with patch.multiple(BaseReport, __abstractmethods__=set()):
+            file_name = os.path.join(
+                "results",
+                "results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json",
+            )
+            temp = BaseReport(file_name)
+            temp.compare = False
+            temp._compare_totals = {}
+            temp.score_name = "f1"
+            res = temp._compute_status("balloons", 0.99)
+            self.assertEqual(res, " ")
 
     def test_report_file_not_found(self):
         with self.assertRaises(FileNotFoundError):
diff --git a/benchmark/tests/scripts/Be_List_test.py b/benchmark/tests/scripts/Be_List_test.py
index 76f47cc..519907b 100644
--- a/benchmark/tests/scripts/Be_List_test.py
+++ b/benchmark/tests/scripts/Be_List_test.py
@@ -143,6 +143,12 @@ class BeListTest(TestBase):
         self.assertEqual(stderr.getvalue(), "")
         self.check_output_file(stdout, "be_list_hidden")
 
+    @patch("benchmark.Manager.get_input", side_effect=iter(["0", "q"]))
+    def test_be_list_compare(self, input_data):
+        stdout, stderr = self.execute_script("be_list", ["--compare"])
+        self.assertEqual(stderr.getvalue(), "")
+        self.check_output_file(stdout, "be_list_compare_fault")
+
     def test_be_no_env(self):
         path = os.getcwd()
         os.chdir("..")
diff --git a/benchmark/tests/test_files/be_list_compare_fault.test b/benchmark/tests/test_files/be_list_compare_fault.test
new file mode 100644
index 0000000..8e0dc25
--- /dev/null
+++ b/benchmark/tests/test_files/be_list_compare_fault.test
@@ -0,0 +1,8 @@
+  # Date       File                                                               Score Time(h) Title
+=== ========== =============================================================== ======== ======= ============================================
+  0 2022-04-20 results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.json           0.04341   6.275 Gridsearched hyperparams v022.1b random_init
+  1 2022-01-14 results_accuracy_RandomForest_iMac27_2022-01-14_12:39:30_0.json  0.03627   0.076 Test default paramters with RandomForest
+  2 2021-11-01 results_accuracy_STree_macbook-pro_2021-11-01_19:17:07_0.json    0.03790   1.143 default B
+  3 2021-10-27 results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json         0.04158   0.943 default A
+  4 2021-09-30 results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json         0.04544   0.173 With gridsearched hyperparameters
+results/best_results_accuracy_ODTE.json does not exist
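
A note on the command dispatch this patch keeps: inside a match statement, a dotted name such as self.cmd.delete is a value pattern (compared with ==), while a bare name would be a capture pattern that matches anything, which is why the menu commands live in a SimpleNamespace attribute rather than in plain local variables. Below is a minimal, self-contained sketch of the same idea; Dispatcher and handle are illustrative names, not part of the benchmark package, and it needs Python 3.10+.

from types import SimpleNamespace


class Dispatcher:
    def __init__(self):
        # Dotted access (self.cmd.quit, ...) makes the case clauses below
        # value patterns instead of capture patterns.
        self.cmd = SimpleNamespace(quit="q", relist="r", delete="d")

    def handle(self, line):
        match line.split():
            case [self.cmd.quit]:
                return "quit"
            case [self.cmd.relist]:
                return "relist"
            case [self.cmd.delete, num] if num.isdigit():
                return f"delete #{int(num)}"
            case _:
                return "invalid"


if __name__ == "__main__":
    dispatcher = Dispatcher()
    for entry in ("q", "r", "d 3", "x"):
        print(entry, "->", dispatcher.handle(entry))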
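
The new test_be_list_compare drives the interactive menu by patching benchmark.Manager.get_input with side_effect=iter(["0", "q"]): the first scripted answer reports result #0 (whose compare report then prints that results/best_results_accuracy_ODTE.json does not exist, which is what be_list_compare_fault.test expects) and the second answer quits. The sketch below shows the same mocking technique in isolation; ask, menu and MenuTest are names invented for this example, not part of the benchmark code.

import unittest
from unittest.mock import patch


def ask(message=""):
    # Stand-in for an interactive helper like Manager.get_input.
    return input(message)


def menu():
    # Read commands until "q" and return everything that was typed.
    seen = []
    while True:
        answer = ask("option? ")
        seen.append(answer)
        if answer == "q":
            return seen


class MenuTest(unittest.TestCase):
    # side_effect with an iterator feeds one scripted answer per call,
    # mirroring @patch("benchmark.Manager.get_input", side_effect=iter(["0", "q"])).
    @patch(f"{__name__}.ask", side_effect=iter(["0", "q"]))
    def test_menu(self, mock_ask):
        self.assertEqual(menu(), ["0", "q"])
        self.assertEqual(mock_ask.call_count, 2)


if __name__ == "__main__":
    unittest.main()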
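
test_reportbase_compute_status instantiates the abstract BaseReport directly by patching its __abstractmethods__ set to empty for the duration of the test, which saves writing a throwaway concrete subclass. A standalone sketch of the same trick follows; AbstractGreeter and its methods are made up for illustration and are not part of the benchmark package.

import unittest
from abc import ABC, abstractmethod
from unittest.mock import patch


class AbstractGreeter(ABC):
    @abstractmethod
    def render(self):
        ...  # never called in the test

    def greet(self, name):
        # Concrete behaviour we want to test without defining a subclass.
        return f"Hello, {name}!"


class AbstractGreeterTest(unittest.TestCase):
    def test_greet_without_subclass(self):
        # An empty __abstractmethods__ lets instantiation succeed, just like
        # patch.multiple(BaseReport, __abstractmethods__=set()) in the patch.
        with patch.multiple(AbstractGreeter, __abstractmethods__=set()):
            greeter = AbstractGreeter()
            self.assertEqual(greeter.greet("world"), "Hello, world!")


if __name__ == "__main__":
    unittest.main()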
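
The extracted add_to_excel keeps the workbook handling of the old inline code: the xlsxwriter workbook is created lazily on the first excel command, with nan_inf_to_errors enabled so NaN or infinite scores become Excel error cells instead of raising, and manage_results closes it when the user quits. A rough sketch of that lifecycle under the same option; export, output.xlsx and the sample rows are assumptions made up for this example.

import xlsxwriter


def export(rows, book=None):
    # Create the workbook only when the first export is requested;
    # nan_inf_to_errors turns NaN/Inf values into Excel #NUM!/#DIV/0! errors.
    if book is None:
        book = xlsxwriter.Workbook("output.xlsx", {"nan_inf_to_errors": True})
    sheet = book.add_worksheet()
    for row, (name, score) in enumerate(rows):
        sheet.write(row, 0, name)
        sheet.write(row, 1, score)
    return book


if __name__ == "__main__":
    book = export([("balloons", 0.99), ("iris", float("nan"))])
    book = export([("wine", 0.95)], book=book)  # reuse the same workbook
    book.close()  # the caller owns the workbook, as manage_results does on quit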