diff --git a/benchmark/tests/BestResults_test.py b/benchmark/tests/BestResults_test.py
index 6ee0bb1..f6a4b32 100644
--- a/benchmark/tests/BestResults_test.py
+++ b/benchmark/tests/BestResults_test.py
@@ -8,7 +8,7 @@ class BestResultTest(TestBase):
         expected = {
             "balance-scale": [
                 0.98,
-                {"splitter": "iwss", "max_features": "auto"},
+                {"splitter": "best", "max_features": "auto"},
                 "results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json",
             ],
             "balloons": [
diff --git a/benchmark/tests/Experiment_test.py b/benchmark/tests/Experiment_test.py
index 53df765..ca5b37f 100644
--- a/benchmark/tests/Experiment_test.py
+++ b/benchmark/tests/Experiment_test.py
@@ -36,7 +36,7 @@ class ExperimentTest(TestBase):
         expected = {
             "balance-scale": [
                 0.98,
-                {"splitter": "iwss", "max_features": "auto"},
+                {"splitter": "best", "max_features": "auto"},
                 "results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json",
             ],
             "balloons": [
diff --git a/benchmark/tests/results/best_results_accuracy_STree.json b/benchmark/tests/results/best_results_accuracy_STree.json
index ece4a7a..1cbd82e 100644
--- a/benchmark/tests/results/best_results_accuracy_STree.json
+++ b/benchmark/tests/results/best_results_accuracy_STree.json
@@ -1 +1 @@
-{"balance-scale": [0.98, {"splitter": "iwss", "max_features": "auto"}, "results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json"], "balloons": [0.86, {"C": 7, "gamma": 0.1, "kernel": "rbf", "max_iter": 10000.0, "multiclass_strategy": "ovr"}, "results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json"]}
\ No newline at end of file
+{"balance-scale": [0.98, {"splitter": "best", "max_features": "auto"}, "results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json"], "balloons": [0.86, {"C": 7, "gamma": 0.1, "kernel": "rbf", "max_iter": 10000.0, "multiclass_strategy": "ovr"}, "results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json"]}
\ No newline at end of file
diff --git a/benchmark/tests/results/results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json b/benchmark/tests/results/results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json
index bdf4003..e5fd58e 100644
--- a/benchmark/tests/results/results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json
+++ b/benchmark/tests/results/results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json
@@ -15,7 +15,7 @@
         "features": 4,
         "classes": 3,
         "hyperparameters": {
-            "splitter": "iwss",
+            "splitter": "best",
             "max_features": "auto"
         },
         "nodes": 11.08,
@@ -32,7 +32,7 @@
         "features": 4,
         "classes": 2,
         "hyperparameters": {
-            "splitter": "iwss",
+            "splitter": "best",
             "max_features": "auto"
         },
         "nodes": 4.12,
diff --git a/benchmark/tests/scripts/Be_Main_test.py b/benchmark/tests/scripts/Be_Main_test.py
index 8b4ec8d..95f205a 100644
--- a/benchmark/tests/scripts/Be_Main_test.py
+++ b/benchmark/tests/scripts/Be_Main_test.py
@@ -1,5 +1,8 @@
 import os
+from io import StringIO
+from unittest.mock import patch
 from ...Utils import Folders
+from ...Results import Report
 from ..TestBase import TestBase
 
 
@@ -7,26 +10,16 @@ class BeMainTest(TestBase):
     def setUp(self):
         self.prepare_scripts_env()
         self.score = "accuracy"
+        self.files = []
 
     def tearDown(self) -> None:
-        files = []
-
-        self.remove_files(files, Folders.exreport)
+        self.remove_files(self.files, ".")
         return super().tearDown()
 
     def test_be_benchmark_dataset(self):
         stdout, _ = self.execute_script(
             "be_main",
-            [
-                "-s",
-                self.score,
-                "-m",
-                "STree",
-                "-d",
-                "balloons",
-                "--title",
-                "test",
-            ],
+            ["-m", "STree", "-d", "balloons", "--title", "test"],
         )
         with open(os.path.join(self.test_files, "be_main_dataset.test")) as f:
             expected = f.read()
@@ -40,6 +33,48 @@ class BeMainTest(TestBase):
                 self.assertEqual(computed, expected, n_line)
             n_line += 1
 
+    def test_be_benchmark_complete(self):
+        stdout, _ = self.execute_script(
+            "be_main",
+            ["-s", self.score, "-m", "STree", "--title", "test", "-r", "1"],
+        )
+        with open(os.path.join(self.test_files, "be_main_complete.test")) as f:
+            expected = f.read()
+        n_line = 0
+        # keep the report name to delete it after
+        self.files.append(stdout.getvalue().splitlines()[-1].split("in ")[1])
+        # compare only report lines without date, time, duration...
+        lines_to_compare = [0, 2, 3, 5, 6, 7, 8, 9, 12, 13, 14]
+        for expected, computed in zip(
+            expected.splitlines(), stdout.getvalue().splitlines()
+        ):
+            if n_line in lines_to_compare:
+                self.assertEqual(computed, expected, n_line)
+            n_line += 1
+
+    def test_be_benchmark_no_report(self):
+        stdout, _ = self.execute_script(
+            "be_main",
+            ["-s", self.score, "-m", "STree", "--title", "test"],
+        )
+        with open(os.path.join(self.test_files, "be_main_complete.test")) as f:
+            expected = f.read()
+        # keep the report name to delete it after
+        report_name = stdout.getvalue().splitlines()[-1].split("in ")[1]
+        self.files.append(report_name)
+        report = Report(file_name=report_name)
+        with patch(self.output, new=StringIO()) as stdout:
+            report.report()
+        # compare only report lines without date, time, duration...
+        lines_to_compare = [0, 2, 3, 5, 6, 7, 8, 9, 12, 13, 14]
+        n_line = 0
+        for expected, computed in zip(
+            expected.splitlines(), stdout.getvalue().splitlines()
+        ):
+            if n_line in lines_to_compare:
+                self.assertEqual(computed, expected, n_line)
+            n_line += 1
+
     def test_be_benchmark_no_data(self):
         stdout, _ = self.execute_script(
             "be_main", ["-m", "STree", "-d", "unknown", "--title", "test"]