mirror of https://github.com/Doctorado-ML/benchmark.git (synced 2025-08-17 16:35:54 +00:00)
Refactor fake_out variable
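The diff below renames the StringIO handle used to capture printed output in the test suite from fake_out to stdout. For orientation, this is a minimal sketch of the capture pattern the tests rely on, assuming the patch target self.output resolves to "sys.stdout" (the @patch decorators later in the suite patch that name directly); the class and method names in the sketch are illustrative and do not appear in the repository.

# Minimal sketch of the stdout-capture pattern, under the assumptions above.
from io import StringIO
from unittest import TestCase
from unittest.mock import patch


class StdoutCaptureSketch(TestCase):
    def test_context_manager_form(self):
        # Replace sys.stdout with a StringIO; the buffer keeps its
        # contents after the patch is undone, so assertions can run
        # inside or outside the with block.
        with patch("sys.stdout", new=StringIO()) as stdout:
            print("hello")
        self.assertEqual(stdout.getvalue(), "hello\n")

    @patch("sys.stdout", new_callable=StringIO)
    def test_decorator_form(self, stdout):
        # new_callable builds the StringIO and injects it as an extra
        # argument, the form test_show_top_no_data uses below.
        print("hello")
        self.assertEqual(stdout.getvalue(), "hello\n")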
@@ -35,19 +35,19 @@ class BenchmarkTest(TestBase):
         benchmark = Benchmark("accuracy", visualize=False)
         benchmark.compile_results()
         benchmark.save_results()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             benchmark.report(tex_output=False)
-            self.check_output_file(fake_out, "exreport_report")
+            self.check_output_file(stdout, "exreport_report")

     def test_exreport(self):
         benchmark = Benchmark("accuracy", visualize=False)
         benchmark.compile_results()
         benchmark.save_results()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             benchmark.exreport()
         with open(os.path.join(self.test_files, "exreport.test")) as f:
             expected_t = f.read()
-        computed_t = fake_out.getvalue()
+        computed_t = stdout.getvalue()
         computed_t = computed_t.split("\n")
         computed_t.pop(0)
         for computed, expected in zip(computed_t, expected_t.split("\n")):
@@ -71,17 +71,17 @@ class BenchmarkTest(TestBase):
         benchmark = Benchmark("unknown", visualize=False)
         benchmark.compile_results()
         benchmark.save_results()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             benchmark.exreport()
-            self.check_output_file(fake_out, "exreport_error")
+            self.check_output_file(stdout, "exreport_error")

     def test_tex_output(self):
         benchmark = Benchmark("accuracy", visualize=False)
         benchmark.compile_results()
         benchmark.save_results()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             benchmark.report(tex_output=True)
-            self.check_output_file(fake_out, "exreport_report")
+            self.check_output_file(stdout, "exreport_report")
         self.assertTrue(os.path.exists(benchmark.get_tex_file()))
         self.check_file_file(benchmark.get_tex_file(), "exreport_tex")
@@ -18,32 +18,32 @@ class PairCheckTest(TestBase):
     def test_pair_check(self):
         report = self.build_model(model1="ODTE", model2="STree")
         report.compute()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.report()
-            self.check_output_file(fake_out, "paircheck")
+            self.check_output_file(stdout, "paircheck")

     def test_pair_check_win(self):
         report = self.build_model(win=True)
         report.compute()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.report()
-            self.check_output_file(fake_out, "paircheck_win")
+            self.check_output_file(stdout, "paircheck_win")

     def test_pair_check_lose(self):
         report = self.build_model(
             model1="RandomForest", model2="STree", lose=True
         )
         report.compute()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.report()
-            self.check_output_file(fake_out, "paircheck_lose")
+            self.check_output_file(stdout, "paircheck_lose")

     def test_pair_check_win_lose(self):
         report = self.build_model(win=True, lose=True)
         report.compute()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.report()
-            self.check_output_file(fake_out, "paircheck_win_lose")
+            self.check_output_file(stdout, "paircheck_win_lose")

     def test_pair_check_store_result(self):
         report = self.build_model(win=True, lose=True)
@@ -25,18 +25,18 @@ class ReportTest(TestBase):
                 "results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json",
             )
         )
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.report()
-            self.check_output_file(fake_out, "report")
+            self.check_output_file(stdout, "report")

     def test_report_without_folder(self):
         report = Report(
             file_name="results_accuracy_STree_iMac27_2021-09-30_11:42:07_0"
             ".json"
         )
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.report()
-            self.check_output_file(fake_out, "report")
+            self.check_output_file(stdout, "report")

     def test_report_compared(self):
         report = Report(
@@ -44,9 +44,9 @@ class ReportTest(TestBase):
             ".json",
             compare=True,
         )
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.report()
-            self.check_output_file(fake_out, "report_compared")
+            self.check_output_file(stdout, "report_compared")

     def test_compute_status(self):
         file_name = "results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json"
@@ -67,21 +67,21 @@ class ReportTest(TestBase):

     def test_report_best(self):
         report = ReportBest("accuracy", "STree", best=True, grid=False)
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.report()
-            self.check_output_file(fake_out, "report_best")
+            self.check_output_file(stdout, "report_best")

     def test_report_grid(self):
         report = ReportBest("accuracy", "STree", best=False, grid=True)
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.report()
-            self.check_output_file(fake_out, "report_grid")
+            self.check_output_file(stdout, "report_grid")

     def test_report_best_both(self):
         report = ReportBest("accuracy", "STree", best=True, grid=True)
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.report()
-            self.check_output_file(fake_out, "report_best")
+            self.check_output_file(stdout, "report_best")

     @patch("sys.stdout", new_callable=StringIO)
     def test_report_datasets(self, mock_output):
@@ -130,60 +130,60 @@ class SummaryTest(TestBase):
     def test_summary_list_results_model(self):
         report = Summary()
         report.acquire()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.list_results(model="STree")
-            self.check_output_file(fake_out, "summary_list_model")
+            self.check_output_file(stdout, "summary_list_model")

     def test_summary_list_results_score(self):
         report = Summary()
         report.acquire()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.list_results(score="accuracy")
-            self.check_output_file(fake_out, "summary_list_score")
+            self.check_output_file(stdout, "summary_list_score")

     def test_summary_list_results_n(self):
         report = Summary()
         report.acquire()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.list_results(score="accuracy", number=3)
-            self.check_output_file(fake_out, "summary_list_n")
+            self.check_output_file(stdout, "summary_list_n")

     def test_summary_list_hidden(self):
         report = Summary(hidden=True)
         report.acquire()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.list_results(score="accuracy")
-            self.check_output_file(fake_out, "summary_list_hidden")
+            self.check_output_file(stdout, "summary_list_hidden")

     def test_show_result_no_title(self):
         report = Summary()
         report.acquire()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             title = ""
             best = report.best_result(
                 criterion="model", value="STree", score="accuracy"
             )
             report.show_result(data=best, title=title)
-            self.check_output_file(fake_out, "summary_show_results")
+            self.check_output_file(stdout, "summary_show_results")

     def test_show_result_title(self):
         report = Summary()
         report.acquire()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             title = "**Title**"
             best = report.best_result(
                 criterion="model", value="STree", score="accuracy"
             )
             report.show_result(data=best, title=title)
-            self.check_output_file(fake_out, "summary_show_results_title")
+            self.check_output_file(stdout, "summary_show_results_title")

     def test_show_result_no_data(self):
         report = Summary()
         report.acquire()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             title = "**Test**"
             report.show_result(data={}, title=title)
-            computed = fake_out.getvalue()
+            computed = stdout.getvalue()
             expected = "** **Test** has No data **\n"
             self.assertEqual(computed, expected)
@@ -212,16 +212,16 @@ class SummaryTest(TestBase):
     def test_show_top(self):
         report = Summary()
         report.acquire()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.show_top()
-            self.check_output_file(fake_out, "summary_show_top")
+            self.check_output_file(stdout, "summary_show_top")

     @patch("sys.stdout", new_callable=StringIO)
-    def test_show_top_no_data(self, fake_out):
+    def test_show_top_no_data(self, stdout):
         report = Summary()
         report.acquire()
         report.show_top(score="f1-macro")
-        self.assertEqual(fake_out.getvalue(), "** No results found **\n")
+        self.assertEqual(stdout.getvalue(), "** No results found **\n")

     def test_no_data(self):
         report = Summary()
@@ -129,7 +129,11 @@ class UtilTest(TestBase):
         )
         self.assertCountEqual(
             Files().get_all_results(hidden=True),
-            ["results_accuracy_STree_iMac27_2021-11-01_23:55:16_0.json"],
+            [
+                "results_accuracy_STree_iMac27_2021-11-01_23:55:16_0.json",
+                "results_accuracy_XGBoost_MacBookpro16_2022-05-04_11:00:35_"
+                "0.json",
+            ],
         )

     def test_Files_get_results_Error(self):
File diff suppressed because one or more lines are too long
@@ -1,3 +1,4 @@
-[92mDate       File                                                     Score    Time(h) Title
-========== ======================================================== ======== ======= =======
-[93m2021-11-01 results_accuracy_STree_iMac27_2021-11-01_23:55:16_0.json  0.97446   0.098 default
+[92mDate       File                                                             Score    Time(h) Title
+========== ================================================================ ======== ======= =======================
+[93m2022-05-04 results_accuracy_XGBoost_MacBookpro16_2022-05-04_11:00:35_0.json      nan   3.091 Default hyperparameters
+[92m2021-11-01 results_accuracy_STree_iMac27_2021-11-01_23:55:16_0.json          0.97446   0.098 default