Mirror of https://github.com/Doctorado-ML/benchmark.git (synced 2025-08-18 17:05:54 +00:00)

Commit: Refactor fake_out variable
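All of these tests capture printed output by patching an output stream with a StringIO; the commit renames the capture variable from fake_out to stdout so the name says what it actually holds. A minimal sketch of the pattern, assuming the test base stores the patch target as self.output = "sys.stdout" (the report function below is illustrative, not the repo's API):

    import unittest
    from io import StringIO
    from unittest.mock import patch


    def report():
        # Stand-in for Benchmark.report() / Summary.list_results();
        # whatever it prints is what the test captures.
        print("accuracy 0.97446")


    class CapturePatternTest(unittest.TestCase):
        # Assumed: TestBase keeps the patch target as a string.
        output = "sys.stdout"

        def test_report_output(self):
            # patch() swaps sys.stdout for a StringIO while the block runs.
            with patch(self.output, new=StringIO()) as stdout:
                report()
            self.assertEqual(stdout.getvalue(), "accuracy 0.97446\n")


    if __name__ == "__main__":
        unittest.main()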
@@ -35,19 +35,19 @@ class BenchmarkTest(TestBase):
         benchmark = Benchmark("accuracy", visualize=False)
         benchmark.compile_results()
         benchmark.save_results()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             benchmark.report(tex_output=False)
-        self.check_output_file(fake_out, "exreport_report")
+        self.check_output_file(stdout, "exreport_report")
 
     def test_exreport(self):
         benchmark = Benchmark("accuracy", visualize=False)
         benchmark.compile_results()
         benchmark.save_results()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             benchmark.exreport()
         with open(os.path.join(self.test_files, "exreport.test")) as f:
             expected_t = f.read()
-        computed_t = fake_out.getvalue()
+        computed_t = stdout.getvalue()
         computed_t = computed_t.split("\n")
         computed_t.pop(0)
         for computed, expected in zip(computed_t, expected_t.split("\n")):
@@ -71,17 +71,17 @@ class BenchmarkTest(TestBase):
         benchmark = Benchmark("unknown", visualize=False)
         benchmark.compile_results()
         benchmark.save_results()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             benchmark.exreport()
-        self.check_output_file(fake_out, "exreport_error")
+        self.check_output_file(stdout, "exreport_error")
 
     def test_tex_output(self):
         benchmark = Benchmark("accuracy", visualize=False)
         benchmark.compile_results()
         benchmark.save_results()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             benchmark.report(tex_output=True)
-        self.check_output_file(fake_out, "exreport_report")
+        self.check_output_file(stdout, "exreport_report")
         self.assertTrue(os.path.exists(benchmark.get_tex_file()))
         self.check_file_file(benchmark.get_tex_file(), "exreport_tex")
 
@@ -18,32 +18,32 @@ class PairCheckTest(TestBase):
     def test_pair_check(self):
         report = self.build_model(model1="ODTE", model2="STree")
         report.compute()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.report()
-        self.check_output_file(fake_out, "paircheck")
+        self.check_output_file(stdout, "paircheck")
 
     def test_pair_check_win(self):
         report = self.build_model(win=True)
         report.compute()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.report()
-        self.check_output_file(fake_out, "paircheck_win")
+        self.check_output_file(stdout, "paircheck_win")
 
     def test_pair_check_lose(self):
         report = self.build_model(
             model1="RandomForest", model2="STree", lose=True
         )
         report.compute()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.report()
-        self.check_output_file(fake_out, "paircheck_lose")
+        self.check_output_file(stdout, "paircheck_lose")
 
     def test_pair_check_win_lose(self):
         report = self.build_model(win=True, lose=True)
         report.compute()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.report()
-        self.check_output_file(fake_out, "paircheck_win_lose")
+        self.check_output_file(stdout, "paircheck_win_lose")
 
     def test_pair_check_store_result(self):
         report = self.build_model(win=True, lose=True)
@@ -25,18 +25,18 @@ class ReportTest(TestBase):
                 "results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json",
             )
         )
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.report()
-        self.check_output_file(fake_out, "report")
+        self.check_output_file(stdout, "report")
 
     def test_report_without_folder(self):
         report = Report(
             file_name="results_accuracy_STree_iMac27_2021-09-30_11:42:07_0"
             ".json"
         )
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.report()
-        self.check_output_file(fake_out, "report")
+        self.check_output_file(stdout, "report")
 
     def test_report_compared(self):
         report = Report(
@@ -44,9 +44,9 @@ class ReportTest(TestBase):
             ".json",
             compare=True,
         )
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.report()
-        self.check_output_file(fake_out, "report_compared")
+        self.check_output_file(stdout, "report_compared")
 
     def test_compute_status(self):
         file_name = "results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json"
@@ -67,21 +67,21 @@ class ReportTest(TestBase):
 
     def test_report_best(self):
        report = ReportBest("accuracy", "STree", best=True, grid=False)
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.report()
-        self.check_output_file(fake_out, "report_best")
+        self.check_output_file(stdout, "report_best")
 
     def test_report_grid(self):
         report = ReportBest("accuracy", "STree", best=False, grid=True)
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.report()
-        self.check_output_file(fake_out, "report_grid")
+        self.check_output_file(stdout, "report_grid")
 
     def test_report_best_both(self):
         report = ReportBest("accuracy", "STree", best=True, grid=True)
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.report()
-        self.check_output_file(fake_out, "report_best")
+        self.check_output_file(stdout, "report_best")
 
     @patch("sys.stdout", new_callable=StringIO)
     def test_report_datasets(self, mock_output):
@@ -130,60 +130,60 @@ class SummaryTest(TestBase):
     def test_summary_list_results_model(self):
         report = Summary()
         report.acquire()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.list_results(model="STree")
-        self.check_output_file(fake_out, "summary_list_model")
+        self.check_output_file(stdout, "summary_list_model")
 
     def test_summary_list_results_score(self):
         report = Summary()
         report.acquire()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.list_results(score="accuracy")
-        self.check_output_file(fake_out, "summary_list_score")
+        self.check_output_file(stdout, "summary_list_score")
 
     def test_summary_list_results_n(self):
         report = Summary()
         report.acquire()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.list_results(score="accuracy", number=3)
-        self.check_output_file(fake_out, "summary_list_n")
+        self.check_output_file(stdout, "summary_list_n")
 
     def test_summary_list_hidden(self):
         report = Summary(hidden=True)
         report.acquire()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.list_results(score="accuracy")
-        self.check_output_file(fake_out, "summary_list_hidden")
+        self.check_output_file(stdout, "summary_list_hidden")
 
     def test_show_result_no_title(self):
         report = Summary()
         report.acquire()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             title = ""
             best = report.best_result(
                 criterion="model", value="STree", score="accuracy"
             )
             report.show_result(data=best, title=title)
-        self.check_output_file(fake_out, "summary_show_results")
+        self.check_output_file(stdout, "summary_show_results")
 
     def test_show_result_title(self):
         report = Summary()
         report.acquire()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             title = "**Title**"
             best = report.best_result(
                 criterion="model", value="STree", score="accuracy"
             )
             report.show_result(data=best, title=title)
-        self.check_output_file(fake_out, "summary_show_results_title")
+        self.check_output_file(stdout, "summary_show_results_title")
 
     def test_show_result_no_data(self):
         report = Summary()
         report.acquire()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             title = "**Test**"
             report.show_result(data={}, title=title)
-        computed = fake_out.getvalue()
+        computed = stdout.getvalue()
         expected = "** **Test** has No data **\n"
         self.assertEqual(computed, expected)
 
@@ -212,16 +212,16 @@ class SummaryTest(TestBase):
     def test_show_top(self):
         report = Summary()
         report.acquire()
-        with patch(self.output, new=StringIO()) as fake_out:
+        with patch(self.output, new=StringIO()) as stdout:
             report.show_top()
-        self.check_output_file(fake_out, "summary_show_top")
+        self.check_output_file(stdout, "summary_show_top")
 
     @patch("sys.stdout", new_callable=StringIO)
-    def test_show_top_no_data(self, fake_out):
+    def test_show_top_no_data(self, stdout):
         report = Summary()
         report.acquire()
         report.show_top(score="f1-macro")
-        self.assertEqual(fake_out.getvalue(), "** No results found **\n")
+        self.assertEqual(stdout.getvalue(), "** No results found **\n")
 
     def test_no_data(self):
         report = Summary()
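The SummaryTest hunk above also uses the decorator form of the same patch, where mock.patch creates the StringIO and injects it as a method argument; renaming that argument from fake_out to stdout keeps both styles consistent. A sketch of the decorator form, with an illustrative show_top stand-in rather than the repo's Summary class:

    import unittest
    from io import StringIO
    from unittest.mock import patch


    def show_top():
        # Illustrative stand-in for Summary.show_top().
        print("** No results found **")


    class DecoratorPatternTest(unittest.TestCase):
        @patch("sys.stdout", new_callable=StringIO)
        def test_show_top_no_data(self, stdout):
            # new_callable builds the StringIO and passes it in as `stdout`.
            show_top()
            self.assertEqual(stdout.getvalue(), "** No results found **\n")


    if __name__ == "__main__":
        unittest.main()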
@@ -129,7 +129,11 @@ class UtilTest(TestBase):
         )
         self.assertCountEqual(
             Files().get_all_results(hidden=True),
-            ["results_accuracy_STree_iMac27_2021-11-01_23:55:16_0.json"],
+            [
+                "results_accuracy_STree_iMac27_2021-11-01_23:55:16_0.json",
+                "results_accuracy_XGBoost_MacBookpro16_2022-05-04_11:00:35_"
+                "0.json",
+            ],
         )
 
     def test_Files_get_results_Error(self):
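In the UtilTest hunk, the expected list gains the long XGBoost filename split across two adjacent string literals; Python joins adjacent literals at compile time, and assertCountEqual compares the elements regardless of order. A quick check of the concatenation:

    # Adjacent string literals are concatenated at compile time.
    name = (
        "results_accuracy_XGBoost_MacBookpro16_2022-05-04_11:00:35_"
        "0.json"
    )
    assert name == (
        "results_accuracy_XGBoost_MacBookpro16_2022-05-04_11:00:35_0.json"
    )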
File diff suppressed because one or more lines are too long
@@ -1,3 +1,4 @@
 [92mDate File Score Time(h) Title
-========== ======================================================== ======== ======= =======
-[93m2021-11-01 results_accuracy_STree_iMac27_2021-11-01_23:55:16_0.json 0.97446 0.098 default
+========== ================================================================ ======== ======= =======================
+[93m2022-05-04 results_accuracy_XGBoost_MacBookpro16_2022-05-04_11:00:35_0.json nan 3.091 Default hyperparameters
+[92m2021-11-01 results_accuracy_STree_iMac27_2021-11-01_23:55:16_0.json 0.97446 0.098 default