From 2c8646c8d8533c4388a31e072035ebd5625d7b84 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20Montan=CC=83ana?=
Date: Sun, 8 May 2022 16:06:14 +0200
Subject: [PATCH] Begin be_benchmark test

---
 benchmark/Experiments.py                     |  4 +--
 benchmark/Results.py                         | 13 +++++++---
 benchmark/Utils.py                           |  1 +
 benchmark/scripts/be_benchmark.py            | 26 +++++++++++---------
 benchmark/tests/Benchmark_test.py            |  8 ++++++
 benchmark/tests/Summary_test.py              |  3 ++-
 benchmark/tests/__init__.py                  |  2 ++
 benchmark/tests/scripts/Be_Benchmark_test.py | 24 ++++++++++++++++++
 8 files changed, 64 insertions(+), 17 deletions(-)
 create mode 100644 benchmark/tests/scripts/Be_Benchmark_test.py

diff --git a/benchmark/Experiments.py b/benchmark/Experiments.py
index 8cea570..cc4e066 100644
--- a/benchmark/Experiments.py
+++ b/benchmark/Experiments.py
@@ -13,7 +13,7 @@ from sklearn.model_selection import (
     GridSearchCV,
     cross_validate,
 )
-from .Utils import Folders, Files
+from .Utils import Folders, Files, NO_RESULTS
 from .Models import Models
 from .Arguments import EnvData
 
@@ -157,7 +157,7 @@ class BestResults:
                 self._process_datafile(results, data, name)
                 found = True
         if not found:
-            raise ValueError("** No results found **")
+            raise ValueError(NO_RESULTS)
         # Build best results json file
         output = {}
         datasets = Datasets()
diff --git a/benchmark/Results.py b/benchmark/Results.py
index 766079f..c365118 100644
--- a/benchmark/Results.py
+++ b/benchmark/Results.py
@@ -8,7 +8,14 @@ import subprocess
 import xlsxwriter
 import numpy as np
 from .Experiments import Datasets, BestResults
-from .Utils import Folders, Files, Symbols, BEST_ACCURACY_STREE, TextColor
+from .Utils import (
+    Folders,
+    Files,
+    Symbols,
+    BEST_ACCURACY_STREE,
+    TextColor,
+    NO_RESULTS,
+)
 
 
 class BaseReport(abc.ABC):
@@ -1140,7 +1147,7 @@ class Summary:
             score, model, input_data, sort_key, number
         )
         if data == []:
-            raise ValueError("** No results found **")
+            raise ValueError(NO_RESULTS)
         max_file = max(len(x["file"]) for x in data)
         max_title = max(len(x["title"]) for x in data)
         if self.hidden:
@@ -1278,7 +1285,7 @@ class Summary:
             else [x for x in haystack if x[criterion] == value]
         )
         if haystack == []:
-            raise ValueError("** No results found **")
+            raise ValueError(NO_RESULTS)
         return (
             sorted(
                 haystack,
diff --git a/benchmark/Utils.py b/benchmark/Utils.py
index 842099b..57a646f 100644
--- a/benchmark/Utils.py
+++ b/benchmark/Utils.py
@@ -2,6 +2,7 @@ import os
 import subprocess
 
 BEST_ACCURACY_STREE = 40.282203
+NO_RESULTS = "** No results found **"
 
 
 class Folders:
diff --git a/benchmark/scripts/be_benchmark.py b/benchmark/scripts/be_benchmark.py
index 81b562c..8440c1b 100755
--- a/benchmark/scripts/be_benchmark.py
+++ b/benchmark/scripts/be_benchmark.py
@@ -7,14 +7,18 @@ from benchmark.Arguments import Arguments
 def main(args_test=None):
     arguments = Arguments()
     arguments.xset("score").xset("excel").xset("tex_output")
-    ar = arguments.parse(args_test)
-    benchmark = Benchmark(score=ar.score, visualize=True)
-    benchmark.compile_results()
-    benchmark.save_results()
-    benchmark.report(ar.tex_output)
-    benchmark.exreport()
-    if ar.excel:
-        benchmark.excel()
-        Files.open(benchmark.get_excel_file_name())
-    if ar.tex_output:
-        print(f"File {benchmark.get_tex_file()} generated")
+    args = arguments.parse(args_test)
+    benchmark = Benchmark(score=args.score, visualize=True)
+    try:
+        benchmark.compile_results()
+    except ValueError as e:
+        print(e)
+    else:
+        benchmark.save_results()
+        benchmark.report(args.tex_output)
+        benchmark.exreport()
+        if args.excel:
+            benchmark.excel()
+            Files.open(benchmark.get_excel_file_name())
+        if args.tex_output:
+            print(f"File {benchmark.get_tex_file()} generated")
diff --git a/benchmark/tests/Benchmark_test.py b/benchmark/tests/Benchmark_test.py
index 21c7aa9..0d70c75 100644
--- a/benchmark/tests/Benchmark_test.py
+++ b/benchmark/tests/Benchmark_test.py
@@ -75,6 +75,14 @@ class BenchmarkTest(TestBase):
             benchmark.exreport()
         self.check_output_file(stdout, "exreport_error")
 
+    def test_exreport_no_data(self):
+        benchmark = Benchmark("f1-weighted", visualize=False)
+        benchmark.compile_results()
+        benchmark.save_results()
+        with patch(self.output, new=StringIO()) as stdout:
+            benchmark.exreport()
+        self.check_output_file(stdout, "exreport_error")
+
     def test_tex_output(self):
         benchmark = Benchmark("accuracy", visualize=False)
         benchmark.compile_results()
diff --git a/benchmark/tests/Summary_test.py b/benchmark/tests/Summary_test.py
index 6b43344..8d68abc 100644
--- a/benchmark/tests/Summary_test.py
+++ b/benchmark/tests/Summary_test.py
@@ -2,6 +2,7 @@ from io import StringIO
 from unittest.mock import patch
 from .TestBase import TestBase
 from ..Results import Summary
+from ..Utils import NO_RESULTS
 
 
 class SummaryTest(TestBase):
@@ -228,4 +229,4 @@ class SummaryTest(TestBase):
             report.acquire()
         with self.assertRaises(ValueError) as msg:
             report.list_results(score="f1-macro", model="STree")
-        self.assertEqual(str(msg.exception), "** No results found **")
+        self.assertEqual(str(msg.exception), NO_RESULTS)
diff --git a/benchmark/tests/__init__.py b/benchmark/tests/__init__.py
index eae59a7..edb715e 100644
--- a/benchmark/tests/__init__.py
+++ b/benchmark/tests/__init__.py
@@ -17,6 +17,7 @@ from .scripts.Be_Report_test import BeReportTest
 from .scripts.Be_Summary_test import BeSummaryTest
 from .scripts.Be_Grid_test import BeGridTest
 from .scripts.Be_Best_test import BeBestTest
+from .scripts.Be_Benchmark_test import BeBenchmarkTest
 
 all = [
     "UtilTest",
@@ -38,4 +39,5 @@ all = [
     "BeSummaryTest",
     "BeGridTest",
     "BeBestTest",
+    "BeBenchmarkTest",
 ]
diff --git a/benchmark/tests/scripts/Be_Benchmark_test.py b/benchmark/tests/scripts/Be_Benchmark_test.py
new file mode 100644
index 0000000..0cfd5aa
--- /dev/null
+++ b/benchmark/tests/scripts/Be_Benchmark_test.py
@@ -0,0 +1,24 @@
+import os
+import json
+from ...Utils import Folders, Files
+from ..TestBase import TestBase
+
+
+class BeBenchmarkTest(TestBase):
+    def setUp(self):
+        self.prepare_scripts_env()
+
+    def tearDown(self) -> None:
+        # self.remove_files(
+        #     [Files.best_results("accuracy", "ODTE")],
+        #     Folders.results,
+        # )
+        return super().tearDown()
+
+    def test_be_benchmark(self):
+        # stdout, stderr = self.execute_script(
+        #     "be_benchmark", ["-s", "accuracy"]
+        # )
+        # self.assertEqual(stderr.getvalue(), "")
+        # self.check_output_file(stdout, "be_best_all")
+        pass