From 1b8a424ad38acfcf1e83596966232e30656884e8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20Monta=C3=B1ana?=
Date: Sun, 20 Nov 2022 18:23:26 +0100
Subject: [PATCH] Add subparser to be_report & tests

Replace the flag-based interface of be_report (-f/-b/-g) with the
subcommands best, grid, file and datasets, and drop the now unused
grid argument from ReportBest. Update the tests accordingly.

---
 benchmark/Arguments.py                    | 35 +++++----
 benchmark/Results.py                      |  3 +-
 benchmark/scripts/be_build_best.py        |  2 +-
 benchmark/scripts/be_report.py            | 93 +++++++++++++++--------
 benchmark/tests/Arguments_test.py         |  3 -
 benchmark/tests/Report_test.py            | 10 +--
 benchmark/tests/scripts/Be_Report_test.py | 39 ++++++----
 7 files changed, 109 insertions(+), 76 deletions(-)
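Usage sketch of the new interface, as exercised by the updated tests
(the result file below is the fixture name used in the test suite):

    be_report best -s accuracy -m STree
    be_report grid -s accuracy -m STree
    be_report file results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json -x -c
    be_report datasets -x

Argument names not present in Arguments.parameters (e.g. file_name) are
registered by add_subparsers_options as bare positional arguments.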
arguments.xset("file").xset("excel").xset("sql").xset("compare") - arguments.xset("best").xset("grid").xset("model", required=False) - arguments.xset("score", required=False) + arguments = Arguments(prog="be_report") + arguments.add_subparser() + arguments.add_subparsers_options( + ( + "best", + "Report best results obtained by any model/score. " + "See be_build_best", + ), + [ + ("model", dict(required=False)), + ("score", dict(required=False)), + ], + ) + arguments.add_subparsers_options( + ( + "grid", + "Report grid results obtained by any model/score. " + "See be_build_grid", + ), + [ + ("model", dict(required=False)), + ("score", dict(required=False)), + ], + ) + arguments.add_subparsers_options( + ("file", "Report file results"), + [ + ("file_name", {}), + ("excel", {}), + ("sql", {}), + ("compare", {}), + ], + ) + arguments.add_subparsers_options( + ("datasets", "Report datasets information"), + [ + ("excel", {}), + ], + ) args = arguments.parse(args_test) - if args.best: - args.grid = False - if args.grid: - args.best = False - if args.file is None and not args.best and not args.grid: + if args.subcommand == "best" or args.subcommand == "grid": + best = args.subcommand == "best" + report = ReportBest(args.score, args.model, best) + report.report() + elif args.subcommand == "file": + try: + report = Report(args.file_name, args.compare) + report.report() + except FileNotFoundError as e: + print(e) + return + if args.sql: + sql = SQL(args.file_name) + sql.report() + if args.excel: + excel = Excel( + file_name=args.file_name, + compare=args.compare, + ) + excel.report() + is_test = args_test is not None + Files.open(excel.get_file_name(), is_test) + else: report = ReportDatasets(args.excel) report.report() if args.excel: is_test = args_test is not None Files.open(report.get_file_name(), is_test) - else: - if args.best or args.grid: - report = ReportBest(args.score, args.model, args.best, args.grid) - report.report() - else: - try: - report = Report(args.file, args.compare) - except FileNotFoundError as e: - print(e) - else: - report.report() - if args.excel: - excel = Excel( - file_name=args.file, - compare=args.compare, - ) - excel.report() - is_test = args_test is not None - Files.open(excel.get_file_name(), is_test) - if args.sql: - sql = SQL(args.file) - sql.report() diff --git a/benchmark/tests/Arguments_test.py b/benchmark/tests/Arguments_test.py index b56ee0e..2431479 100644 --- a/benchmark/tests/Arguments_test.py +++ b/benchmark/tests/Arguments_test.py @@ -24,13 +24,10 @@ class ArgumentsTest(TestBase): def test_parameters(self): expected_parameters = { - "best": ("-b", "--best"), "color": ("-c", "--color"), "compare": ("-c", "--compare"), "dataset": ("-d", "--dataset"), "excel": ("-x", "--excel"), - "file": ("-f", "--file"), - "grid": ("-g", "--grid"), "grid_paramfile": ("-g", "--grid_paramfile"), "hidden": ("--hidden",), "hyperparameters": ("-p", "--hyperparameters"), diff --git a/benchmark/tests/Report_test.py b/benchmark/tests/Report_test.py index 5970cdb..6518b98 100644 --- a/benchmark/tests/Report_test.py +++ b/benchmark/tests/Report_test.py @@ -69,13 +69,13 @@ class ReportTest(TestBase): _ = Report("unknown_file") def test_report_best(self): - report = ReportBest("accuracy", "STree", best=True, grid=False) + report = ReportBest("accuracy", "STree", best=True) with patch(self.output, new=StringIO()) as stdout: report.report() self.check_output_file(stdout, "report_best") def test_report_grid(self): - report = ReportBest("accuracy", "STree", best=False, grid=True) + report 
diff --git a/benchmark/tests/Arguments_test.py b/benchmark/tests/Arguments_test.py
index b56ee0e..2431479 100644
--- a/benchmark/tests/Arguments_test.py
+++ b/benchmark/tests/Arguments_test.py
@@ -24,13 +24,10 @@ class ArgumentsTest(TestBase):
 
     def test_parameters(self):
         expected_parameters = {
-            "best": ("-b", "--best"),
             "color": ("-c", "--color"),
             "compare": ("-c", "--compare"),
             "dataset": ("-d", "--dataset"),
             "excel": ("-x", "--excel"),
-            "file": ("-f", "--file"),
-            "grid": ("-g", "--grid"),
             "grid_paramfile": ("-g", "--grid_paramfile"),
             "hidden": ("--hidden",),
             "hyperparameters": ("-p", "--hyperparameters"),
diff --git a/benchmark/tests/Report_test.py b/benchmark/tests/Report_test.py
index 5970cdb..6518b98 100644
--- a/benchmark/tests/Report_test.py
+++ b/benchmark/tests/Report_test.py
@@ -69,13 +69,13 @@ class ReportTest(TestBase):
             _ = Report("unknown_file")
 
     def test_report_best(self):
-        report = ReportBest("accuracy", "STree", best=True, grid=False)
+        report = ReportBest("accuracy", "STree", best=True)
         with patch(self.output, new=StringIO()) as stdout:
             report.report()
         self.check_output_file(stdout, "report_best")
 
     def test_report_grid(self):
-        report = ReportBest("accuracy", "STree", best=False, grid=True)
+        report = ReportBest("accuracy", "STree", best=False)
         with patch(self.output, new=StringIO()) as stdout:
             report.report()
         file_name = "report_grid.test"
@@ -90,12 +90,6 @@ class ReportTest(TestBase):
             self.assertEqual(line, output_text[index])
 
-    def test_report_best_both(self):
-        report = ReportBest("accuracy", "STree", best=True, grid=True)
-        with patch(self.output, new=StringIO()) as stdout:
-            report.report()
-        self.check_output_file(stdout, "report_best")
-
     @patch("sys.stdout", new_callable=StringIO)
     def test_report_datasets(self, mock_output):
         report = ReportDatasets()
@patch("sys.stderr", new_callable=StringIO) + def test_be_report_unknown_subcommand(self, stderr): + with self.assertRaises(SystemExit) as msg: + module = self.search_script("be_report") + module.main(["unknown", "accuracy", "-m", "STree"]) + self.assertEqual(msg.exception.code, 2) + self.assertEqual( + stderr.getvalue(), + "usage: be_report [-h] {best,grid,file,datasets} ...\n" + "be_report: error: argument subcommand: invalid choice: " + "'unknown' (choose from 'best', 'grid', 'file', 'datasets')\n", ) - self.assertEqual(stderr.getvalue(), "") - self.check_output_file(stdout, "report_best") def test_be_report_excel_compared(self): file_name = "results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json" stdout, stderr = self.execute_script( "be_report", - ["-f", file_name, "-x", "-c"], + ["file", file_name, "-x", "-c"], ) file_name = os.path.join( Folders.results, file_name.replace(".json", ".xlsx") @@ -125,7 +132,7 @@ class BeReportTest(TestBase): file_name = "results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json" stdout, stderr = self.execute_script( "be_report", - ["-f", file_name, "-x"], + ["file", file_name, "-x"], ) file_name = os.path.join( Folders.results, file_name.replace(".json", ".xlsx") @@ -140,7 +147,7 @@ class BeReportTest(TestBase): file_name = "results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.json" stdout, stderr = self.execute_script( "be_report", - ["-f", file_name, "-q"], + ["file", file_name, "-q"], ) file_name = os.path.join( Folders.results, file_name.replace(".json", ".sql")