diff --git a/benchmark/Utils.py b/benchmark/Utils.py
index baf6539..238eed6 100644
--- a/benchmark/Utils.py
+++ b/benchmark/Utils.py
@@ -148,6 +148,9 @@ class EnvDefault(argparse.Action):
             default=default, required=required, **kwargs
         )
 
+    def __call__(self, parser, namespace, values, option_string=None):
+        setattr(namespace, self.dest, values)
+
 
 class TextColor:
     BLUE = "\033[94m"
diff --git a/benchmark/scripts/be_best b/benchmark/scripts/be_best
index 0d80c3b..5504770 100755
--- a/benchmark/scripts/be_best
+++ b/benchmark/scripts/be_best
@@ -1,8 +1,8 @@
 #!/usr/bin/env python
 import argparse
 import json
-from ..Results import Summary
-from ..Utils import EnvDefault
+from benchmark.Results import Summary
+from benchmark.Utils import EnvDefault
 
 
 def parse_arguments():
diff --git a/benchmark/scripts/be_build_best b/benchmark/scripts/be_build_best
index 5802947..9ee6aa5 100755
--- a/benchmark/scripts/be_build_best
+++ b/benchmark/scripts/be_build_best
@@ -44,5 +44,5 @@ datasets = Datasets()
 best = BestResults(score, model, datasets)
 best.build()
 if report:
-    report = ReportBest(score, model)
+    report = ReportBest(score, model, best=True, grid=False)
     report.report()
diff --git a/benchmark/scripts/be_td b/benchmark/scripts/be_td
index 4ccd257..254b86f 100755
--- a/benchmark/scripts/be_td
+++ b/benchmark/scripts/be_td
@@ -2,7 +2,7 @@
 import sys
 import time
 from benchmark.Experiments import Datasets
-from benchmark.mufs import MUFS
+from mufs import MUFS
 
 mufs_i = MUFS()
 mufs_c = MUFS()
diff --git a/benchmark/tests/BestResults_test.py b/benchmark/tests/BestResults_test.py
index 15154a6..17e7024 100644
--- a/benchmark/tests/BestResults_test.py
+++ b/benchmark/tests/BestResults_test.py
@@ -1,6 +1,5 @@
 import os
 import unittest
-from ..Models import Models
 from ..Experiments import BestResults, Datasets
 
 
@@ -9,9 +8,6 @@ class BestResultTest(unittest.TestCase):
         os.chdir(os.path.dirname(os.path.abspath(__file__)))
         super().__init__(*args, **kwargs)
 
-    def tearDown(self) -> None:
-        return super().tearDown()
-
     def test_load(self):
         expected = {
             "balance-scale": [
diff --git a/benchmark/tests/Experiment_test.py b/benchmark/tests/Experiment_test.py
new file mode 100644
index 0000000..b3e7ab3
--- /dev/null
+++ b/benchmark/tests/Experiment_test.py
@@ -0,0 +1,61 @@
+import os
+import unittest
+from ..Models import Models
+from ..Experiments import Experiment, Datasets
+
+
+class ExperimentTest(unittest.TestCase):
+    def __init__(self, *args, **kwargs):
+        os.chdir(os.path.dirname(os.path.abspath(__file__)))
+        self.exp = self.build_exp()
+        super().__init__(*args, **kwargs)
+
+    def build_exp(self, hyperparams=False, grid=False):
+        params = {
+            "score_name": "accuracy",
+            "model_name": "STree",
+            "stratified": "0",
+            "datasets": Datasets(),
+            "hyperparams_dict": "{}",
+            "hyperparams_file": hyperparams,
+            "grid_paramfile": grid,
+            "platform": "test",
+            "title": "Test",
+            "progress_bar": False,
+            "folds": 1,
+        }
+        return Experiment(**params)
+
+    def tearDown(self) -> None:
+        return super().tearDown()
+
+    def test_build_hyperparams_and_grid_file(self):
+        expected = {
+            "balance-scale": [
+                0.98,
+                {"splitter": "iwss", "max_features": "auto"},
+                "results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json",
+            ],
+            "balloons": [
+                0.86,
+                {
+                    "C": 7,
+                    "gamma": 0.1,
+                    "kernel": "rbf",
+                    "max_iter": 10000.0,
+                    "multiclass_strategy": "ovr",
+                },
+                "results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json",
+            ],
+        }
+        exp = self.build_exp(hyperparams=True)
+        self.assertSequenceEqual(exp.hyperparameters_dict, expected)
+        exp = self.build_exp(grid=True)
+        self.assertSequenceEqual(exp.hyperparameters_dict, expected)
+
+    def test_get_output_file(self):
+        file_name = self.exp.get_output_file()
+        self.assertTrue(
+            file_name.startswith("results/results_accuracy_STree_test_")
+        )
+        self.assertTrue(file_name.endswith("_0.json"))
diff --git a/benchmark/tests/Util_test.py b/benchmark/tests/Util_test.py
index c40753b..b4c341a 100644
--- a/benchmark/tests/Util_test.py
+++ b/benchmark/tests/Util_test.py
@@ -179,7 +179,7 @@ class UtilTest(unittest.TestCase):
             "score": "accuracy",
             "platform": "iMac27",
             "n_folds": 5,
-            "model": "ODTE",
+            "model": "STree",
             "stratified": "0",
         }
         ap = argparse.ArgumentParser()
@@ -234,16 +234,14 @@ class UtilTest(unittest.TestCase):
             required=True,
         )
         ap.add_argument(
-            "--test",
-            type=str,
+            "-r",
+            "--report",
+            type=bool,
             required=False,
-            default=None,
+            help="Generate Report",
         )
         args = ap.parse_args(
-            [
-                "--title",
-                "test",
-            ],
+            ["--title", "test", "-m", "STree"],
         )
         computed = args.__dict__
         for key, value in expected.items():
diff --git a/benchmark/tests/__init__.py b/benchmark/tests/__init__.py
index c1b6f20..2783f17 100644
--- a/benchmark/tests/__init__.py
+++ b/benchmark/tests/__init__.py
@@ -2,5 +2,12 @@ from .Util_test import UtilTest
 from .Models_test import ModelTest
 from .Dataset_test import DatasetTest
 from .BestResults_test import BestResultTest
+from .Experiment_test import ExperimentTest
 
-all = ["UtilTest", "ModelTest", "DatasetTest", "BestResultTest"]
+all = [
+    "UtilTest",
+    "ModelTest",
+    "DatasetTest",
+    "BestResultTest",
+    "ExperimentTest",
+]
diff --git a/benchmark/tests/results/grid_output_accuracy_STree.json b/benchmark/tests/results/grid_output_accuracy_STree.json
new file mode 100644
index 0000000..ece4a7a
--- /dev/null
+++ b/benchmark/tests/results/grid_output_accuracy_STree.json
@@ -0,0 +1 @@
+{"balance-scale": [0.98, {"splitter": "iwss", "max_features": "auto"}, "results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json"], "balloons": [0.86, {"C": 7, "gamma": 0.1, "kernel": "rbf", "max_iter": 10000.0, "multiclass_strategy": "ovr"}, "results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json"]}
\ No newline at end of file
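
Note on the Utils.py hunk: a custom argparse.Action subclass must override __call__, because the base argparse.Action.__call__ raises NotImplementedError as soon as the option is passed explicitly on the command line. Without the added override, the new Util_test case that parses "-m STree" would crash even though the environment-variable default logic works. The sketch below illustrates the pattern; it is a minimal stand-in, and its constructor is an assumption, since the diff only shows the tail of the real one in benchmark/Utils.py.

    # Minimal sketch of the EnvDefault pattern, assuming a constructor
    # that reads its default from an environment variable (only the
    # tail of the real __init__ appears in the diff above).
    import argparse
    import os


    class EnvDefault(argparse.Action):
        """argparse action that falls back to an environment variable."""

        def __init__(self, envvar, required=True, default=None, **kwargs):
            # Prefer the environment variable over the hard-coded default.
            if envvar in os.environ:
                default = os.environ[envvar]
            if required and default is not None:
                required = False
            super().__init__(default=default, required=required, **kwargs)

        def __call__(self, parser, namespace, values, option_string=None):
            # argparse.Action.__call__ raises NotImplementedError unless
            # overridden, so an explicit flag would crash without this.
            setattr(namespace, self.dest, values)


    if __name__ == "__main__":
        ap = argparse.ArgumentParser()
        ap.add_argument("-m", "--model", action=EnvDefault, envvar="MODEL")
        # Either "MODEL=STree python demo.py" or "python demo.py -m STree"
        # now populates args.model; the second form needs __call__.
        print(ap.parse_args().model)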