mirror of https://github.com/Doctorado-ML/benchmark.git
Complete Summary tests
@@ -24,19 +24,10 @@ def parse_arguments():
        required=True,
        help="score name {accuracy, f1_micro, f1_macro, all}",
    )
    ap.add_argument(
        "-l",
        "--list",
        type=bool,
        required=False,
        default=False,
        help="List all results",
    )
    args = ap.parse_args()
    return (
        args.score,
        args.model,
        args.list,
    )


@@ -44,7 +35,6 @@ if __name__ == "__main__":
    (
        score,
        model,
        list_results,
    ) = parse_arguments()
    all_metrics = ["accuracy", "f1-macro", "f1-micro"]
    metrics = all_metrics if score == "all" else [score]
@@ -60,5 +50,3 @@ if __name__ == "__main__":
            summary.best_result(score=metric), title=f"BEST RESULT of {metric}"
        )
        summary.show_top(score=metric, n=10)
    if list_results:
        summary.list_results()

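Editor's aside: the hunks above leave the script's CLI with a required -s/--score option and a boolean -l/--list switch. As an illustrative, self-contained sketch (not part of the commit), this is how argparse treats that type=bool flag: bool() is applied to the raw string, so any non-empty value is truthy and the flag can only stay False by being omitted.

# Illustrative sketch only -- mirrors the "--list" option defined above.
import argparse

ap = argparse.ArgumentParser()
ap.add_argument(
    "-l",
    "--list",
    type=bool,
    required=False,
    default=False,
    help="List all results",
)

print(ap.parse_args([]).list)               # False (default)
print(ap.parse_args(["-l", "1"]).list)      # True
print(ap.parse_args(["-l", "False"]).list)  # True: bool("False") is True
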
@@ -80,7 +80,8 @@ class BenchmarkTest(unittest.TestCase):
        benchmark = Benchmark("accuracy", visualize=False)
        benchmark.compile_results()
        benchmark.save_results()
        benchmark.exreport()
        with patch("sys.stdout", new=StringIO()) as fake_out:
            benchmark.exreport()
        self.assertFalse(os.path.exists(Files.exreport_pdf))
        self.assertFalse(os.path.exists(Folders.report))

benchmark/tests/PairCheck_test.py (new file, 120 lines)
@@ -0,0 +1,120 @@
import os
import unittest
from io import StringIO
from unittest.mock import patch
from ..Results import Summary
from ..Utils import Symbols


class PairCheckTest(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        os.chdir(os.path.dirname(os.path.abspath(__file__)))
        super().__init__(*args, **kwargs)

    def test_summary_list_results_model(self):
        report = Summary()
        report.acquire()
        with patch("sys.stdout", new=StringIO()) as fake_out:
            report.list_results(model="STree")
            computed = fake_out.getvalue()
        with open(
            os.path.join("test_files", "summary_list_model.test"), "r"
        ) as f:
            expected = f.read()
        self.assertEqual(computed, expected)

    def test_summary_list_results_score(self):
        report = Summary()
        report.acquire()
        with patch("sys.stdout", new=StringIO()) as fake_out:
            report.list_results(score="accuracy")
            computed = fake_out.getvalue()
        with open(
            os.path.join("test_files", "summary_list_score.test"), "r"
        ) as f:
            expected = f.read()
        self.assertEqual(computed, expected)

    def test_summary_list_results_n(self):
        report = Summary()
        report.acquire()
        with patch("sys.stdout", new=StringIO()) as fake_out:
            report.list_results(score="accuracy", number=3)
            computed = fake_out.getvalue()
        with open(os.path.join("test_files", "summary_list_n.test"), "r") as f:
            expected = f.read()
        self.assertEqual(computed, expected)

    def test_summary_list_hiden(self):
        report = Summary(hidden=True)
        report.acquire()
        with patch("sys.stdout", new=StringIO()) as fake_out:
            report.list_results(score="accuracy")
            computed = fake_out.getvalue()
        with open(
            os.path.join("test_files", "summary_list_hidden.test"), "r"
        ) as f:
            expected = f.read()
        self.assertEqual(computed, expected)

    def test_show_result_no_title(self):
        report = Summary()
        report.acquire()
        with patch("sys.stdout", new=StringIO()) as fake_out:
            title = ""
            best = report.best_result(
                criterion="model", value="STree", score="accuracy"
            )
            report.show_result(data=best, title=title)
            computed = fake_out.getvalue()
        with open(
            os.path.join("test_files", "summary_show_results.test"), "r"
        ) as f:
            expected = f.read()
        self.assertEqual(computed, expected)

    def test_show_result_title(self):
        report = Summary()
        report.acquire()
        with patch("sys.stdout", new=StringIO()) as fake_out:
            title = "**Title**"
            best = report.best_result(
                criterion="model", value="STree", score="accuracy"
            )
            report.show_result(data=best, title=title)
            computed = fake_out.getvalue()
        with open(
            os.path.join("test_files", "summary_show_results_title.test"), "r"
        ) as f:
            expected = f.read()
        self.assertEqual(computed, expected)

    def test_show_result_no_data(self):
        report = Summary()
        report.acquire()
        with patch("sys.stdout", new=StringIO()) as fake_out:
            title = "**Test**"
            report.show_result(data={}, title=title)
            computed = fake_out.getvalue()
        expected = f"** **Test** has No data **\n"
        self.assertEqual(computed, expected)

    def test_best_results_datasets(self):
        report = Summary()
        report.acquire()
        computed = report.best_results_datasets()
        expected = {
            "balance-scale": (
                0.83616,
                {},
                "results_accuracy_RandomForest_iMac27_2022-01-14_12:39:30_0.json",
                "Test default paramters with RandomForest",
            ),
            "balloons": (
                0.5566666666666668,
                {"max_features": "auto", "splitter": "mutual"},
                "results_accuracy_STree_macbook-pro_2021-11-01_19:17:07_0.json",
                "default B",
            ),
        }
        self.assertSequenceEqual(computed, expected)

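Editor's aside: every list/show test in the file above follows the same capture-and-compare pattern: sys.stdout is temporarily replaced by a StringIO while the Summary call prints its report, and the captured text is checked against a fixture under test_files/. A minimal, self-contained sketch of that pattern (the names here are invented for illustration; the real tests read their expected text from the .test fixtures shown later in this commit):

# Minimal sketch of the stdout-capture pattern used by PairCheckTest/SummaryTest.
from io import StringIO
from unittest.mock import patch

def print_report():
    # stand-in for Summary.list_results() / Summary.show_result()
    print("hello report")

with patch("sys.stdout", new=StringIO()) as fake_out:
    print_report()
    computed = fake_out.getvalue()

expected = "hello report\n"  # the real tests read this from test_files/*.test
assert computed == expected
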
benchmark/tests/Summary_test.py (new file, 234 lines)
@@ -0,0 +1,234 @@
import os
import unittest
from io import StringIO
from unittest.mock import patch
from ..Results import Summary
from ..Utils import Symbols


class SummaryTest(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        os.chdir(os.path.dirname(os.path.abspath(__file__)))
        super().__init__(*args, **kwargs)

    def test_summary_without_model(self):
        report = Summary()
        report.acquire()
        computed = report.best_results(score="accuracy")
        expected = [
            {
                "score": "accuracy",
                "model": "STree",
                "title": "With gridsearched hyperparameters",
                "platform": "iMac27",
                "date": "2021-09-30",
                "time": "11:42:07",
                "stratified": "0",
                "file": "results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json",
                "metric": 0.04544339345094904,
                "duration": 624.2505249977112,
            },
            {
                "score": "accuracy",
                "model": "ODTE",
                "title": "Gridsearched hyperparams v022.1b random_init",
                "platform": "Galgo",
                "date": "2022-04-20",
                "time": "10:52:20",
                "stratified": "0",
                "file": "results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.json",
                "metric": 0.04340676203831255,
                "duration": 22591.471411943436,
            },
            {
                "score": "accuracy",
                "model": "STree",
                "title": "default A",
                "platform": "iMac27",
                "date": "2021-10-27",
                "time": "09:40:40",
                "stratified": "0",
                "file": "results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json",
                "metric": 0.04158163842230773,
                "duration": 3395.009148836136,
            },
            {
                "score": "accuracy",
                "model": "STree",
                "title": "default B",
                "platform": "macbook-pro",
                "date": "2021-11-01",
                "time": "19:17:07",
                "stratified": "0",
                "file": "results_accuracy_STree_macbook-pro_2021-11-01_19:17:07_0.json",
                "metric": 0.03789928437296904,
                "duration": 4115.042420864105,
            },
            {
                "score": "accuracy",
                "model": "RandomForest",
                "title": "Test default paramters with RandomForest",
                "platform": "iMac27",
                "date": "2022-01-14",
                "time": "12:39:30",
                "stratified": "0",
                "file": "results_accuracy_RandomForest_iMac27_2022-01-14_12:39:30_0.json",
                "metric": 0.03627309062515771,
                "duration": 272.7363500595093,
            },
        ]
        self.assertListEqual(computed, expected)

    def test_summary_with_model(self):
        report = Summary()
        report.acquire()
        computed = report.best_results(
            criterion="model", value="STree", score="accuracy"
        )
        expected = [
            {
                "score": "accuracy",
                "model": "STree",
                "title": "With gridsearched hyperparameters",
                "platform": "iMac27",
                "date": "2021-09-30",
                "time": "11:42:07",
                "stratified": "0",
                "file": "results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json",
                "metric": 0.04544339345094904,
                "duration": 624.2505249977112,
            },
            {
                "score": "accuracy",
                "model": "STree",
                "title": "default A",
                "platform": "iMac27",
                "date": "2021-10-27",
                "time": "09:40:40",
                "stratified": "0",
                "file": "results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json",
                "metric": 0.04158163842230773,
                "duration": 3395.009148836136,
            },
            {
                "score": "accuracy",
                "model": "STree",
                "title": "default B",
                "platform": "macbook-pro",
                "date": "2021-11-01",
                "time": "19:17:07",
                "stratified": "0",
                "file": "results_accuracy_STree_macbook-pro_2021-11-01_19:17:07_0.json",
                "metric": 0.03789928437296904,
                "duration": 4115.042420864105,
            },
        ]
        self.assertListEqual(computed, expected)

    def test_summary_list_results_model(self):
        report = Summary()
        report.acquire()
        with patch("sys.stdout", new=StringIO()) as fake_out:
            report.list_results(model="STree")
            computed = fake_out.getvalue()
        with open(
            os.path.join("test_files", "summary_list_model.test"), "r"
        ) as f:
            expected = f.read()
        self.assertEqual(computed, expected)

    def test_summary_list_results_score(self):
        report = Summary()
        report.acquire()
        with patch("sys.stdout", new=StringIO()) as fake_out:
            report.list_results(score="accuracy")
            computed = fake_out.getvalue()
        with open(
            os.path.join("test_files", "summary_list_score.test"), "r"
        ) as f:
            expected = f.read()
        self.assertEqual(computed, expected)

    def test_summary_list_results_n(self):
        report = Summary()
        report.acquire()
        with patch("sys.stdout", new=StringIO()) as fake_out:
            report.list_results(score="accuracy", number=3)
            computed = fake_out.getvalue()
        with open(os.path.join("test_files", "summary_list_n.test"), "r") as f:
            expected = f.read()
        self.assertEqual(computed, expected)

    def test_summary_list_hiden(self):
        report = Summary(hidden=True)
        report.acquire()
        with patch("sys.stdout", new=StringIO()) as fake_out:
            report.list_results(score="accuracy")
            computed = fake_out.getvalue()
        with open(
            os.path.join("test_files", "summary_list_hidden.test"), "r"
        ) as f:
            expected = f.read()
        self.assertEqual(computed, expected)

    def test_show_result_no_title(self):
        report = Summary()
        report.acquire()
        with patch("sys.stdout", new=StringIO()) as fake_out:
            title = ""
            best = report.best_result(
                criterion="model", value="STree", score="accuracy"
            )
            report.show_result(data=best, title=title)
            computed = fake_out.getvalue()
        with open(
            os.path.join("test_files", "summary_show_results.test"), "r"
        ) as f:
            expected = f.read()
        self.assertEqual(computed, expected)

    def test_show_result_title(self):
        report = Summary()
        report.acquire()
        with patch("sys.stdout", new=StringIO()) as fake_out:
            title = "**Title**"
            best = report.best_result(
                criterion="model", value="STree", score="accuracy"
            )
            report.show_result(data=best, title=title)
            computed = fake_out.getvalue()
        with open(
            os.path.join("test_files", "summary_show_results_title.test"), "r"
        ) as f:
            expected = f.read()
        self.assertEqual(computed, expected)

    def test_show_result_no_data(self):
        report = Summary()
        report.acquire()
        with patch("sys.stdout", new=StringIO()) as fake_out:
            title = "**Test**"
            report.show_result(data={}, title=title)
            computed = fake_out.getvalue()
        expected = f"** **Test** has No data **\n"
        self.assertEqual(computed, expected)

    def test_best_results_datasets(self):
        report = Summary()
        report.acquire()
        computed = report.best_results_datasets()
        expected = {
            "balance-scale": (
                0.83616,
                {},
                "results_accuracy_RandomForest_iMac27_2022-01-14_12:39:30_0.json",
                "Test default paramters with RandomForest",
            ),
            "balloons": (
                0.5566666666666668,
                {"max_features": "auto", "splitter": "mutual"},
                "results_accuracy_STree_macbook-pro_2021-11-01_19:17:07_0.json",
                "default B",
            ),
        }
        self.assertSequenceEqual(computed, expected)

@@ -8,6 +8,8 @@ from .Report_test import ReportTest
from .Excel_test import ExcelTest
from .SQL_test import SQLTest
from .Benchmark_test import BenchmarkTest
from .Summary_test import SummaryTest
from .PairCheck_test import PairCheckTest

all = [
    "UtilTest",
@@ -20,4 +22,6 @@ all = [
    "ExcelTest",
    "SQLTest",
    "BenchmarkTest",
    "SummaryTest",
    "PairCheckTest",
]

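Editor's aside: with SummaryTest and PairCheckTest imported and registered in the package's all list above, they are collected alongside the existing suites. As a hedged illustration only (assuming the repository root is on sys.path so that benchmark.tests is importable; the project may simply use its usual unittest/coverage invocation), one way to drive just the new classes programmatically:

# Illustrative runner for the two test classes added by this commit.
import unittest

from benchmark.tests import PairCheckTest, SummaryTest  # exported by the __init__ shown above

loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTests(loader.loadTestsFromTestCase(SummaryTest))
suite.addTests(loader.loadTestsFromTestCase(PairCheckTest))
unittest.TextTestRunner(verbosity=2).run(suite)
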
benchmark/tests/test_files/summary_list_hidden.test (new file, 3 lines)
@@ -0,0 +1,3 @@
[92mDate File Score Time(h) Title
========== ======================================================== ======== ======= =======
[93m2021-11-01 results_accuracy_STree_iMac27_2021-11-01_23:55:16_0.json 0.97446 0.098 default

benchmark/tests/test_files/summary_list_model.test (new file, 5 lines)
@@ -0,0 +1,5 @@
[94mDate File Score Time(h) Title
========== ============================================================= ======== ======= =================================
[96m2021-11-01 results_accuracy_STree_macbook-pro_2021-11-01_19:17:07_0.json 0.03790 1.143 default B
[94m2021-10-27 results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json 0.04158 0.943 default A
[96m2021-09-30 results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json 0.04544 0.173 With gridsearched hyperparameters

benchmark/tests/test_files/summary_list_n.test (new file, 5 lines)
@@ -0,0 +1,5 @@
[94mDate File Score Time(h) Title
========== =============================================================== ======== ======= ============================================
[96m2022-04-20 results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.json 0.04341 6.275 Gridsearched hyperparams v022.1b random_init
[94m2022-01-14 results_accuracy_RandomForest_iMac27_2022-01-14_12:39:30_0.json 0.03627 0.076 Test default paramters with RandomForest
[96m2021-11-01 results_accuracy_STree_macbook-pro_2021-11-01_19:17:07_0.json 0.03790 1.143 default B

benchmark/tests/test_files/summary_list_score.test (new file, 7 lines)
@@ -0,0 +1,7 @@
[94mDate File Score Time(h) Title
========== =============================================================== ======== ======= ============================================
[96m2022-04-20 results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.json 0.04341 6.275 Gridsearched hyperparams v022.1b random_init
[94m2022-01-14 results_accuracy_RandomForest_iMac27_2022-01-14_12:39:30_0.json 0.03627 0.076 Test default paramters with RandomForest
[96m2021-11-01 results_accuracy_STree_macbook-pro_2021-11-01_19:17:07_0.json 0.03790 1.143 default B
[94m2021-10-27 results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json 0.04158 0.943 default A
[96m2021-09-30 results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json 0.04544 0.173 With gridsearched hyperparameters

benchmark/tests/test_files/summary_show_results.test (new file, 12 lines)
@@ -0,0 +1,12 @@
*********************************************************************************
* *
* With gridsearched hyperparameters *
* *
* Model: STree Ver. 1.2.3 Score: accuracy Metric: 0.0454434 *
* *
* Date : 2021-09-30 Time: 11:42:07 Time Spent: 624.25 secs. *
* Seeds: [57, 31, 1714, 17, 23, 79, 83, 97, 7, 1] Platform: iMac27 *
* Stratified: False *
* results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json *
* *
*********************************************************************************

benchmark/tests/test_files/summary_show_results_title.test (new file, 14 lines)
@@ -0,0 +1,14 @@
*********************************************************************************
* **Title** *
*-------------------------------------------------------------------------------*
* *
* With gridsearched hyperparameters *
* *
* Model: STree Ver. 1.2.3 Score: accuracy Metric: 0.0454434 *
* *
* Date : 2021-09-30 Time: 11:42:07 Time Spent: 624.25 secs. *
* Seeds: [57, 31, 1714, 17, 23, 79, 83, 97, 7, 1] Platform: iMac27 *
* Stratified: False *
* results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json *
* *
*********************************************************************************