Mirror of https://github.com/Doctorado-ML/benchmark.git (synced 2025-08-17 00:15:55 +00:00)

Commit: Continue benchmark tests
@@ -6,9 +6,9 @@ from benchmark.Arguments import Arguments
 
 def main(args_test=None):
     arguments = Arguments()
-    arguments.xset("score").xset("excel").xset("tex_output")
+    arguments.xset("score").xset("excel").xset("tex_output").xset("quiet")
     args = arguments.parse(args_test)
-    benchmark = Benchmark(score=args.score, visualize=True)
+    benchmark = Benchmark(score=args.score, visualize=not args.quiet)
     try:
         benchmark.compile_results()
     except ValueError as e:
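To make the first hunk concrete: the new quiet option ends up as visualize=not args.quiet in the Benchmark call. The sketch below shows that inversion with plain argparse; it is only an assumption about behaviour, since Arguments.xset()/parse() are not part of this diff and the real short-flag names may differ.

# Minimal sketch (assumed wiring, not the project's Arguments class):
# a numeric "quiet" flag turns off visualization in Benchmark.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("-s", "--score", default="accuracy")
parser.add_argument("-q", "--quiet", type=int, default=0)  # hypothetical short flag

args = parser.parse_args(["-s", "accuracy", "-q", "1"])
visualize = not args.quiet  # quiet=1 -> visualize=False, matching the hunk above
print(visualize)  # prints: False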
@@ -8,19 +8,16 @@ from ..Results import Benchmark
 
 
 class BenchmarkTest(TestBase):
-    def tearDown(self) -> None:
+    def tearDown(self):
         benchmark = Benchmark("accuracy", visualize=False)
-        files = [
-            "exreport_accuracy.csv",
-            "exreport_accuracy.txt",
-            "exreport_accuracy.xlsx",
-            "exreport_err_accuracy.txt",
-            "exreport_err_unknown.txt",
-            "exreport_unknown.csv",
-            "exreport_unknown.txt",
-            "Rplots.pdf",
-            benchmark.get_tex_file(),
-        ]
+        files = []
+        for score in ["accuracy", "unknown"]:
+            files.append(Files.exreport(score))
+            files.append(Files.exreport_output(score))
+            files.append(Files.exreport_err(score))
+        files.append(Files.exreport_excel("accuracy"))
+        files.append(Files.exreport_pdf)
+        files.append(Files.tex_output("accuracy"))
         self.remove_files(files, Folders.exreport)
         self.remove_files(files, ".")
         return super().tearDown()
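The literal names removed in this hunk hint at what the Files helpers return. The sketch below is inferred from those literals only; the real Files class in Utils is not shown here, and the tex_output name cannot be recovered from this hunk (the old code asked benchmark.get_tex_file() for it).

# Inferred sketch of the Files naming helpers (assumption based on the
# deleted literals above, not the actual Utils.Files implementation).
class FilesSketch:
    exreport_pdf = "Rplots.pdf"

    @staticmethod
    def exreport(score):          # "exreport_accuracy.csv", "exreport_unknown.csv"
        return f"exreport_{score}.csv"

    @staticmethod
    def exreport_output(score):   # "exreport_accuracy.txt", "exreport_unknown.txt"
        return f"exreport_{score}.txt"

    @staticmethod
    def exreport_err(score):      # "exreport_err_accuracy.txt", "exreport_err_unknown.txt"
        return f"exreport_err_{score}.txt"

    @staticmethod
    def exreport_excel(score):    # "exreport_accuracy.xlsx"
        return f"exreport_{score}.xlsx"

Under that assumption, looping over ["accuracy", "unknown"] rebuilds the same nine entries the old hard-coded list spelled out, so the tearDown cleanup stays equivalent.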
@@ -2,6 +2,7 @@ from io import StringIO
 from unittest.mock import patch
 from .TestBase import TestBase
 from ..Results import Summary
+from ..Utils import NO_RESULTS
 
 
 class SummaryTest(TestBase):
@@ -221,11 +222,11 @@ class SummaryTest(TestBase):
             report = Summary()
             report.acquire()
             report.show_top(score="f1-macro")
-        self.assertEqual(stdout.getvalue(), "** No results found **\n")
+        self.assertEqual(stdout.getvalue(), f"{NO_RESULTS}\n")
 
     def test_no_data(self):
         report = Summary()
         report.acquire()
         with self.assertRaises(ValueError) as msg:
             report.list_results(score="f1-macro", model="STree")
-        self.assertEqual(str(msg.exception), "** No results found **")
+        self.assertEqual(str(msg.exception), NO_RESULTS)
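The changed assertions suggest that NO_RESULTS, now imported from Utils, carries the exact message the tests used to hard-code. A presumed definition (assumption, not shown in this diff):

# Presumed constant in the Utils module (inferred from the replaced literal):
NO_RESULTS = "** No results found **"

Centralizing the message is what lets the remaining script tests below (be_build_best, be_list, be_pair_check) switch to f"{NO_RESULTS}\n" with only an import change.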
@@ -9,16 +9,28 @@ class BeBenchmarkTest(TestBase):
         self.prepare_scripts_env()
 
     def tearDown(self) -> None:
-        # self.remove_files(
-        #     [Files.best_results("accuracy", "ODTE")],
-        #     Folders.results,
-        # )
+        files = []
+        for score in ["accuracy", "unknown"]:
+            files.append(Files.exreport(score))
+            files.append(Files.exreport_output(score))
+            files.append(Files.exreport_err(score))
+
+        files.append(Files.exreport_excel("accuracy"))
+        files.append(Files.exreport_pdf)
+        files.append(Files.tex_output("accuracy"))
+        self.remove_files(files, Folders.exreport)
+        self.remove_files(files, ".")
         return super().tearDown()
 
     def test_be_benchmark(self):
-        # stdout, stderr = self.execute_script(
-        #     "be_benchmark", ["-s", "accuracy"]
-        # )
-        # self.assertEqual(stderr.getvalue(), "")
-        # self.check_output_file(stdout, "be_best_all")
-        pass
+        stdout, stderr = self.execute_script(
+            "be_benchmark", ["-s", "accuracy", "-q", "1", "-t", "1", "-x", "1"]
+        )
+        self.assertEqual(stderr.getvalue(), "")
+        # Check output
+        self.check_output_file(stdout, "exreport_report")
+        # Check csv file
+        file_name = os.path.join(Folders.exreport, Files.exreport("accuracy"))
+        self.check_file_file(file_name, "exreport_csv")
+        # Check tex file
+        # Check excel file
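The rewritten test drives be_benchmark with -s accuracy -q 1 -t 1 -x 1. Matching those against the options registered in the first hunk (score, quiet, tex_output, excel) suggests the mapping sketched below; it is an assumption, since the short-flag definitions live in the Arguments class, which this commit does not touch.

# Assumed short-flag mapping for the be_benchmark invocation (hypothetical):
assumed_flags = {
    "-s": "score",       # "accuracy"
    "-q": "quiet",       # 1 -> Benchmark(visualize=not quiet), no interactive plots
    "-t": "tex_output",  # 1 -> emit the LaTeX report
    "-x": "excel",       # 1 -> emit the Excel report
}

If that mapping holds, the test now exercises the quiet path plus both report outputs, which is why this tearDown (like BenchmarkTest's) has to clean up the excel, pdf, and tex artifacts as well.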
@@ -1,6 +1,6 @@
 import os
 import json
-from ...Utils import Folders, Files
+from ...Utils import Folders, Files, NO_RESULTS
 from ..TestBase import TestBase
 
 
@@ -24,7 +24,7 @@ class BeBestTest(TestBase):
         stdout, _ = self.execute_script(
             "be_build_best", ["-s", "accuracy", "-m", "SVC"]
         )
-        self.assertEqual(stdout.getvalue(), "** No results found **\n")
+        self.assertEqual(stdout.getvalue(), f"{NO_RESULTS}\n")
 
     def test_be_build_best(self):
         self.execute_script("be_build_best", ["-s", "accuracy", "-m", "ODTE"])
@@ -1,5 +1,5 @@
 import os
-from ...Utils import Folders
+from ...Utils import Folders, NO_RESULTS
 from ..TestBase import TestBase
 
 
@@ -17,7 +17,7 @@ class BeListTest(TestBase):
             "be_list", ["-m", "Wodt", "-s", "f1-macro"]
         )
         self.assertEqual(stderr.getvalue(), "")
-        self.assertEqual(stdout.getvalue(), "** No results found **\n")
+        self.assertEqual(stdout.getvalue(), f"{NO_RESULTS}\n")
 
     def test_be_list_nan(self):
         def swap_files(source_folder, target_folder, file_name):
@@ -25,18 +25,19 @@ class BeListTest(TestBase):
             target = os.path.join(target_folder, file_name)
             os.rename(source, target)
 
-        # move nan result from hidden to results
         file_name = (
            "results_accuracy_XGBoost_MacBookpro16_2022-05-04_11:00:"
            "35_0.json"
         )
+        # move nan result from hidden to results
         swap_files(Folders.hidden_results, Folders.results, file_name)
         try:
             # list and move nan result to hidden
             stdout, stderr = self.execute_script("be_list", ["--nan", "1"])
             self.assertEqual(stderr.getvalue(), "")
             self.check_output_file(stdout, "be_list_nan")
         except Exception:
-            # move back nan result file
+            # move back nan result file if be_list couldn't
             swap_files(Folders.results, Folders.hidden_results, file_name)
             self.fail("test_be_list_nan() should not raise exception")
+
@@ -1,4 +1,5 @@
 from ..TestBase import TestBase
+from ...Utils import NO_RESULTS
 
 
 class BePairCheckTest(TestBase):
@@ -17,11 +18,11 @@ class BePairCheckTest(TestBase):
             "be_pair_check", ["-m1", "SVC", "-m2", "ODTE"]
         )
         self.assertEqual(stderr.getvalue(), "")
-        self.assertEqual(stdout.getvalue(), "** No results found **\n")
+        self.assertEqual(stdout.getvalue(), f"{NO_RESULTS}\n")
 
     def test_be_pair_check_no_data_b(self):
         stdout, stderr = self.execute_script(
             "be_pair_check", ["-m1", "STree", "-m2", "SVC"]
         )
         self.assertEqual(stderr.getvalue(), "")
-        self.assertEqual(stdout.getvalue(), "** No results found **\n")
+        self.assertEqual(stdout.getvalue(), f"{NO_RESULTS}\n")