Mirror of https://github.com/Doctorado-ML/benchmark.git (synced 2025-08-16 07:55:54 +00:00)
Begin be_benchmark test
@@ -13,7 +13,7 @@ from sklearn.model_selection import (
     GridSearchCV,
     cross_validate,
 )
-from .Utils import Folders, Files
+from .Utils import Folders, Files, NO_RESULTS
 from .Models import Models
 from .Arguments import EnvData

@@ -157,7 +157,7 @@ class BestResults:
                 self._process_datafile(results, data, name)
                 found = True
         if not found:
-            raise ValueError("** No results found **")
+            raise ValueError(NO_RESULTS)
         # Build best results json file
         output = {}
         datasets = Datasets()
@@ -8,7 +8,14 @@ import subprocess
 import xlsxwriter
 import numpy as np
 from .Experiments import Datasets, BestResults
-from .Utils import Folders, Files, Symbols, BEST_ACCURACY_STREE, TextColor
+from .Utils import (
+    Folders,
+    Files,
+    Symbols,
+    BEST_ACCURACY_STREE,
+    TextColor,
+    NO_RESULTS,
+)


 class BaseReport(abc.ABC):
@@ -1140,7 +1147,7 @@ class Summary:
             score, model, input_data, sort_key, number
         )
         if data == []:
-            raise ValueError("** No results found **")
+            raise ValueError(NO_RESULTS)
         max_file = max(len(x["file"]) for x in data)
         max_title = max(len(x["title"]) for x in data)
         if self.hidden:
@@ -1278,7 +1285,7 @@ class Summary:
             else [x for x in haystack if x[criterion] == value]
         )
         if haystack == []:
-            raise ValueError("** No results found **")
+            raise ValueError(NO_RESULTS)
         return (
             sorted(
                 haystack,
@@ -2,6 +2,7 @@ import os
 import subprocess

 BEST_ACCURACY_STREE = 40.282203
+NO_RESULTS = "** No results found **"


 class Folders:
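The error message now lives in a single NO_RESULTS constant, so the raise sites and the test assertions cannot drift apart. A minimal, self-contained sketch of that pattern follows; only NO_RESULTS and its value come from the commit, the surrounding names are illustrative.

    # Sketch of the shared-constant pattern; compile_results is a stand-in.
    NO_RESULTS = "** No results found **"

    def compile_results(results):
        if not results:
            # every raise site uses the constant instead of a string literal
            raise ValueError(NO_RESULTS)
        return results

    try:
        compile_results([])
    except ValueError as e:
        assert str(e) == NO_RESULTS  # tests compare against the same constant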
@@ -7,14 +7,18 @@ from benchmark.Arguments import Arguments
 def main(args_test=None):
     arguments = Arguments()
     arguments.xset("score").xset("excel").xset("tex_output")
-    ar = arguments.parse(args_test)
-    benchmark = Benchmark(score=ar.score, visualize=True)
-    benchmark.compile_results()
-    benchmark.save_results()
-    benchmark.report(ar.tex_output)
-    benchmark.exreport()
-    if ar.excel:
-        benchmark.excel()
-        Files.open(benchmark.get_excel_file_name())
-    if ar.tex_output:
-        print(f"File {benchmark.get_tex_file()} generated")
+    args = arguments.parse(args_test)
+    benchmark = Benchmark(score=args.score, visualize=True)
+    try:
+        benchmark.compile_results()
+    except ValueError as e:
+        print(e)
+    else:
+        benchmark.save_results()
+        benchmark.report(args.tex_output)
+        benchmark.exreport()
+        if args.excel:
+            benchmark.excel()
+            Files.open(benchmark.get_excel_file_name())
+        if args.tex_output:
+            print(f"File {benchmark.get_tex_file()} generated")
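main() now wraps compile_results() in try/except/else, so the NO_RESULTS ValueError is printed instead of crashing the script and the reporting steps only run when compilation succeeded. A rough standalone sketch of that control flow; DummyBenchmark below is a stand-in, not the repo's Benchmark class.

    # Illustrative sketch of the new try/except/else flow in main().
    NO_RESULTS = "** No results found **"

    class DummyBenchmark:
        def __init__(self, results):
            self.results = results

        def compile_results(self):
            if not self.results:
                raise ValueError(NO_RESULTS)

        def report(self):
            print("report generated")

    def run(benchmark):
        try:
            benchmark.compile_results()
        except ValueError as e:
            print(e)            # prints "** No results found **" and stops
        else:
            benchmark.report()  # only reached when compilation succeeded

    run(DummyBenchmark([]))         # no data: prints the NO_RESULTS message
    run(DummyBenchmark(["run1"]))   # prints "report generated"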
@@ -75,6 +75,14 @@ class BenchmarkTest(TestBase):
             benchmark.exreport()
             self.check_output_file(stdout, "exreport_error")

+    def test_exreport_no_data(self):
+        benchmark = Benchmark("f1-weighted", visualize=False)
+        benchmark.compile_results()
+        benchmark.save_results()
+        with patch(self.output, new=StringIO()) as stdout:
+            benchmark.exreport()
+        self.check_output_file(stdout, "exreport_error")
+
     def test_tex_output(self):
         benchmark = Benchmark("accuracy", visualize=False)
         benchmark.compile_results()
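The new test_exreport_no_data captures printed output by patching the stream named in self.output with a StringIO buffer. A minimal sketch of that capture technique, assuming the patched target resolves to sys.stdout (that target is an assumption, not confirmed by the diff):

    # Standalone sketch of the stdout-capture pattern used by the tests above.
    from io import StringIO
    from unittest.mock import patch

    def exreport():
        print("** No results found **")

    with patch("sys.stdout", new=StringIO()) as stdout:
        exreport()

    assert stdout.getvalue().strip() == "** No results found **"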
@@ -2,6 +2,7 @@ from io import StringIO
 from unittest.mock import patch
 from .TestBase import TestBase
 from ..Results import Summary
+from ..Utils import NO_RESULTS


 class SummaryTest(TestBase):
@@ -228,4 +229,4 @@ class SummaryTest(TestBase):
         report.acquire()
         with self.assertRaises(ValueError) as msg:
             report.list_results(score="f1-macro", model="STree")
-        self.assertEqual(str(msg.exception), "** No results found **")
+        self.assertEqual(str(msg.exception), NO_RESULTS)
@@ -17,6 +17,7 @@ from .scripts.Be_Report_test import BeReportTest
 from .scripts.Be_Summary_test import BeSummaryTest
 from .scripts.Be_Grid_test import BeGridTest
 from .scripts.Be_Best_test import BeBestTest
+from .scripts.Be_Benchmark_test import BeBenchmarkTest

 all = [
     "UtilTest",
@@ -38,4 +39,5 @@ all = [
     "BeSummaryTest",
     "BeGridTest",
     "BeBestTest",
+    "BeBenchmarkTest",
 ]
benchmark/tests/scripts/Be_Benchmark_test.py (new file, 24 lines)
@@ -0,0 +1,24 @@
+import os
+import json
+from ...Utils import Folders, Files
+from ..TestBase import TestBase
+
+
+class BeBenchmarkTest(TestBase):
+    def setUp(self):
+        self.prepare_scripts_env()
+
+    def tearDown(self) -> None:
+        # self.remove_files(
+        #     [Files.best_results("accuracy", "ODTE")],
+        #     Folders.results,
+        # )
+        return super().tearDown()
+
+    def test_be_benchmark(self):
+        # stdout, stderr = self.execute_script(
+        #     "be_benchmark", ["-s", "accuracy"]
+        # )
+        # self.assertEqual(stderr.getvalue(), "")
+        # self.check_output_file(stdout, "be_best_all")
+        pass
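For reference, one way to run just this new test class once its body is enabled; this runner snippet is not part of the commit and assumes the benchmark package is importable from the current environment.

    # Hypothetical runner for the new test class added in this commit.
    import unittest

    from benchmark.tests.scripts.Be_Benchmark_test import BeBenchmarkTest

    suite = unittest.defaultTestLoader.loadTestsFromTestCase(BeBenchmarkTest)
    unittest.TextTestRunner(verbosity=2).run(suite)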