Mirror of https://github.com/Doctorado-ML/benchmark.git (synced 2025-08-17 16:35:54 +00:00)
Begin be_benchmark test
@@ -13,7 +13,7 @@ from sklearn.model_selection import (
     GridSearchCV,
     cross_validate,
 )
-from .Utils import Folders, Files
+from .Utils import Folders, Files, NO_RESULTS
 from .Models import Models
 from .Arguments import EnvData
 
@@ -157,7 +157,7 @@ class BestResults:
             self._process_datafile(results, data, name)
             found = True
         if not found:
-            raise ValueError("** No results found **")
+            raise ValueError(NO_RESULTS)
         # Build best results json file
         output = {}
         datasets = Datasets()
@@ -8,7 +8,14 @@ import subprocess
 import xlsxwriter
 import numpy as np
 from .Experiments import Datasets, BestResults
-from .Utils import Folders, Files, Symbols, BEST_ACCURACY_STREE, TextColor
+from .Utils import (
+    Folders,
+    Files,
+    Symbols,
+    BEST_ACCURACY_STREE,
+    TextColor,
+    NO_RESULTS,
+)
 
 
 class BaseReport(abc.ABC):
@@ -1140,7 +1147,7 @@ class Summary:
             score, model, input_data, sort_key, number
         )
         if data == []:
-            raise ValueError("** No results found **")
+            raise ValueError(NO_RESULTS)
         max_file = max(len(x["file"]) for x in data)
         max_title = max(len(x["title"]) for x in data)
         if self.hidden:
@@ -1278,7 +1285,7 @@ class Summary:
             else [x for x in haystack if x[criterion] == value]
         )
         if haystack == []:
-            raise ValueError("** No results found **")
+            raise ValueError(NO_RESULTS)
         return (
             sorted(
                 haystack,
@@ -2,6 +2,7 @@ import os
 import subprocess
 
 BEST_ACCURACY_STREE = 40.282203
+NO_RESULTS = "** No results found **"
 
 
 class Folders:
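
The NO_RESULTS constant added above centralizes the "** No results found **" message, so the raise sites in BestResults and Summary and the assertion in SummaryTest can never drift apart. A minimal standalone sketch of the pattern (find_results is a hypothetical helper, not code from this repository):

# Sketch only: a shared constant keeps the raising side and the test in sync.
NO_RESULTS = "** No results found **"


def find_results(results):
    # Hypothetical example: fail loudly when nothing was collected.
    if not results:
        raise ValueError(NO_RESULTS)
    return results


try:
    find_results([])
except ValueError as error:
    assert str(error) == NO_RESULTS  # the message cannot drift out of sync
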
@@ -7,14 +7,18 @@ from benchmark.Arguments import Arguments
 def main(args_test=None):
     arguments = Arguments()
     arguments.xset("score").xset("excel").xset("tex_output")
-    ar = arguments.parse(args_test)
-    benchmark = Benchmark(score=ar.score, visualize=True)
-    benchmark.compile_results()
-    benchmark.save_results()
-    benchmark.report(ar.tex_output)
-    benchmark.exreport()
-    if ar.excel:
-        benchmark.excel()
-        Files.open(benchmark.get_excel_file_name())
-    if ar.tex_output:
-        print(f"File {benchmark.get_tex_file()} generated")
+    args = arguments.parse(args_test)
+    benchmark = Benchmark(score=args.score, visualize=True)
+    try:
+        benchmark.compile_results()
+    except ValueError as e:
+        print(e)
+    else:
+        benchmark.save_results()
+        benchmark.report(args.tex_output)
+        benchmark.exreport()
+        if args.excel:
+            benchmark.excel()
+            Files.open(benchmark.get_excel_file_name())
+        if args.tex_output:
+            print(f"File {benchmark.get_tex_file()} generated")
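
With compile_results() now able to raise ValueError(NO_RESULTS), main() prints the message and skips the reporting steps instead of crashing. A rough sketch of that control flow with a stand-in class (FakeBenchmark is hypothetical; only the try/except/else shape mirrors the change above):

NO_RESULTS = "** No results found **"


class FakeBenchmark:
    # Stand-in for the real Benchmark class, used only to illustrate the flow.
    def compile_results(self):
        raise ValueError(NO_RESULTS)  # simulate an empty results folder

    def save_results(self):
        print("saved")


benchmark = FakeBenchmark()
try:
    benchmark.compile_results()
except ValueError as e:
    print(e)  # prints "** No results found **" and stops here
else:
    benchmark.save_results()  # only reached when compile_results() succeeds
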
@@ -75,6 +75,14 @@ class BenchmarkTest(TestBase):
             benchmark.exreport()
         self.check_output_file(stdout, "exreport_error")
 
+    def test_exreport_no_data(self):
+        benchmark = Benchmark("f1-weighted", visualize=False)
+        benchmark.compile_results()
+        benchmark.save_results()
+        with patch(self.output, new=StringIO()) as stdout:
+            benchmark.exreport()
+        self.check_output_file(stdout, "exreport_error")
+
     def test_tex_output(self):
         benchmark = Benchmark("accuracy", visualize=False)
         benchmark.compile_results()
@@ -2,6 +2,7 @@ from io import StringIO
 from unittest.mock import patch
 from .TestBase import TestBase
 from ..Results import Summary
+from ..Utils import NO_RESULTS
 
 
 class SummaryTest(TestBase):
@@ -228,4 +229,4 @@ class SummaryTest(TestBase):
         report.acquire()
         with self.assertRaises(ValueError) as msg:
             report.list_results(score="f1-macro", model="STree")
-        self.assertEqual(str(msg.exception), "** No results found **")
+        self.assertEqual(str(msg.exception), NO_RESULTS)
@@ -17,6 +17,7 @@ from .scripts.Be_Report_test import BeReportTest
 from .scripts.Be_Summary_test import BeSummaryTest
 from .scripts.Be_Grid_test import BeGridTest
 from .scripts.Be_Best_test import BeBestTest
+from .scripts.Be_Benchmark_test import BeBenchmarkTest
 
 all = [
     "UtilTest",
@@ -38,4 +39,5 @@ all = [
     "BeSummaryTest",
     "BeGridTest",
     "BeBestTest",
+    "BeBenchmarkTest",
 ]
benchmark/tests/scripts/Be_Benchmark_test.py (new file, 24 lines)
@@ -0,0 +1,24 @@
+import os
+import json
+from ...Utils import Folders, Files
+from ..TestBase import TestBase
+
+
+class BeBenchmarkTest(TestBase):
+    def setUp(self):
+        self.prepare_scripts_env()
+
+    def tearDown(self) -> None:
+        # self.remove_files(
+        #     [Files.best_results("accuracy", "ODTE")],
+        #     Folders.results,
+        # )
+        return super().tearDown()
+
+    def test_be_benchmark(self):
+        # stdout, stderr = self.execute_script(
+        #     "be_benchmark", ["-s", "accuracy"]
+        # )
+        # self.assertEqual(stderr.getvalue(), "")
+        # self.check_output_file(stdout, "be_best_all")
+        pass
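
The new test class is also registered in the tests package __init__ shown above. If the package layout matches the imports in the diff, the class can be run on its own with the standard unittest loader (a sketch, assuming the benchmark package is importable on PYTHONPATH):

import unittest

# The module path is taken from the new file added in this commit.
from benchmark.tests.scripts.Be_Benchmark_test import BeBenchmarkTest

suite = unittest.TestLoader().loadTestsFromTestCase(BeBenchmarkTest)
unittest.TextTestRunner(verbosity=2).run(suite)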