mirror of https://github.com/Doctorado-ML/benchmark.git
synced 2025-08-16 07:55:54 +00:00
Begin refactor be_list arguments
@@ -1352,13 +1352,14 @@ class StubReport(BaseReport):
 
 
 class Summary:
-    def __init__(self, hidden=False) -> None:
+    def __init__(self, hidden=False, compare=False) -> None:
         self.results = Files().get_all_results(hidden=hidden)
         self.data = []
         self.data_filtered = []
         self.datasets = {}
         self.models = set()
         self.hidden = hidden
+        self.compare = compare
 
     def get_models(self):
         return sorted(self.models)
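
Note: this hunk threads a new compare flag through Summary's constructor so later report calls can forward it. A minimal sketch of that plumbing, with everything except the flag handling elided from the real class:

# Minimal sketch of the compare-flag plumbing; the real Summary also loads
# results via Files().get_all_results() and keeps several bookkeeping lists.
class Summary:
    def __init__(self, hidden=False, compare=False) -> None:
        self.hidden = hidden
        self.compare = compare  # stored once here, consumed when reporting


summary = Summary(compare=True)
print(summary.compare)  # -> True
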
@@ -1498,17 +1499,24 @@ class Summary:
                 return True
             return False
         if num.isdigit() and int(num) < len(self.data) and int(num) >= 0:
-            rep = Report(self.data_filtered[int(num)]["file"], self.hidden)
+            path = (
+                Folders.hidden_results if self.hidden else Folders.results
+            )
+            file_name_result = os.path.join(
+                path, self.data_filtered[int(num)]["file"]
+            )
+            rep = Report(file_name_result, compare=self.compare)
             rep.report()
-            if excel and not self.hidden:
+            if excel:
                 if book is None:
                     file_name = Files.be_list_excel
                     book = xlsxwriter.Workbook(
                         file_name, {"nan_inf_to_errors": True}
                     )
                 excel = Excel(
-                    file_name=self.data_filtered[int(num)]["file"],
+                    file_name=file_name_result,
                     book=book,
+                    compare=self.compare,
                 )
                 excel.report()
         else:
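
Note: the replacement builds the result file's full path before constructing the Report, picking the folder from the hidden flag; that is also why the `not self.hidden` guard on the Excel branch can go away. A standalone sketch of the selection pattern; the folder names mirror the Folders attributes referenced in the diff, and the file name below is just an example:

import os

class Folders:
    # mirrors the two attributes referenced in the hunk above
    results = "results"
    hidden_results = "hidden_results"

def result_path(file_name: str, hidden: bool) -> str:
    # same ternary-plus-join pattern as the diff
    path = Folders.hidden_results if hidden else Folders.results
    return os.path.join(path, file_name)

print(result_path("some_result.json", hidden=True))
# -> hidden_results/some_result.json
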
@@ -113,7 +113,8 @@ class Files:
         if os.path.isdir(result_path):
             files_list = os.listdir(result_path)
         else:
-            raise ValueError(f"{result_path} does not exist")
+            os.makedirs(result_path)
+            files_list = []
         result = []
         prefix, suffix = self.results_suffixes()
         for result_file in files_list:
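
Note the behavior change here: a missing results directory is no longer a ValueError; it is created on the fly and treated as empty. A self-contained sketch of the new branch:

import os

def list_result_files(result_path: str) -> list:
    if os.path.isdir(result_path):
        files_list = os.listdir(result_path)
    else:
        # new behavior: create the folder and report no results
        os.makedirs(result_path)
        files_list = []
    return files_list

An equivalent branch-free variant would be os.makedirs(result_path, exist_ok=True) followed by an unconditional os.listdir(result_path).
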
@@ -5,7 +5,7 @@ from benchmark.Arguments import Arguments
 
 
 def main(args_test=None):
-    arguments = Arguments()
+    arguments = Arguments(prog="be_benchmark")
     arguments.xset("score").xset("excel").xset("tex_output").xset("quiet")
     args = arguments.parse(args_test)
     benchmark = Benchmark(score=args.score, visualize=not args.quiet)
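
Note: passing prog pins the program name shown in usage and error messages, which otherwise defaults to sys.argv[0] and looks wrong for console-script entry points. Assuming Arguments wraps argparse (its internals are not shown in this diff), the effect is the same as:

import argparse

parser = argparse.ArgumentParser(prog="be_benchmark")
parser.add_argument("--score", default="accuracy")
print(parser.format_usage())
# -> usage: be_benchmark [-h] [--score SCORE]
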
@@ -9,12 +9,12 @@ from benchmark.Arguments import Arguments
 
 def main(args_test=None):
-    arguments = Arguments()
+    arguments = Arguments(prog="be_list")
     arguments.xset("number").xset("model", required=False).xset("key")
-    arguments.xset("hidden").xset("nan").xset("score", required=False)
-    arguments.xset("excel")
+    arguments.xset("hidden").xset("nan")
     arguments.add_exclusive(["hidden", "nan"])
+    arguments.xset("score", required=False).xset("compare").xset("excel")
     args = arguments.parse(args_test)
-    data = Summary(hidden=args.hidden)
+    data = Summary(hidden=args.hidden, compare=args.compare)
     data.acquire()
     try:
         data.list_results(
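
Note: the xset calls chain, so each must return the Arguments instance. A hypothetical reduction of that builder style; the real class defines many more option specs plus an add_exclusive helper, presumably built on argparse's add_mutually_exclusive_group, neither of which is reproduced in full here:

import argparse

class Arguments:
    _specs = {
        "hidden": (["--hidden"], {"action": "store_true"}),
        "nan": (["--nan"], {"action": "store_true"}),
        "compare": (["--compare"], {"action": "store_true"}),
    }

    def __init__(self, prog=None):
        self.parser = argparse.ArgumentParser(prog=prog)

    def xset(self, name, **overrides):
        flags, kwargs = self._specs[name]
        self.parser.add_argument(*flags, **{**kwargs, **overrides})
        return self  # returning self is what enables chaining

    def parse(self, args_test=None):
        return self.parser.parse_args(args_test)


args = Arguments(prog="be_list").xset("hidden").xset("compare").parse([])
print(args.hidden, args.compare)  # -> False False
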
@@ -24,11 +24,10 @@ def main(args_test=None):
             number=args.number,
         )
         is_test = args_test is not None
-        if not args.nan:
-            excel_generated = data.manage_results(args.excel, is_test)
-            if args.excel and excel_generated:
-                print(f"Generated file: {Files.be_list_excel}")
-                Files.open(Files.be_list_excel, is_test)
+        excel_generated = data.manage_results(args.excel, is_test)
+        if args.excel and excel_generated:
+            print(f"Generated file: {Files.be_list_excel}")
+            Files.open(Files.be_list_excel, is_test)
     except ValueError as e:
         print(e)
         return
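
Note: with the nan guard removed, the Excel block always runs and the dedent is the whole change. The surviving is_test flag is the usual trick for letting tests drive main with an argv-style list; a minimal sketch of that pattern, with a made-up message in place of the real file handling:

import argparse

def main(args_test=None):
    parser = argparse.ArgumentParser(prog="be_list")
    parser.add_argument("--excel", action="store_true")
    args = parser.parse_args(args_test)  # None -> read sys.argv as usual
    is_test = args_test is not None
    if args.excel:
        # presumably Files.open uses is_test to avoid launching a viewer
        # during tests; here we just report the mode
        print(f"excel generated (test run: {is_test})")

main(["--excel"])  # prints: excel generated (test run: True)
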