mirror of https://github.com/Doctorado-ML/benchmark.git, synced 2025-08-17 16:35:54 +00:00
37 lines · 805 B · Python · Executable File
#!/usr/bin/env python
import argparse

from Results import Benchmark
from Utils import EnvDefault, Files
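
# Note: EnvDefault is a custom argparse action shipped in this repository's
# Utils module. Judging by its use below, it appears to let a required option
# fall back to the value of an environment variable (here "score") when the
# flag is not passed on the command line; this is an assumption based on the
# common EnvDefault argparse recipe, not on the Utils source itself.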


def parse_arguments():
    ap = argparse.ArgumentParser()
    ap.add_argument(
        "-s",
        "--score",
        action=EnvDefault,
        envvar="score",
        type=str,
        required=True,
        help="score name {accuracy, f1_macro, ...}",
    )
    ap.add_argument(
        "-x",
        "--excel",
        # type=bool is unreliable in argparse (any non-empty string, even
        # "False", is truthy), so store the option as a simple flag instead
        action="store_true",
        help="Generate Excel File",
    )
    args = ap.parse_args()
    return (args.score, args.excel)


# Compile the stored results for the requested score and build the reports
(score, excel) = parse_arguments()
benchmark = Benchmark()
benchmark.compile_results(score)
benchmark.report(score)
benchmark.exreport(score)
if excel:
    # Generate the Excel report and open it with the default application
    benchmark.excel(score)
    Files.open(benchmark.get_excel_file_name(score))
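
# A minimal usage sketch (assuming this file is saved as benchmark.py, a
# hypothetical name, and that results to compile already exist):
#
#     python benchmark.py -s accuracy -x
#
# or, letting the assumed EnvDefault action pick the score up from the
# environment:
#
#     score=f1_macro python benchmark.py -x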