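"""Print, for each selected metric, the best result recorded per dataset
by Results.Summary."""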
import argparse
import json

from Results import Summary
from Utils import EnvDefault

def parse_arguments():
    ap = argparse.ArgumentParser()
    ap.add_argument(
        "-s",
        "--score",
        type=str,
        action=EnvDefault,
        envvar="score",
        required=True,
        help="score name {accuracy, f1-macro, f1-micro, all}",
    )
    args = ap.parse_args()
    return (args.score,)

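# Example invocations (the script name below is hypothetical; EnvDefault is
# assumed to fall back to the "score" environment variable when the flag is
# omitted on the command line):
#   python be_summary.py --score accuracy
#   score=f1-macro python be_summary.py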
(score,) = parse_arguments()

all_metrics = ["accuracy", "f1-macro", "f1-micro"]
metrics = all_metrics if score == "all" else [score]

summary = Summary()
summary.acquire()

# report layout widths: nl is the text column width, num the separator width
nl = 50
num = 100

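# Each value in the mapping returned by best_results_datasets is assumed,
# from the indexing below, to hold the best score (item[0]), its
# hyperparameters (item[1]), a result label (item[2]) and a description
# (item[3]); this is inferred from the print statements, not documented.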
for metric in metrics:
    title = f"BEST RESULTS of {metric} for datasets"
    print(title)
    best = summary.best_results_datasets(score=metric)
    for key, item in best.items():
        print(f"{key:30s} {item[2]:{nl}s}")
        print("-" * num)
        print(f"{item[0]:30.7f} {json.dumps(item[1]):{nl}s}")
        print("-" * num)
        print(f"{item[3]:{nl+num}s}")
        print("*" * num)