Mirror of https://github.com/Doctorado-ML/benchmark.git (synced 2025-08-15 23:45:54 +00:00)
Begin best results per dataset report
@@ -758,3 +758,23 @@ class Summary:
            if len(haystack) > 0
            else {}
        )

    def best_results_datasets(self, score="accuracy") -> dict:
        """Get the best results for each dataset"""
        dt = Datasets()
        best_results = {}
        for dataset in dt:
            best_results[dataset] = (1, "", "", "")
        haystack = [x for x in self.data if x["score"] == score]
        # Search for the best results for each dataset
        for entry in haystack:
            print(entry)
            for dataset in entry["results"]:
                if dataset["score"] < best_results[dataset][0]:
                    best_results[dataset] = (
                        dataset["score"],
                        dataset["hyperparameters"],
                        entry["file"],
                        entry["title"],
                    )
        return best_results
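For context, a minimal usage sketch of the new method. It mirrors what src/best.py below does, and assumes Results.py is importable and that benchmark result files have already been generated; the shape of the returned tuples follows the (score, hyperparameters, file, title) assignment in the loop above.

# Hypothetical usage sketch; assumes the repository's Results module is on the path
# and that results have already been produced by earlier benchmark runs.
from Results import Summary

summary = Summary()
summary.acquire()  # load the stored experiment results

best = summary.best_results_datasets(score="accuracy")
# best maps each dataset name to a (score, hyperparameters, file, title) tuple
for dataset, (value, hyperparameters, file_name, title) in best.items():
    print(f"{dataset}: {value} in {file_name} ({title})")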
src/best.py (new file, 34 lines)
@@ -0,0 +1,34 @@
import argparse
from Results import Summary
from Utils import EnvDefault


def parse_arguments():
    ap = argparse.ArgumentParser()
    ap.add_argument(
        "-s",
        "--score",
        type=str,
        action=EnvDefault,
        envvar="score",
        required=True,
        help="score name {accuracy, f1_micro, f1_macro, all}",
    )
    args = ap.parse_args()
    return (args.score,)


(score,) = parse_arguments()

all_metrics = ["accuracy", "f1-macro", "f1-micro"]

metrics = all_metrics if score == "all" else [score]

summary = Summary()
summary.acquire()

for metric in metrics:
    title = f"BEST RESULTS of {metric} for datasets"
    best = summary.best_results_datasets(score=metric)
    for key, item in best.items():
        print(f"{key}: {item}")
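Utils.EnvDefault is not shown in this diff. The way it is used here (action=EnvDefault, envvar="score", required=True) matches a common argparse recipe, sketched below purely as an assumption about what such an action typically does, not as the repository's actual implementation.

import argparse
import os


class EnvDefault(argparse.Action):
    """Argparse action that falls back to an environment variable.

    Hypothetical sketch; the real Utils.EnvDefault may differ.
    """

    def __init__(self, envvar, required=True, default=None, **kwargs):
        # Use the environment variable as the default when it is set,
        # and drop the required flag once a default is available.
        if envvar and envvar in os.environ:
            default = os.environ[envvar]
        if required and default is not None:
            required = False
        super().__init__(default=default, required=required, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, values)

With an action like that, the score can presumably be supplied either on the command line (python src/best.py -s accuracy) or through a score environment variable; passing all makes the script report each metric in all_metrics in turn.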