diff --git a/src/Results.py b/src/Results.py
index caf4e12..1f8bba2 100644
--- a/src/Results.py
+++ b/src/Results.py
@@ -659,6 +659,7 @@ class Summary:
     def __init__(self) -> None:
         self.results = Files().get_all_results()
         self.data = []
+        self.datasets = {}
 
     def acquire(self) -> None:
         """Get all results"""
@@ -684,6 +685,7 @@ class Summary:
                 file=result,
                 metric=report.accuracy / BEST_ACCURACY_STREE,
             )
+            self.datasets[result] = report.lines
            self.data.append(entry)
 
     def list(self) -> None:
@@ -768,10 +770,9 @@ class Summary:
         haystack = [x for x in self.data if x["score"] == score]
         # Search for the best results for each dataset
         for entry in haystack:
-            print(entry)
-            for dataset in entry["results"]:
-                if dataset["score"] < best_results[dataset][0]:
-                    best_results[dataset] = (
+            for dataset in self.datasets[entry["file"]]:
+                if dataset["score"] < best_results[dataset["dataset"]][0]:
+                    best_results[dataset["dataset"]] = (
                         dataset["score"],
                         dataset["hyperparameters"],
                         entry["file"],
diff --git a/src/best.py b/src/best.py
index 7197a57..f61f87c 100644
--- a/src/best.py
+++ b/src/best.py
@@ -1,4 +1,5 @@
 import argparse
+import json
 
 from Results import Summary
 from Utils import EnvDefault
@@ -27,8 +28,15 @@
 metrics = all_metrics if score == "all" else [score]
 summary = Summary()
 summary.acquire()
+nl = 50
+num = 100
 for metric in metrics:
     title = f"BEST RESULTS of {metric} for datasets"
     best = summary.best_results_datasets(score=metric)
     for key, item in best.items():
-        print(f"{key}: {item}")
+        print(f"{key:30s} {item[2]:{nl}s}")
+        print("-" * num)
+        print(f"{item[0]:30.7f} {json.dumps(item[1]):{nl}s}")
+        print("-" * num)
+        print(f"{item[3]:{nl+num}s}")
+        print("*" * num)
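
For reference, a minimal sketch (not part of the patch) of the data shapes the fixed loop and the new printing code assume. The keys "dataset", "score" and "hyperparameters" and the tuple layout (score, hyperparameters, file, ...) come from the diff; the file name, dataset names, concrete values and the fourth tuple element are invented placeholders.

    # Hypothetical example only: keys match the diff, values are made up.
    # self.datasets maps a result file to the per-dataset lines of its report.
    datasets = {
        "results_accuracy_stree.json": [
            {"dataset": "balance-scale", "score": 0.9712, "hyperparameters": {"C": 7}},
            {"dataset": "wine", "score": 0.9833, "hyperparameters": {"kernel": "rbf"}},
        ],
    }
    # best_results_datasets() is expected to return, per dataset, a tuple that
    # best.py indexes as item[0]=score, item[1]=hyperparameters, item[2]=file,
    # item[3]=an additional text field (left as "..." here).
    best = {
        "balance-scale": (0.9712, {"C": 7}, "results_accuracy_stree.json", "..."),
    }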