diff --git a/src/Results.py b/src/Results.py
index ab8637d..caf4e12 100644
--- a/src/Results.py
+++ b/src/Results.py
@@ -758,3 +758,24 @@ class Summary:
             if len(haystack) > 0
             else {}
         )
+
+    def best_results_datasets(self, score="accuracy") -> dict:
+        """Get the best result (score, hyperparameters, file, title) per dataset"""
+        dt = Datasets()
+        best_results = {}
+        for dataset in dt:
+            best_results[dataset] = (0.0, "", "", "")
+        haystack = [x for x in self.data if x["score"] == score]
+        # Search for the best result of each dataset (higher score wins)
+        for entry in haystack:
+            for result in entry["results"]:
+                # each result record is assumed to carry its dataset name
+                dataset = result["dataset"]
+                if result["score"] > best_results[dataset][0]:
+                    best_results[dataset] = (
+                        result["score"],
+                        result["hyperparameters"],
+                        entry["file"],
+                        entry["title"],
+                    )
+        return best_results
diff --git a/src/best.py b/src/best.py
new file mode 100644
index 0000000..7197a57
--- /dev/null
+++ b/src/best.py
@@ -0,0 +1,35 @@
+import argparse
+from Results import Summary
+from Utils import EnvDefault
+
+
+def parse_arguments():
+    ap = argparse.ArgumentParser()
+    ap.add_argument(
+        "-s",
+        "--score",
+        type=str,
+        action=EnvDefault,
+        envvar="score",
+        required=True,
+        help="score name {accuracy, f1_macro, f1_micro, all}",
+    )
+    args = ap.parse_args()
+    return (args.score,)
+
+
+(score,) = parse_arguments()
+
+all_metrics = ["accuracy", "f1_macro", "f1_micro"]
+
+metrics = all_metrics if score == "all" else [score]
+
+summary = Summary()
+summary.acquire()
+
+for metric in metrics:
+    title = f"BEST RESULTS of {metric} for datasets"
+    print(title)
+    best = summary.best_results_datasets(score=metric)
+    for key, item in best.items():
+        print(f"{key}: {item}")
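
Note on the assumed report schema: best_results_datasets() relies on every entry of Summary.data exposing "score", "file" and "title" fields plus a "results" collection whose records carry "dataset", "score" and "hyperparameters". Those field names are inferred from the method body rather than documented in this change, so the entry below is only a minimal sketch under that assumption, with hypothetical values:

    entry = {
        "score": "accuracy",
        "file": "results_accuracy_example.json",  # hypothetical report file
        "title": "example experiment",            # hypothetical title
        "results": [
            {"dataset": "iris", "score": 0.95, "hyperparameters": {"C": 1.0}},
            {"dataset": "wine", "score": 0.98, "hyperparameters": {"C": 0.5}},
        ],
    }

With entries of that shape, summary.best_results_datasets(score="accuracy") returns a dict keyed by dataset name whose values are (score, hyperparameters, file, title) tuples, which is what src/best.py prints for each requested metric, e.g. via python src/best.py -s accuracy, or with a score environment variable if Utils.EnvDefault falls back to it as its envvar="score" argument suggests.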