Mirror of https://github.com/Doctorado-ML/benchmark.git, synced 2025-08-15 23:45:54 +00:00
Add show result
@@ -684,6 +684,38 @@ class Summary:
                 )
             )
 
+    def show_result(self, file_name: str, metric: float = 0.0) -> None:
+        def whites(n: int) -> str:
+            return " " * n + "*"
+
+        result = StubReport(os.path.join(Folders.results, file_name))
+        length = 80
+        print("*" * length)
+        print("*" + whites(length - 2))
+        print(f"* {file_name:60s}" + whites(length - 63))
+        print("*" + whites(length - 2))
+        print(
+            f"* Date : {result.data['date']:15s} Time: "
+            f"{result.data['time']:18s} Time Spent: "
+            f"{result.data['duration']:9,.2f} secs." + whites(length - 78)
+        )
+        print(
+            f"* Model: {result.data['model']:15s} Score: "
+            f"{result.data['score_name']:15s} "
+            f" Stratified: {str(result.data['stratified']):15s}"
+            + whites(length - 78)
+        )
+        seeds = str(result.data["seeds"])
+        seeds_len = len(seeds)
+        print(
+            f"* Seeds: {seeds:{seeds_len}s} Platform: "
+            f"{result.data['platform']:17s} " + whites(length - 79)
+        )
+        if metric != 0.0:
+            print(f"* Metric: {metric:10.7f}" + whites(length - 21))
+        print("*" + whites(length - 2))
+        print("*" * length)
+
     def best_result(
         self, criterion=None, value=None, score="accuracy"
     ) -> dict:
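A note on the padding arithmetic in the new method: the inner helper whites(n) returns n spaces followed by a closing "*", so a row built as "*" + whites(length - 2) is exactly length characters wide (one border character on each side plus length - 2 spaces). A minimal standalone sketch of that invariant, outside the class:

def whites(n: int) -> str:
    return " " * n + "*"

length = 80
row = "*" + whites(length - 2)  # '*' + 78 spaces + '*'
assert len(row) == length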
@@ -694,4 +726,8 @@ class Summary:
             if criterion is None or value is None
             else [x for x in haystack if x[criterion] == value]
         )
-        return sorted(haystack, key=lambda x: x["metric"], reverse=True)[0]
+        return (
+            sorted(haystack, key=lambda x: x["metric"], reverse=True)[0]
+            if len(haystack) > 0
+            else {}
+        )
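This change makes the lookup total: the old single-line return raised IndexError when the filtered haystack was empty, while the new version falls back to {}. A quick isolated sketch of the same pattern (the data is made up, and best_result's score argument is not modeled here):

haystack = [  # hypothetical acquired results
    {"model": "ODTE", "metric": 0.97, "file": "results_odte.json"},
    {"model": "STree", "metric": 0.95, "file": "results_stree.json"},
]

def best(criterion=None, value=None) -> dict:
    # Same shape as Summary.best_result: filter, then take the top
    # entry by metric, or {} when nothing matched.
    selected = (
        haystack
        if criterion is None or value is None
        else [x for x in haystack if x[criterion] == value]
    )
    return (
        sorted(selected, key=lambda x: x["metric"], reverse=True)[0]
        if len(selected) > 0
        else {}
    )

print(best("model", "ODTE"))     # {'model': 'ODTE', 'metric': 0.97, ...}
print(best("model", "Bagging"))  # {} instead of IndexError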
@@ -2,6 +2,14 @@ from Results import Summary
 
 summary = Summary()
 summary.acquire()
 print(summary.best_result())
 print(summary.best_result(criterion="model", value="ODTE"))
 summary.list()
+
+for metric in ["accuracy", "f1_macro", "f1_micro"]:
+    for model in ["STree", "ODTE"]:
+        best = summary.best_result(
+            criterion="model", value=model, score=metric
+        )
+        summary.show_result(
+            best["file"], best["metric"]
+        ) if best != {} else print(
+            "No best result for {} {}".format(model, metric)
+        )
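The added loop leans on the {} fallback from the previous hunk and drives show_result from a conditional expression evaluated only for its side effects. An equivalent, more conventional reading with a plain if/else (same behavior, shown only for clarity):

for metric in ["accuracy", "f1_macro", "f1_micro"]:
    for model in ["STree", "ODTE"]:
        best = summary.best_result(criterion="model", value=model, score=metric)
        if best != {}:
            # best carries at least "file" and "metric" for show_result
            summary.show_result(best["file"], best["metric"])
        else:
            print("No best result for {} {}".format(model, metric))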