Put Summary class in Results

Add criterion summary and support different scores
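In short, the new Summary API, as exercised by the rewritten driver script at
the end of this diff, is used like this:

    from Results import Summary

    summary = Summary()
    summary.acquire()  # parse every result file into summary.data
    print(summary.best_result())  # best entry by normalized accuracy
    print(summary.best_result(criterion="model", value="ODTE"))
    summary.list()  # one line per result file with its metric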
@@ -6,7 +6,7 @@ import subprocess
 import xlsxwriter
 from tqdm import tqdm
 from Experiments import Datasets, BestResults
-from Utils import Folders, Files, Symbols
+from Utils import Folders, Files, Symbols, BEST_ACCURACY_STREE


 class BaseReport(abc.ABC):
@@ -165,7 +165,7 @@ class Report(BaseReport):
         )
         self.header_line(
             f" Accuracy compared to stree_default (liblinear-ovr) .: "
-            f"{accuracy/40.282203:7.4f}"
+            f"{accuracy/BEST_ACCURACY_STREE:7.4f}"
         )
         self.header_line("*")

@@ -231,7 +231,7 @@ class ReportBest(BaseReport):
         )
         self.header_line(
             f" Scores compared to stree_default accuracy (liblinear-ovr) .: "
-            f"{accuracy/40.282203:7.4f}"
+            f"{accuracy/BEST_ACCURACY_STREE:7.4f}"
         )
         self.header_line("*")

@@ -354,7 +354,7 @@ class Excel(BaseReport):
         self.row += 1
         message = (
             f"** Accuracy compared to stree_default (liblinear-ovr) .: "
-            f"{accuracy/40.282203:7.4f}"
+            f"{accuracy/BEST_ACCURACY_STREE:7.4f}"
         )
         bold = self.book.add_format({"bold": True, "font_size": 14})
         self.sheet.write(self.row + 1, 0, message, bold)
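All three reports divide the accumulated accuracy by the same reference value,
now named in one place instead of repeated as a magic number. A minimal sketch
of the normalization (the accuracy value here is hypothetical):

    BEST_ACCURACY_STREE = 40.282203  # reference constant, defined in Utils

    accuracy = 38.5  # hypothetical accumulated accuracy for a results file
    print(f"{accuracy / BEST_ACCURACY_STREE:7.4f}")  # -> " 0.9558"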
@@ -626,3 +626,72 @@ class Benchmark:
             footer()

         book.close()
+
+
+class StubReport(BaseReport):
+    def __init__(self, file_name):
+        super().__init__(file_name=file_name, best_file=False)
+
+    def print_line(self, line) -> None:
+        pass
+
+    def header(self) -> None:
+        pass
+
+    def footer(self, accuracy: float) -> None:
+        self.accuracy = accuracy
+
+
+class Summary:
+    def __init__(self) -> None:
+        self.results = Files().get_all_results()
+        self.data = []
+
+    def acquire(self) -> None:
+        """Get all results"""
+        for result in self.results:
+            (
+                score,
+                model,
+                platform,
+                date,
+                time,
+                stratified,
+            ) = Files().split_file_name(result)
+            report = StubReport(os.path.join(Folders.results, result))
+            report.report()
+            entry = dict(
+                score=score,
+                model=model,
+                platform=platform,
+                date=date,
+                time=time,
+                stratified=stratified,
+                file=result,
+                metric=report.accuracy / BEST_ACCURACY_STREE,
+            )
+            self.data.append(entry)
+
+    def list(self) -> None:
+        """Print the list of results"""
+        max_length = max(len(x["file"]) for x in self.data)
+        print(
+            "\n".join(
+                [
+                    f"{x['file']:{max_length}s} {x['metric']:7.3f}"
+                    for x in self.data
+                ]
+            )
+        )
+
+    def best_result(
+        self, criterion=None, value=None, score="accuracy"
+    ) -> dict:
+        # First filter the same score results (accuracy, f1, ...)
+        haystack = [x for x in self.data if x["score"] == score]
+        haystack = (
+            haystack
+            if criterion is None or value is None
+            else [x for x in haystack if x[criterion] == value]
+        )
+        return sorted(haystack, key=lambda x: x["metric"], reverse=True)[0]
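best_result first narrows the acquired entries to one score type, then
optionally filters on any criterion/value pair, and returns the entry with the
highest normalized metric. The same selection logic, run on hypothetical
sample data:

    data = [
        dict(score="accuracy", model="ODTE", metric=0.97),
        dict(score="accuracy", model="STree", metric=0.95),
        dict(score="f1-macro", model="ODTE", metric=0.99),
    ]

    def best_result(data, criterion=None, value=None, score="accuracy"):
        haystack = [x for x in data if x["score"] == score]
        if criterion is not None and value is not None:
            haystack = [x for x in haystack if x[criterion] == value]
        return sorted(haystack, key=lambda x: x["metric"], reverse=True)[0]

    print(best_result(data))  # the ODTE accuracy entry (metric 0.97)
    print(best_result(data, criterion="model", value="STree"))  # metric 0.95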
@@ -1,35 +1,7 @@
-import os
-from Utils import Folders, Files
-from Results import BaseReport
-
-
-class StubReport(BaseReport):
-    def __init__(self, file_name):
-        super().__init__(file_name=file_name, best_file=False)
-
-    def print_line(self, line) -> None:
-        pass
-
-    def header(self) -> None:
-        pass
-
-    def footer(self, accuracy: float) -> None:
-        self.accuracy = accuracy
-
-
-class Summary:
-    def __init__(self) -> None:
-        self.results = Files().get_all_results()
-
-    def list(self) -> None:
-        """List all results"""
-        max_length = max([len(x) for x in self.results])
-        for result in self.results:
-            report = StubReport(os.path.join(Folders.results, result))
-            report.report()
-            print(f"{result:{max_length}s} {report.accuracy:7.3f}")
-        print("\n".join(self.results))
-
-
-if __name__ == "__main__":
-    Summary().list()
+from Results import Summary
+
+summary = Summary()
+summary.acquire()
+print(summary.best_result())
+print(summary.best_result(criterion="model", value="ODTE"))
+summary.list()
@@ -2,6 +2,8 @@ import os
 import subprocess
 import argparse

+BEST_ACCURACY_STREE = 40.282203
+

 class Folders:
     data = "data"
@@ -47,6 +49,11 @@ class Files:
             f"{stratified}.json"
         )

+    def split_file_name(self, name):
+        _, score, model, platform, date, time, stratified = name.split("_")
+        stratified = stratified.replace(self.report_ext, "")
+        return score, model, platform, date, time, stratified
+
     def results_suffixes(self, score="", model=""):
         suffix = self.report_ext
         if model == "" and score == "":
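split_file_name assumes result file names follow a fixed underscore-separated
pattern, roughly <prefix>_<score>_<model>_<platform>_<date>_<time>_<stratified><ext>,
with no underscores inside the fields themselves. A sketch with a hypothetical
file name (the actual prefix and extension come from Files in Utils):

    name = "results_accuracy_ODTE_iMac_2022-04-20_10:52:20_0.json"
    _, score, model, platform, date, time, stratified = name.split("_")
    stratified = stratified.replace(".json", "")  # self.report_ext in Utils
    print(score, model, platform, date, time, stratified)
    # accuracy ODTE iMac 2022-04-20 10:52:20 0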