Begin Summary
```diff
@@ -10,23 +10,22 @@ class Models:
     def get_model(name):
         if name == "STree":
             return Stree
-        elif name == "Cart":
+        if name == "Cart":
             return DecisionTreeClassifier
-        elif name == "ExtraTree":
+        if name == "ExtraTree":
             return ExtraTreeClassifier
-        elif name == "Wodt":
+        if name == "Wodt":
            return TreeClassifier
-        elif name == "SVC":
+        if name == "SVC":
             return SVC
-        elif name == "ODTE":
+        if name == "ODTE":
             return Odte
-        else:
-            msg = f"No model recognized {name}"
-            if name == "Stree" or name == "stree":
-                msg += ", did you mean STree?"
-            elif name == "odte" or name == "Odte":
-                msg += ", did you mean ODTE?"
-            raise ValueError(msg)
+        msg = f"No model recognized {name}"
+        if name in ("Stree", "stree"):
+            msg += ", did you mean STree?"
+        elif name in ("odte", "Odte"):
+            msg += ", did you mean ODTE?"
+        raise ValueError(msg)

     @staticmethod
     def get_complexity(name, result):
```
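Every branch of the rewritten get_model returns immediately, so the elif chain becomes a series of guard clauses and the trailing else around the error block is dropped. A sketch of the error path (the capture does not name the module that defines Models, so the import is hypothetical):

```python
from Models import Models  # hypothetical import path; not shown in the diff

try:
    clf_class = Models.get_model("stree")  # lowercase on purpose
except ValueError as err:
    print(err)  # No model recognized stree, did you mean STree?
```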
```diff
@@ -34,7 +34,7 @@ class BaseReport(abc.ABC):
         best = BestResults(score, model, Datasets())
         self.best_results = best.load({})

-    def _compute_status(self, dataset, accuracy):
+    def _compute_status(self, dataset, accuracy: float):
         best = self.best_results[dataset][0]
         status = " "
         if accuracy == best:
```
```diff
@@ -57,15 +57,15 @@ class BaseReport(abc.ABC):
         return meaning[status]

     @abc.abstractmethod
-    def header(self):
+    def header(self) -> None:
         pass

     @abc.abstractmethod
-    def print_line(self, result):
+    def print_line(self, result) -> None:
         pass

     @abc.abstractmethod
-    def footer(self, accuracy):
+    def footer(self, accuracy: float) -> None:
         pass

```
```diff
@@ -84,18 +84,18 @@ class Report(BaseReport):
         "Hyperparameters",
     ]

-    def __init__(self, file_name, compare=False):
+    def __init__(self, file_name: str, compare: bool = False):
         super().__init__(file_name)
         self.compare = compare

-    def header_line(self, text):
+    def header_line(self, text: str) -> None:
         length = sum(self.header_lengths) + len(self.header_lengths) - 3
         if text == "*":
             print("*" * (length + 2))
         else:
             print(f"*{text:{length}s}*")

-    def print_line(self, result):
+    def print_line(self, result) -> None:
         hl = self.header_lengths
         i = 0
         print(f"{result['dataset']:{hl[i]}s} ", end="")
```
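The banner width in header_line is derived from the column widths plus the spaces between columns (the -3 offset is the class's own adjustment, kept as-is here). A standalone toy illustration of the same arithmetic, with invented widths rather than the report's real columns:

```python
# Toy illustration of Report.header_line's arithmetic, outside the class.
header_lengths = [30, 8, 8]  # hypothetical column widths
length = sum(header_lengths) + len(header_lengths) - 3  # same formula as the diff

print("*" * (length + 2))                # text == "*": full-width star rule
print(f"*{' Report title':{length}s}*")  # otherwise: text padded between stars
```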
```diff
@@ -128,7 +128,7 @@ class Report(BaseReport):
         i += 1
         print(f"{str(result['hyperparameters']):{hl[i]}s} ")

-    def header(self):
+    def header(self) -> None:
         if self.compare:
             self._load_best_results(
                 self.data["score_name"], self.data["model"]
```
```diff
@@ -156,7 +156,7 @@ class Report(BaseReport):
             line_col += "=" * underscore + " "
         print(f"\n{line_col}")

-    def footer(self, accuracy):
+    def footer(self, accuracy: float) -> None:
         self.header_line("*")
         if self.compare:
             for key, value in self._compare_totals.items():
```
src/Summary.py (new file, 35 lines)

```diff
@@ -0,0 +1,35 @@
+import os
+from Utils import Folders, Files
+from Results import BaseReport
+
+
+class StubReport(BaseReport):
+    def __init__(self, file_name):
+        super().__init__(file_name=file_name, best_file=False)
+
+    def print_line(self, line) -> None:
+        pass
+
+    def header(self) -> None:
+        pass
+
+    def footer(self, accuracy: float) -> None:
+        self.accuracy = accuracy
+
+
+class Summary:
+    def __init__(self) -> None:
+        self.results = Files().get_all_results()
+
+    def list(self) -> None:
+        """List all results"""
+        max_length = max([len(x) for x in self.results])
+        for result in self.results:
+            report = StubReport(os.path.join(Folders.results, result))
+            report.report()
+            print(f"{result:{max_length}s} {report.accuracy:7.3f}")
+        print("\n".join(self.results))
+
+
+if __name__ == "__main__":
+    Summary().list()
```
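As the __main__ guard shows, the new module is meant to be run as a script. A minimal usage sketch; it must be launched from src/ or its parent, since Files.get_all_results in src/Utils.py below probes the results folder in both locations:

```python
# Minimal usage sketch: run from src/ or its parent so the results folder
# resolves (see Files.get_all_results in src/Utils.py below).
from Summary import Summary

Summary().list()  # one line per result file: "<file name>  <accuracy>"
```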
src/Utils.py (30 lines changed)

```diff
@@ -13,7 +13,7 @@ class Folders:

 class Files:
     index = "all.txt"
+    report_ext = ".json"
     cmd_open_macos = "/usr/bin/open"
     cmd_open_linux = "/usr/bin/xdg-open"
     exreport_pdf = "Rplots.pdf"
```
```diff
@@ -47,15 +47,13 @@ class Files:
             f"{stratified}.json"
         )

-    @staticmethod
-    def results_suffixes(score="", model=""):
-        suffix = ".json"
+    def results_suffixes(self, score="", model=""):
+        suffix = self.report_ext
         if model == "" and score == "":
             return "results_", suffix
-        elif model == "":
+        if model == "":
             return f"results_{score}_", suffix
-        else:
-            return f"results_{score}_{model}_", suffix
+        return f"results_{score}_{model}_", suffix

     @staticmethod
     def dataset(name):
```
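results_suffixes is now an instance method that reads the extension from the new report_ext attribute and, like get_model above, replaces elif/else with early returns. Its three return shapes follow directly from the diff (a sketch, not repository tests):

```python
from Utils import Files  # the repository module diffed above

files = Files()
assert files.results_suffixes() == ("results_", ".json")
assert files.results_suffixes(score="accuracy") == ("results_accuracy_", ".json")
assert files.results_suffixes(score="accuracy", model="STree") == (
    "results_accuracy_STree_",
    ".json",
)
```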
```diff
@@ -75,6 +73,24 @@ class Files:
         )
         subprocess.run([command, name])

+    def get_all_results(self) -> list[str]:
+        first_path = "."
+        first_try = os.path.join(first_path, Folders.results)
+        second_path = ".."
+        second_try = os.path.join(second_path, first_try)
+        if os.path.isdir(first_try):
+            files_list = os.listdir(first_try)
+        elif os.path.isdir(second_try):
+            files_list = os.listdir(second_try)
+        else:
+            raise ValueError(f"{first_try} or {second_try} does not exist")
+        result = []
+        prefix, suffix = self.results_suffixes()
+        for result_file in files_list:
+            if result_file.startswith(prefix) and result_file.endswith(suffix):
+                result.append(result_file)
+        return result
+

 class Symbols:
     check_mark = "\N{heavy check mark}"
```
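get_all_results probes the results folder in the current directory first and one level up second, then keeps only file names matching the prefix/suffix pair from results_suffixes. For comparison, the same filter can be written with glob; this is an alternative sketch, not the committed code, and results_dir stands in for whatever path Folders.results resolves to:

```python
import glob
import os


def get_all_results_glob(results_dir: str) -> list[str]:
    # Same filter as the committed loop: keep names that start with
    # "results_" and end with ".json", without their directory part.
    pattern = os.path.join(results_dir, "results_*.json")
    return [os.path.basename(path) for path in glob.glob(pattern)]
```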