mirror of https://github.com/Doctorado-ML/benchmark.git
synced 2025-08-17 00:15:55 +00:00
Begin refactor main scripts

@@ -1159,3 +1159,56 @@ class Summary:
            input_data=self.best_results(score=score, n=n),
            sort_key="metric",
        )


class PairCheck:
    def __init__(self, score, model_a, model_b, winners=False, loosers=False):
        self.score = score
        self.model_a = model_a
        self.model_b = model_b
        self.winners = winners
        self.loosers = loosers
        self.winners_data = []
        self.loosers_data = []
        self.tie_data = []

    def compute(self):
        summary = Summary()
        summary.acquire()
        best_a = summary.best_result(
            criterion="model", value=self.model_a, score=self.score
        )
        best_b = summary.best_result(
            criterion="model", value=self.model_b, score=self.score
        )
        report_a = StubReport(os.path.join(Folders.results, best_a["file"]))
        report_a.report()
        report_b = StubReport(os.path.join(Folders.results, best_b["file"]))
        report_b.report()
        for result_a, result_b in zip(report_a.lines, report_b.lines):
            result = result_a["score"] - result_b["score"]
            if result > 0:
                self.winners_data.append(result_a["dataset"])
            elif result < 0:
                self.loosers_data.append(result_a["dataset"])
            else:
                self.tie_data.append(result_a["dataset"])

    def print(self):
        print(f"{'Model':<20} {'File':<70} {'Score':<10} Win Tie Loose")
        print("=" * 20 + " " + "=" * 70 + " " + "=" * 10 + " === === =====")
        print(
            f"{self.model_a:<20} {self.best_a['file']:<70} {report_1.score:10.5f}"
        )
        print(
            f"{model2:<20} {best_2['file']:<70} "
            f"{report_2.score:10.5f} "
            f"{TextColor.GREEN}{win:3d} {TextColor.YELLOW}{tie:3d} "
            f"{TextColor.RED}{loose:5d}"
        )
        if win_results:
            print(TextColor.GREEN + "Winners:")
            print(winners)
        if loose_results:
            print(TextColor.RED + "Loosers:")
            print(loosers)
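
The new PairCheck class compares the best stored result of two models dataset by dataset and tallies wins, ties and losses into winners_data, tie_data and loosers_data. Its print() method still refers to names from the pre-refactor script (self.best_a, report_1, model2, win, win_results, ...), which are not defined in the class yet, so the outcome is easiest to inspect through the lists that compute() fills. A minimal usage sketch, assuming the repository's result files are available to Summary and StubReport; the score and the model names are illustrative, not repo defaults:

    # Illustrative only: "accuracy", "STree" and "ODTE" are example arguments.
    check = PairCheck(score="accuracy", model_a="STree", model_b="ODTE")
    check.compute()  # fills winners_data / tie_data / loosers_data per dataset
    print(
        f"{check.model_a} vs {check.model_b}: "
        f"{len(check.winners_data)} wins, {len(check.tie_data)} ties, "
        f"{len(check.loosers_data)} losses"
    )
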
src/list
@@ -64,8 +64,8 @@ def parse_arguments():
    )


if __name__ == "__main__":
    (excel, score, model, key, number, hidden) = parse_arguments()
    data = Summary(hidden=hidden)
    data.acquire()
    data.list_results(score=score, model=model, sort_key=key, number=number)
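
The change here (and in the hunks below) wraps each script body in an if __name__ == "__main__": guard, so the entry points can be imported, for example in tests, without running them. A small sketch of what that enables, assuming the file sits at src/list as the diff indicates; the module name list_script is invented for the example:

    # src/list has no .py extension, so load it through an explicit SourceFileLoader.
    from importlib.machinery import SourceFileLoader
    from importlib.util import module_from_spec, spec_from_loader

    loader = SourceFileLoader("list_script", "src/list")
    spec = spec_from_loader("list_script", loader)
    list_script = module_from_spec(spec)
    spec.loader.exec_module(list_script)  # top-level defs run, the guarded block does not
    parse_arguments = list_script.parse_arguments  # now reusable without side effects
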
src/main
@@ -117,6 +117,7 @@ def parse_arguments():
    )


if __name__ == "__main__":
    (
        stratified,
        score,
@@ -59,6 +59,7 @@ def parse_arguments():
    )


if __name__ == "__main__":
    (
        score,
        model1,
@@ -66,7 +67,6 @@ def parse_arguments():
        win_results,
        loose_results,
    ) = parse_arguments()
    summary = Summary()
    summary.acquire()
    win = tie = loose = 0
@@ -114,6 +114,7 @@ def default_report():
    )


if __name__ == "__main__":
    (file, excel, sql, compare, best, grid, score, model) = parse_arguments()
    if grid:
        best = False
@@ -40,22 +40,21 @@ def parse_arguments():
    )


if __name__ == "__main__":
    (
        score,
        model,
        list_results,
    ) = parse_arguments()
    all_metrics = ["accuracy", "f1-macro", "f1-micro"]
    metrics = all_metrics if score == "all" else [score]
    summary = Summary()
    summary.acquire()
    for metric in metrics:
        title = f"BEST RESULT of {metric} for {model}"
        best = summary.best_result(
            criterion="model", value=model, score=metric
        )
        summary.show_result(data=best, title=title)
        summary.show_result(
            summary.best_result(score=metric), title=f"BEST RESULT of {metric}"
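
The same per-metric loop can also be driven programmatically through the Summary API used above; a short sketch that gathers each metric's best result into a dict before showing it (only calls that appear in this diff are used, and "STree" is an illustrative model name):

    summary = Summary()
    summary.acquire()
    metrics = ["accuracy", "f1-macro", "f1-micro"]
    best_by_metric = {
        metric: summary.best_result(criterion="model", value="STree", score=metric)
        for metric in metrics
    }
    for metric, best in best_by_metric.items():
        summary.show_result(data=best, title=f"BEST RESULT of {metric} for STree")
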