mirror of https://github.com/Doctorado-ML/benchmark.git, synced 2025-08-15 23:45:54 +00:00
refactor pair_check to new class
@@ -1166,11 +1166,11 @@ class PairCheck:
         self.score = score
         self.model_a = model_a
         self.model_b = model_b
-        self.winners = winners
-        self.loosers = loosers
-        self.winners_data = []
-        self.loosers_data = []
-        self.tie_data = []
+        self.show_winners = winners
+        self.show_loosers = loosers
+        self.winners = []
+        self.loosers = []
+        self.tie = []

     def compute(self):
         summary = Summary()
@@ -1178,37 +1178,39 @@ class PairCheck:
         best_a = summary.best_result(
             criterion="model", value=self.model_a, score=self.score
         )
+        self.file_a = best_a["file"]
         best_b = summary.best_result(
             criterion="model", value=self.model_b, score=self.score
         )
+        self.file_b = best_b["file"]
         report_a = StubReport(os.path.join(Folders.results, best_a["file"]))
         report_a.report()
+        self.score_a = report_a.score
         report_b = StubReport(os.path.join(Folders.results, best_b["file"]))
         report_b.report()
+        self.score_b = report_b.score
         for result_a, result_b in zip(report_a.lines, report_b.lines):
             result = result_a["score"] - result_b["score"]
             if result > 0:
-                self.winners_data.append(result_a["dataset"])
+                self.winners.append(result_a["dataset"])
             elif result < 0:
-                self.loosers_data.append(result_a["dataset"])
+                self.loosers.append(result_a["dataset"])
             else:
-                self.tie_data.append(result_a["dataset"])
+                self.tie.append(result_a["dataset"])

-    def print(self):
+    def report(self):
         print(f"{'Model':<20} {'File':<70} {'Score':<10} Win Tie Loose")
         print("=" * 20 + " " + "=" * 70 + " " + "=" * 10 + " === === =====")
+        print(f"{self.model_a:<20} {self.file_a:<70} {self.score_a:10.5f}")
         print(
-            f"{self.model_a:<20} {self.best_a['file']:<70} {report_1.score:10.5f}"
+            f"{self.model_b:<20} {self.file_b:<70} "
+            f"{self.score_b:10.5f} "
+            f"{TextColor.GREEN}{len(self.winners):3d} {TextColor.YELLOW}{len(self.tie):3d} "
+            f"{TextColor.RED}{len(self.loosers):5d}"
         )
-        print(
-            f"{model2:<20} {best_2['file']:<70} "
-            f"{report_2.score:10.5f} "
-            f"{TextColor.GREEN}{win:3d} {TextColor.YELLOW}{tie:3d} "
-            f"{TextColor.RED}{loose:5d}"
-        )
-        if win_results:
+        if self.show_winners:
             print(TextColor.GREEN + "Winners:")
-            print(winners)
-        if loose_results:
+            print(self.winners)
+        if self.show_loosers:
             print(TextColor.RED + "Loosers:")
-            print(loosers)
+            print(self.loosers)

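For reference, the refactored class is driven in three steps: construct, compute, report. Below is a minimal usage sketch, assuming Results.py exports PairCheck as shown above; the model names and the "accuracy" metric are placeholders, not values taken from this repository:

    from Results import PairCheck

    # Compare the best stored results of two models on a given score metric;
    # the last two flags control whether the winner/looser dataset lists print.
    check = PairCheck("accuracy", "ModelA", "ModelB", True, True)
    check.compute()  # pairs both best-result reports dataset by dataset
    check.report()   # prints the score table and the win-tie-loose counts
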
@@ -1,8 +1,7 @@
 #!/usr/bin/env python
 import argparse
-import os
-from Results import Summary, StubReport
-from Utils import EnvDefault, Folders, TextColor
+from Results import PairCheck
+from Utils import EnvDefault

 """Check best results of two models giving scores and win-tie-loose results
 """
@@ -67,39 +66,6 @@ if __name__ == "__main__":
         win_results,
         loose_results,
     ) = parse_arguments()
-    summary = Summary()
-    summary.acquire()
-    win = tie = loose = 0
-    winners = []
-    loosers = []
-    best_1 = summary.best_result(criterion="model", value=model1, score=score)
-    best_2 = summary.best_result(criterion="model", value=model2, score=score)
-    report_1 = StubReport(os.path.join(Folders.results, best_1["file"]))
-    report_1.report()
-    report_2 = StubReport(os.path.join(Folders.results, best_2["file"]))
-    report_2.report()
-    for result1, result2 in zip(report_1.lines, report_2.lines):
-        result = result1["score"] - result2["score"]
-        if result > 0:
-            win += 1
-            winners.append(result1["dataset"])
-        elif result < 0:
-            loose += 1
-            loosers.append(result1["dataset"])
-        else:
-            tie += 1
-    print(f"{'Model':<20} {'File':<70} {'Score':<10} Win Tie Loose")
-    print("=" * 20 + " " + "=" * 70 + " " + "=" * 10 + " === === =====")
-    print(f"{model1:<20} {best_1['file']:<70} {report_1.score:10.5f}")
-    print(
-        f"{model2:<20} {best_2['file']:<70} "
-        f"{report_2.score:10.5f} "
-        f"{TextColor.GREEN}{win:3d} {TextColor.YELLOW}{tie:3d} "
-        f"{TextColor.RED}{loose:5d}"
-    )
-    if win_results:
-        print(TextColor.GREEN + "Winners:")
-        print(winners)
-    if loose_results:
-        print(TextColor.RED + "Loosers:")
-        print(loosers)
+    pair_check = PairCheck(score, model1, model2, win_results, loose_results)
+    pair_check.compute()
+    pair_check.report()

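Note that PairCheck.compute() relies on zip() to pair the two reports line by line, so both result files must list their datasets in the same order. A standalone sketch of the same win-tie-loose counting idea, using made-up dataset names and scores for illustration only:

    # Hypothetical per-dataset scores for two models, listed in matching order
    lines_a = [("iris", 0.97), ("wine", 0.93), ("glass", 0.71)]
    lines_b = [("iris", 0.95), ("wine", 0.93), ("glass", 0.74)]

    winners, tie, loosers = [], [], []  # "loosers" kept to match the repo's spelling
    for (dataset, score_a), (_, score_b) in zip(lines_a, lines_b):
        if score_a > score_b:
            winners.append(dataset)
        elif score_a < score_b:
            loosers.append(dataset)
        else:
            tie.append(dataset)

    print(len(winners), len(tie), len(loosers))  # -> 1 1 1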