Mirror of https://github.com/Doctorado-ML/benchmark.git, synced 2025-08-17 00:15:55 +00:00
Add dataset names to pair_check output
@@ -2,7 +2,7 @@
 import argparse
 import os
 from Results import Summary, StubReport
-from Utils import EnvDefault, Folders
+from Utils import EnvDefault, Folders, TextColor
 
 """Check best results of two models giving scores and win-tie-loose results
 """
@@ -33,11 +33,29 @@ def parse_arguments():
         required=True,
         help="model 2 name",
     )
+    ap.add_argument(
+        "-w",
+        "--win",
+        type=bool,
+        default=False,
+        required=False,
+        help="show win results",
+    )
+    ap.add_argument(
+        "-l",
+        "--loose",
+        type=bool,
+        default=False,
+        required=False,
+        help="show loose results",
+    )
     args = ap.parse_args()
     return (
         args.score,
         args.model1,
         args.model2,
+        args.win,
+        args.loose,
     )
 
 
@@ -45,11 +63,15 @@ def parse_arguments():
     score,
     model1,
     model2,
+    win_results,
+    loose_results,
 ) = parse_arguments()
 
 summary = Summary()
 summary.acquire()
 win = tie = loose = 0
+winners = []
+loosers = []
 best_1 = summary.best_result(criterion="model", value=model1, score=score)
 best_2 = summary.best_result(criterion="model", value=model2, score=score)
 report_1 = StubReport(os.path.join(Folders.results, best_1["file"]))
@@ -60,8 +82,10 @@ for result1, result2 in zip(report_1.lines, report_2.lines):
     result = result1["score"] - result2["score"]
     if result > 0:
         win += 1
+        winners.append(result1["dataset"])
     elif result < 0:
         loose += 1
+        loosers.append(result1["dataset"])
     else:
         tie += 1
 print(f"{'Model':<20} {'File':<70} {'Score':<10} Win Tie Loose")
@@ -70,5 +94,11 @@ print(f"{model1:<20} {best_1['file']:<70} {report_1.score:10.5f}")
 print(
     f"{model2:<20} {best_2['file']:<70} "
     f"{report_2.score:10.5f} "
-    f"{win:3d} {tie:3d} {loose:5d}"
+    f"{TextColor.GREEN}{win:3d} {TextColor.YELLOW}{tie:3d} {TextColor.RED}{loose:5d}"
 )
+if win_results:
+    print(TextColor.GREEN+"Winners:")
+    print(winners)
+if loose_results:
+    print(TextColor.RED+"Loosers:")
+    print(loosers)
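A side note on the new -w/--win and -l/--loose options: argparse does not actually parse boolean values with type=bool, because bool() applied to any non-empty string (including "False") returns True, so the options behave as presence switches that become True whenever any value is passed. A minimal sketch of the more conventional store_true form, using the same option names; this is for illustration only and is not part of the commit:

import argparse

# Sketch only (not the repository's code): same option names as the diff,
# but action="store_true" so the flags take no value and default to False.
ap = argparse.ArgumentParser()
ap.add_argument("-w", "--win", action="store_true", help="show win results")
ap.add_argument("-l", "--loose", action="store_true", help="show loose results")

args = ap.parse_args(["-w"])  # example invocation: only --win requested
print(args.win, args.loose)   # -> True False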
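The colored counters rely on the TextColor helper imported from Utils, whose definition is not shown in this diff. Assuming it follows the common ANSI escape-code pattern, the sketch below reproduces the colored summary line; the ENDC reset attribute is an assumption (the diff does not show a reset after the red field), added here so later terminal output does not stay red:

# Hypothetical stand-in for Utils.TextColor, assuming plain ANSI escape codes.
class TextColor:
    GREEN = "\033[92m"
    YELLOW = "\033[93m"
    RED = "\033[91m"
    ENDC = "\033[0m"  # reset; assumed, not visible in the diff

win, tie, loose = 12, 3, 7  # illustrative counts
print(
    f"{TextColor.GREEN}{win:3d} {TextColor.YELLOW}{tie:3d} "
    f"{TextColor.RED}{loose:5d}{TextColor.ENDC}"
)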