mirror of https://github.com/Doctorado-ML/benchmark.git (synced 2025-08-17 08:25:53 +00:00)

Begin refactor main scripts
@@ -1159,3 +1159,56 @@ class Summary:
             input_data=self.best_results(score=score, n=n),
             sort_key="metric",
         )
+
+
+class PairCheck:
+    def __init__(self, score, model_a, model_b, winners=False, loosers=False):
+        self.score = score
+        self.model_a = model_a
+        self.model_b = model_b
+        self.winners = winners
+        self.loosers = loosers
+        self.winners_data = []
+        self.loosers_data = []
+        self.tie_data = []
+
+    def compute(self):
+        summary = Summary()
+        summary.acquire()
+        best_a = summary.best_result(
+            criterion="model", value=self.model_a, score=self.score
+        )
+        best_b = summary.best_result(
+            criterion="model", value=self.model_b, score=self.score
+        )
+        report_a = StubReport(os.path.join(Folders.results, best_a["file"]))
+        report_a.report()
+        report_b = StubReport(os.path.join(Folders.results, best_b["file"]))
+        report_b.report()
+        # keep the best entries and their reports so print() can show them
+        self.best_a, self.best_b = best_a, best_b
+        self.report_a, self.report_b = report_a, report_b
+        for result_a, result_b in zip(report_a.lines, report_b.lines):
+            result = result_a["score"] - result_b["score"]
+            if result > 0:
+                self.winners_data.append(result_a["dataset"])
+            elif result < 0:
+                self.loosers_data.append(result_a["dataset"])
+            else:
+                self.tie_data.append(result_a["dataset"])
+
+    def print(self):
+        win = len(self.winners_data)
+        tie = len(self.tie_data)
+        loose = len(self.loosers_data)
+        print(f"{'Model':<20} {'File':<70} {'Score':<10} Win Tie Loose")
+        print("=" * 20 + " " + "=" * 70 + " " + "=" * 10 + " === === =====")
+        print(
+            f"{self.model_a:<20} {self.best_a['file']:<70} "
+            f"{self.report_a.score:10.5f}"
+        )
+        print(
+            f"{self.model_b:<20} {self.best_b['file']:<70} "
+            f"{self.report_b.score:10.5f} "
+            f"{TextColor.GREEN}{win:3d} {TextColor.YELLOW}{tie:3d} "
+            f"{TextColor.RED}{loose:5d}"
+        )
+        if self.winners:
+            print(TextColor.GREEN + "Winners:")
+            print(self.winners_data)
+        if self.loosers:
+            print(TextColor.RED + "Loosers:")
+            print(self.loosers_data)

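For context, a minimal sketch of how the new PairCheck class could be driven from a small command-line wrapper, in the style of the scripts refactored below. The import path and option names are assumptions, not part of this commit.

# Hypothetical wrapper script; module path and flags are assumptions that
# mirror the conventions of the other src/ scripts in this commit.
import argparse

from benchmark.Results import PairCheck  # assumed import location


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--score", default="accuracy")
    parser.add_argument("-a", "--model_a", required=True)
    parser.add_argument("-b", "--model_b", required=True)
    parser.add_argument("--winners", action="store_true")
    parser.add_argument("--loosers", action="store_true")
    args = parser.parse_args()
    pair = PairCheck(
        args.score, args.model_a, args.model_b, args.winners, args.loosers
    )
    pair.compute()
    pair.print()
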
 src/list | 10

@@ -64,8 +64,8 @@ def parse_arguments():
     )


-(excel, score, model, key, number, hidden) = parse_arguments()
-
-data = Summary(hidden=hidden)
-data.acquire()
-data.list_results(score=score, model=model, sort_key=key, number=number)
+if __name__ == "__main__":
+    (excel, score, model, key, number, hidden) = parse_arguments()
+    data = Summary(hidden=hidden)
+    data.acquire()
+    data.list_results(score=score, model=model, sort_key=key, number=number)

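The same change is applied to every script in this commit: the module-level body is moved under an if __name__ == "__main__": guard. A toy example (not repository code) of what the guard buys: the module's definitions can be imported, for instance by tests, without executing the CLI body.

# example.py -- illustrative only
def main():
    print("running as a script")


if __name__ == "__main__":
    # Runs under `python example.py`, but not under `import example`.
    main()
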
 src/main | 77

@@ -117,41 +117,42 @@ def parse_arguments():
     )


-(
-    stratified,
-    score,
-    model,
-    folds,
-    platform,
-    quiet,
-    hyperparameters,
-    paramfile,
-    grid_paramfile,
-    report,
-    experiment_title,
-    dataset,
-) = parse_arguments()
-report = report or dataset is not None
-if grid_paramfile:
-    paramfile = False
-job = Experiment(
-    score_name=score,
-    model_name=model,
-    stratified=stratified,
-    datasets=Datasets(dataset=dataset),
-    hyperparams_dict=hyperparameters,
-    hyperparams_file=paramfile,
-    grid_paramfile=grid_paramfile,
-    progress_bar=not quiet,
-    platform=platform,
-    title=experiment_title,
-    folds=folds,
-)
-job.do_experiment()
-if report:
-    result_file = job.get_output_file()
-    report = Report(result_file)
-    report.report()
-    if dataset is not None:
-        print(f"Partial result file removed: {result_file}")
-        os.remove(result_file)
+if __name__ == "__main__":
+    (
+        stratified,
+        score,
+        model,
+        folds,
+        platform,
+        quiet,
+        hyperparameters,
+        paramfile,
+        grid_paramfile,
+        report,
+        experiment_title,
+        dataset,
+    ) = parse_arguments()
+    report = report or dataset is not None
+    if grid_paramfile:
+        paramfile = False
+    job = Experiment(
+        score_name=score,
+        model_name=model,
+        stratified=stratified,
+        datasets=Datasets(dataset=dataset),
+        hyperparams_dict=hyperparameters,
+        hyperparams_file=paramfile,
+        grid_paramfile=grid_paramfile,
+        progress_bar=not quiet,
+        platform=platform,
+        title=experiment_title,
+        folds=folds,
+    )
+    job.do_experiment()
+    if report:
+        result_file = job.get_output_file()
+        report = Report(result_file)
+        report.report()
+        if dataset is not None:
+            print(f"Partial result file removed: {result_file}")
+            os.remove(result_file)

@@ -59,47 +59,47 @@ def parse_arguments():
     )


-(
-    score,
-    model1,
-    model2,
-    win_results,
-    loose_results,
-) = parse_arguments()
-
-summary = Summary()
-summary.acquire()
-win = tie = loose = 0
-winners = []
-loosers = []
-best_1 = summary.best_result(criterion="model", value=model1, score=score)
-best_2 = summary.best_result(criterion="model", value=model2, score=score)
-report_1 = StubReport(os.path.join(Folders.results, best_1["file"]))
-report_1.report()
-report_2 = StubReport(os.path.join(Folders.results, best_2["file"]))
-report_2.report()
-for result1, result2 in zip(report_1.lines, report_2.lines):
-    result = result1["score"] - result2["score"]
-    if result > 0:
-        win += 1
-        winners.append(result1["dataset"])
-    elif result < 0:
-        loose += 1
-        loosers.append(result1["dataset"])
-    else:
-        tie += 1
-print(f"{'Model':<20} {'File':<70} {'Score':<10} Win Tie Loose")
-print("=" * 20 + " " + "=" * 70 + " " + "=" * 10 + " === === =====")
-print(f"{model1:<20} {best_1['file']:<70} {report_1.score:10.5f}")
-print(
-    f"{model2:<20} {best_2['file']:<70} "
-    f"{report_2.score:10.5f} "
-    f"{TextColor.GREEN}{win:3d} {TextColor.YELLOW}{tie:3d} "
-    f"{TextColor.RED}{loose:5d}"
-)
-if win_results:
-    print(TextColor.GREEN + "Winners:")
-    print(winners)
-if loose_results:
-    print(TextColor.RED + "Loosers:")
-    print(loosers)
+if __name__ == "__main__":
+    (
+        score,
+        model1,
+        model2,
+        win_results,
+        loose_results,
+    ) = parse_arguments()
+    summary = Summary()
+    summary.acquire()
+    win = tie = loose = 0
+    winners = []
+    loosers = []
+    best_1 = summary.best_result(criterion="model", value=model1, score=score)
+    best_2 = summary.best_result(criterion="model", value=model2, score=score)
+    report_1 = StubReport(os.path.join(Folders.results, best_1["file"]))
+    report_1.report()
+    report_2 = StubReport(os.path.join(Folders.results, best_2["file"]))
+    report_2.report()
+    for result1, result2 in zip(report_1.lines, report_2.lines):
+        result = result1["score"] - result2["score"]
+        if result > 0:
+            win += 1
+            winners.append(result1["dataset"])
+        elif result < 0:
+            loose += 1
+            loosers.append(result1["dataset"])
+        else:
+            tie += 1
+    print(f"{'Model':<20} {'File':<70} {'Score':<10} Win Tie Loose")
+    print("=" * 20 + " " + "=" * 70 + " " + "=" * 10 + " === === =====")
+    print(f"{model1:<20} {best_1['file']:<70} {report_1.score:10.5f}")
+    print(
+        f"{model2:<20} {best_2['file']:<70} "
+        f"{report_2.score:10.5f} "
+        f"{TextColor.GREEN}{win:3d} {TextColor.YELLOW}{tie:3d} "
+        f"{TextColor.RED}{loose:5d}"
+    )
+    if win_results:
+        print(TextColor.GREEN + "Winners:")
+        print(winners)
+    if loose_results:
+        print(TextColor.RED + "Loosers:")
+        print(loosers)

 src/report | 37

@@ -114,22 +114,23 @@ def default_report():
     )


-(file, excel, sql, compare, best, grid, score, model) = parse_arguments()
-if grid:
-    best = False
-if file is None and best is None:
-    default_report()
-else:
-    if best is not None or grid is not None:
-        report = ReportBest(score, model, best, grid)
-        report.report()
-    else:
-        report = Report(file, compare)
-        report.report()
-        if excel:
-            excel = Excel(file, compare)
-            excel.report()
-            Files.open(excel.get_file_name())
-        if sql:
-            sql = SQL(file)
-            sql.report()
+if __name__ == "__main__":
+    (file, excel, sql, compare, best, grid, score, model) = parse_arguments()
+    if grid:
+        best = False
+    if file is None and best is None:
+        default_report()
+    else:
+        if best is not None or grid is not None:
+            report = ReportBest(score, model, best, grid)
+            report.report()
+        else:
+            report = Report(file, compare)
+            report.report()
+            if excel:
+                excel = Excel(file, compare)
+                excel.report()
+                Files.open(excel.get_file_name())
+            if sql:
+                sql = SQL(file)
+                sql.report()

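The report script's dispatch is the least obvious of the refactored bodies, so here is a standalone sketch (not repository code) of the decision order it follows: a truthy grid forces best to False, which still routes to ReportBest because False is not None. The None defaults for unset options are an assumption about parse_arguments, and the return labels are purely illustrative.

# Standalone sketch of the dispatch order in the refactored report script.
def choose_report(file, best, grid):
    if grid:
        best = False
    if file is None and best is None:
        return "default_report"
    if best is not None or grid is not None:
        return "ReportBest"
    return "Report"


assert choose_report(file=None, best=None, grid=None) == "default_report"
assert choose_report(file=None, best="accuracy", grid=None) == "ReportBest"
assert choose_report(file="results.json", best=None, grid=None) == "Report"
assert choose_report(file="results.json", best=None, grid=True) == "ReportBest"
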
 src/summary | 45

@@ -40,26 +40,25 @@ def parse_arguments():
     )


-(
-    score,
-    model,
-    list_results,
-) = parse_arguments()
-
-all_metrics = ["accuracy", "f1-macro", "f1-micro"]
-
-metrics = all_metrics if score == "all" else [score]
-
-summary = Summary()
-summary.acquire()
-
-for metric in metrics:
-    title = f"BEST RESULT of {metric} for {model}"
-    best = summary.best_result(criterion="model", value=model, score=metric)
-    summary.show_result(data=best, title=title)
-    summary.show_result(
-        summary.best_result(score=metric), title=f"BEST RESULT of {metric}"
-    )
-    summary.show_top(score=metric, n=10)
-if list_results:
-    summary.list_results()
+if __name__ == "__main__":
+    (
+        score,
+        model,
+        list_results,
+    ) = parse_arguments()
+    all_metrics = ["accuracy", "f1-macro", "f1-micro"]
+    metrics = all_metrics if score == "all" else [score]
+    summary = Summary()
+    summary.acquire()
+    for metric in metrics:
+        title = f"BEST RESULT of {metric} for {model}"
+        best = summary.best_result(
+            criterion="model", value=model, score=metric
+        )
+        summary.show_result(data=best, title=title)
+        summary.show_result(
+            summary.best_result(score=metric), title=f"BEST RESULT of {metric}"
+        )
+        summary.show_top(score=metric, n=10)
+    if list_results:
+        summary.list_results()