Mirror of https://github.com/Doctorado-ML/benchmark.git, synced 2025-08-15 23:45:54 +00:00

Add experiment title and model version to reports
@@ -133,6 +133,7 @@ class Experiment:
         hyperparams_dict,
         hyperparams_file,
         platform,
+        title,
         progress_bar=True,
         folds=5,
     ):
@@ -152,6 +153,7 @@ class Experiment:
         )
         self.score_name = score_name
         self.model_name = model_name
+        self.title = title
         self.stratified = stratified == "1"
         self.stratified_class = StratifiedKFold if self.stratified else KFold
         self.model = Models.get_model(model_name)
@@ -231,6 +233,7 @@ class Experiment:
 
     def _add_results(self, name, hyperparameters, samples, features, classes):
         record = {}
+        record["title"] = self.title
         record["dataset"] = name
         record["samples"] = samples
         record["features"] = features
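For reference, a minimal sketch (not the project's code) of the per-dataset record that _add_results now starts building; only the keys come from the hunk above, the values and anything beyond them are placeholders:

    # Hedged illustration of the record shape after this change;
    # values are placeholders, only the keys appear in the diff.
    record = {
        "title": "default",   # new field carrying the experiment title
        "dataset": "iris",    # the `name` argument
        "samples": 150,
        "features": 4,
    }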
@@ -140,12 +140,13 @@ class Report(BaseReport):
             f" with {self.data['folds']} Folds "
             f"cross validation and {len(self.data['seeds'])} random seeds"
         )
+        self.header_line(f" {self.data['title']}")
         self.header_line(
             f" Random seeds: {self.data['seeds']} Stratified: "
             f"{self.data['stratified']}"
         )
         self.header_line(
-            f" Execution took {self.data['duration']:7.2f} seconds on an "
+            f" Execution took {self.data['duration']:7.2f} seconds on "
            f"{self.data['platform']}"
         )
         self.header_line(f" Score is {self.data['score_name']}")
@@ -238,7 +239,7 @@ class ReportBest(BaseReport):
 
 
 class Excel(BaseReport):
-    row = 5
+    row = 6
 
     def __init__(self, file_name, compare=False):
         super().__init__(file_name)
@@ -271,21 +272,27 @@ class Excel(BaseReport):
         self.sheet.write(
             1,
             0,
-            f" Execution took {self.data['duration']:7.2f} seconds on an "
+            f" {self.data['title']}",
+            subheader,
+        )
+        self.sheet.write(
+            2,
+            0,
+            f" Execution took {self.data['duration']:7.2f} seconds on "
             f"{self.data['platform']}",
             subheader,
         )
         self.sheet.write(
             1,
-            2,
+            5,
             f"Random seeds: {self.data['seeds']}",
             subheader,
         )
         self.sheet.write(
-            2, 0, f" Score is {self.data['score_name']}", subheader
+            3, 0, f" Score is {self.data['score_name']}", subheader
         )
         self.sheet.write(
-            2,
+            3,
             5,
             f"Stratified: {self.data['stratified']}",
             subheader,
@@ -309,7 +316,7 @@ class Excel(BaseReport):
         bold = self.book.add_format({"bold": True, "font_size": 14})
         i = 0
         for item, length in header_cols:
-            self.sheet.write(4, i, item, bold)
+            self.sheet.write(5, i, item, bold)
             self.sheet.set_column(i, i, length)
             i += 1
 
@@ -378,12 +385,14 @@ class SQL(BaseReport):
             "date",
             "time",
             "type",
+            "title",
             "stratified",
             "score_name",
             "score",
             "score_std",
             "dataset",
             "classifier",
+            "version",
             "norm",
             "stand",
             "time_spent",
@@ -407,12 +416,14 @@ class SQL(BaseReport):
             self.data["date"],
             self.data["time"],
             "crossval",
+            self.data["title"],
             "1" if self.data["stratified"] else "0",
             self.data["score_name"],
             result["score"],
             result["score_std"],
             result["dataset"],
             self.data["model"],
+            self.data["version"],
             0,
             1,
             result["time"],
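The two hunks above mean every stored row now carries the experiment title and the model version. A self-contained sqlite3 sketch of that row shape follows; the table name ("results"), the trimmed column list and all values are placeholders, not the project's actual schema:

    # Hypothetical sketch only: a reduced table with the new columns.
    import sqlite3

    con = sqlite3.connect(":memory:")
    con.execute(
        "CREATE TABLE results (date TEXT, time TEXT, type TEXT, title TEXT, "
        "classifier TEXT, version TEXT, score REAL)"
    )
    con.execute(
        "INSERT INTO results VALUES (?, ?, ?, ?, ?, ?, ?)",
        ("2022-01-01", "12:00:00", "crossval", "default", "SomeModel", "1.0", 0.95),
    )
    print(con.execute("SELECT title, version FROM results").fetchall())
    # -> [('default', '1.0')]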
@@ -702,6 +713,8 @@ class Summary:
         print(f"*{title:^{length - 2}s}*")
         print("*" + "-" * (length - 2) + "*")
         print("*" + whites(length - 2))
+        print(f"* {result.data['title']:^{length - 4}} *")
+        print("*" + whites(length - 2))
         print(
             f"* Model: {result.data['model']:15s} "
             f"Ver. {result.data['version']:10s} "
@@ -49,6 +49,9 @@ def parse_arguments():
     ap.add_argument(
         "-f", "--paramfile", type=bool, required=False, default=False
     )
+    ap.add_argument(
+        "--title", type=str, required=True, help="experiment title"
+    )
     ap.add_argument(
         "-q",
         "--quiet",
@@ -85,6 +88,7 @@ def parse_arguments():
         args.hyperparameters,
         args.paramfile,
         args.report,
+        args.title,
     )
 
 
@@ -98,6 +102,7 @@ def parse_arguments():
     hyperparameters,
     paramfile,
     report,
+    experiment_title,
 ) = parse_arguments()
 job = Experiment(
     score_name=score,
@@ -108,6 +113,7 @@ job = Experiment(
     hyperparams_file=paramfile,
     progress_bar=not quiet,
     platform=platform,
+    title=experiment_title,
     folds=folds,
 )
 job.do_experiment()
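Because the new --title argument is declared with required=True, the experiment script now aborts at argument parsing when the title is omitted. A minimal, self-contained sketch of that behaviour (not the project's parse_arguments; the title text is a placeholder):

    import argparse

    ap = argparse.ArgumentParser()
    ap.add_argument("--title", type=str, required=True, help="experiment title")

    args = ap.parse_args(["--title", "Baseline run"])
    print(args.title)  # -> Baseline run
    # ap.parse_args([]) would exit with:
    #   error: the following arguments are required: --title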
@@ -12,6 +12,9 @@ for result in results:
     file_name = os.path.join(Folders.results, result)
     with open(file_name) as f:
         data = json.load(f)
+    if "title" not in data:
+        print(f"Repairing title in {result}")
+        data["title"] = "default"
     if "version" not in data:
         print(f"Repairing version in {result}")
         model = data["model"]
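The hunk above only shows the start of the repair loop. A hedged sketch of the general back-fill idea, including writing the default title back to disk, might look like the following; the directory name and the write-back step are assumptions, not taken from the diff:

    import json
    import os

    results_dir = "results"  # placeholder for Folders.results
    for result in os.listdir(results_dir):
        file_name = os.path.join(results_dir, result)
        with open(file_name) as f:
            data = json.load(f)
        if "title" not in data:
            print(f"Repairing title in {result}")
            data["title"] = "default"
            with open(file_name, "w") as f:
                json.dump(data, f, indent=4)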
@@ -1,5 +1,6 @@
 import argparse
 from Results import Summary
+from Utils import EnvDefault
 
 
 def parse_arguments():
@@ -8,6 +9,8 @@ def parse_arguments():
         "-m",
         "--model",
         type=str,
+        action=EnvDefault,
+        envvar="model",
         required=True,
         help="model name",
     )
@@ -15,19 +18,31 @@ def parse_arguments():
         "-s",
         "--score",
         type=str,
+        action=EnvDefault,
+        envvar="score",
         required=True,
         help="score name {accuracy, f1_micro, f1_macro, all}",
     )
+    ap.add_argument(
+        "-l",
+        "--list",
+        type=bool,
+        required=False,
+        default=False,
+        help="List all results",
+    )
     args = ap.parse_args()
     return (
         args.score,
         args.model,
+        args.list,
     )
 
 
 (
     score,
     model,
+    list_results,
 ) = parse_arguments()
 
 all_metrics = ["accuracy", "f1-macro", "f1-micro"]
@@ -44,3 +59,5 @@ for metric in metrics:
     summary.show_result(
         summary.best_result(score=metric), title=f"BEST RESULT of {metric}"
     )
+if list_results:
+    summary.list()
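The -m and -s options now take their defaults from environment variables via Utils.EnvDefault. That class is not part of this diff; the following is only an illustrative approximation of the usual env-var-backed argparse action pattern, not the project's implementation, and the model value is a placeholder:

    import argparse
    import os


    class EnvDefault(argparse.Action):
        """Use an environment variable as the default value of an option."""

        def __init__(self, envvar, required=True, default=None, **kwargs):
            if envvar in os.environ:
                default = os.environ[envvar]
            if required and default is not None:
                required = False
            super().__init__(default=default, required=required, **kwargs)

        def __call__(self, parser, namespace, values, option_string=None):
            setattr(namespace, self.dest, values)


    os.environ["model"] = "SomeModel"  # placeholder value
    ap = argparse.ArgumentParser()
    ap.add_argument("-m", "--model", action=EnvDefault, envvar="model", required=True)
    print(ap.parse_args([]).model)  # -> SomeModel, taken from the environment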