Mirror of https://github.com/Doctorado-ML/benchmark.git, synced 2025-08-17 00:15:55 +00:00
Add experiment title and model version to reports
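The change threads a new `title` field from the command line through `Experiment` and into every report backend (text `Report`, `Excel`, `SQL`, and the `Summary` printer), records the model `version` in the SQL output, adds a repair pass for older result files, and switches the `-m`/`-s` flags of the results driver to environment-aware defaults.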
@@ -133,6 +133,7 @@ class Experiment:
         hyperparams_dict,
         hyperparams_file,
         platform,
+        title,
         progress_bar=True,
         folds=5,
     ):
@@ -152,6 +153,7 @@ class Experiment:
         )
         self.score_name = score_name
         self.model_name = model_name
+        self.title = title
         self.stratified = stratified == "1"
         self.stratified_class = StratifiedKFold if self.stratified else KFold
         self.model = Models.get_model(model_name)
@@ -231,6 +233,7 @@ class Experiment:

     def _add_results(self, name, hyperparameters, samples, features, classes):
         record = {}
+        record["title"] = self.title
         record["dataset"] = name
         record["samples"] = samples
         record["features"] = features
@@ -140,12 +140,13 @@ class Report(BaseReport):
             f" with {self.data['folds']} Folds "
             f"cross validation and {len(self.data['seeds'])} random seeds"
         )
+        self.header_line(f" {self.data['title']}")
         self.header_line(
             f" Random seeds: {self.data['seeds']} Stratified: "
             f"{self.data['stratified']}"
         )
         self.header_line(
-            f" Execution took {self.data['duration']:7.2f} seconds on an "
+            f" Execution took {self.data['duration']:7.2f} seconds on "
             f"{self.data['platform']}"
         )
         self.header_line(f" Score is {self.data['score_name']}")
@@ -238,7 +239,7 @@ class ReportBest(BaseReport):


 class Excel(BaseReport):
-    row = 5
+    row = 6

     def __init__(self, file_name, compare=False):
         super().__init__(file_name)
@@ -271,21 +272,27 @@ class Excel(BaseReport):
         self.sheet.write(
             1,
             0,
-            f" Execution took {self.data['duration']:7.2f} seconds on an "
+            f" {self.data['title']}",
+            subheader,
+        )
+        self.sheet.write(
+            2,
+            0,
+            f" Execution took {self.data['duration']:7.2f} seconds on "
             f"{self.data['platform']}",
             subheader,
         )
         self.sheet.write(
-            1,
+            2,
             5,
             f"Random seeds: {self.data['seeds']}",
             subheader,
         )
         self.sheet.write(
-            2, 0, f" Score is {self.data['score_name']}", subheader
+            3, 0, f" Score is {self.data['score_name']}", subheader
         )
         self.sheet.write(
-            2,
+            3,
             5,
             f"Stratified: {self.data['stratified']}",
             subheader,
@@ -309,7 +316,7 @@ class Excel(BaseReport):
         bold = self.book.add_format({"bold": True, "font_size": 14})
         i = 0
         for item, length in header_cols:
-            self.sheet.write(4, i, item, bold)
+            self.sheet.write(5, i, item, bold)
             self.sheet.set_column(i, i, length)
             i += 1
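Every Excel change above follows from one new row: the title is written at worksheet row 1 (xlsxwriter rows are zero-based), so everything beneath it shifts down by one. The duration line moves to row 2, score and stratified to row 3, the column headers from row 4 to row 5, and the first data row becomes `row = 6`. A minimal sketch of the zero-based `write` calls; the workbook and format setup here is illustrative, not the repository's actual `BaseReport` code:

```python
import xlsxwriter

# Illustrative workbook; the real class reuses self.book / self.sheet.
book = xlsxwriter.Workbook("report_demo.xlsx")
sheet = book.add_worksheet()
subheader = book.add_format({"bold": True, "font_size": 12})

sheet.write(1, 0, " My experiment title", subheader)  # new row 1: title
sheet.write(2, 0, " Execution took   12.34 seconds on Linux", subheader)
sheet.write(5, 0, "Dataset", subheader)               # headers: row 4 -> 5
book.close()                                          # data rows start at 6
```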
@@ -378,12 +385,14 @@ class SQL(BaseReport):
             "date",
             "time",
             "type",
+            "title",
             "stratified",
             "score_name",
             "score",
             "score_std",
             "dataset",
             "classifier",
+            "version",
             "norm",
             "stand",
             "time_spent",
@@ -407,12 +416,14 @@ class SQL(BaseReport):
             self.data["date"],
             self.data["time"],
             "crossval",
+            self.data["title"],
             "1" if self.data["stratified"] else "0",
             self.data["score_name"],
             result["score"],
             result["score_std"],
             result["dataset"],
             self.data["model"],
+            self.data["version"],
             0,
             1,
             result["time"],
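The two SQL hunks must move in lockstep: the values tuple is bound positionally against the column list, so `title` slots in after `type` and `version` after `classifier` in both places. A hedged sketch of how such a list-driven parameterized INSERT is usually assembled (the `results` table name and the sqlite3 backend are assumptions, not confirmed by the diff):

```python
import sqlite3

# Keeping one column list as the single source of truth prevents the
# column order and the values tuple from drifting apart.
columns = [
    "date", "time", "type", "title", "stratified", "score_name",
    "score", "score_std", "dataset", "classifier", "version",
    "norm", "stand", "time_spent",
]
placeholders = ", ".join("?" for _ in columns)
sql = f"INSERT INTO results ({', '.join(columns)}) VALUES ({placeholders})"
# conn = sqlite3.connect("results.db"); conn.execute(sql, values)
```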
@@ -702,6 +713,8 @@ class Summary:
         print(f"*{title:^{length - 2}s}*")
         print("*" + "-" * (length - 2) + "*")
         print("*" + whites(length - 2))
+        print(f"* {result.data['title']:^{length - 4}} *")
+        print("*" + whites(length - 2))
         print(
             f"* Model: {result.data['model']:15s} "
             f"Ver. {result.data['version']:10s} "
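The `:^{length - 4}` spec centers the title between the box borders: `* ` and ` *` consume four characters, so the centered field plus the borders exactly matches the `length`-wide box. A quick worked check:

```python
length = 20
title = "demo"
print("*" + "-" * (length - 2) + "*")  # '*------------------*'  (20 chars)
print(f"* {title:^{length - 4}} *")    # '*       demo       *'  (20 chars)
```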
@@ -49,6 +49,9 @@ def parse_arguments():
     ap.add_argument(
         "-f", "--paramfile", type=bool, required=False, default=False
     )
+    ap.add_argument(
+        "--title", type=str, required=True, help="experiment title"
+    )
     ap.add_argument(
         "-q",
         "--quiet",
@@ -85,6 +88,7 @@ def parse_arguments():
         args.hyperparameters,
         args.paramfile,
         args.report,
+        args.title,
     )


@@ -98,6 +102,7 @@ def parse_arguments():
     hyperparameters,
     paramfile,
     report,
+    experiment_title,
 ) = parse_arguments()
 job = Experiment(
     score_name=score,
@@ -108,6 +113,7 @@ job = Experiment(
     hyperparams_file=paramfile,
     progress_bar=not quiet,
     platform=platform,
+    title=experiment_title,
     folds=folds,
 )
 job.do_experiment()
@@ -12,6 +12,9 @@ for result in results:
     file_name = os.path.join(Folders.results, result)
     with open(file_name) as f:
         data = json.load(f)
+    if "title" not in data:
+        print(f"Repairing title in {result}")
+        data["title"] = "default"
     if "version" not in data:
         print(f"Repairing version in {result}")
         model = data["model"]
@@ -1,5 +1,6 @@
 import argparse
 from Results import Summary
+from Utils import EnvDefault


 def parse_arguments():
@@ -8,6 +9,8 @@ def parse_arguments():
         "-m",
         "--model",
         type=str,
+        action=EnvDefault,
+        envvar="model",
         required=True,
         help="model name",
     )
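`EnvDefault` itself is not part of this diff; it lets `-m`/`-s` fall back to the `model`/`score` environment variables instead of failing when the flag is omitted. A minimal sketch of how such an argparse action is commonly written, offered as an assumption about `Utils.EnvDefault` rather than its confirmed implementation:

```python
import argparse
import os


class EnvDefault(argparse.Action):
    """Use os.environ[envvar] as the default; only stay required
    when neither the flag nor the variable is provided (sketch)."""

    def __init__(self, envvar, required=True, default=None, **kwargs):
        if envvar in os.environ:
            default = os.environ[envvar]
        if required and default is not None:
            required = False
        super().__init__(default=default, required=required, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, values)
```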
@@ -15,19 +18,31 @@ def parse_arguments():
         "-s",
         "--score",
         type=str,
+        action=EnvDefault,
+        envvar="score",
         required=True,
         help="score name {accuracy, f1_micro, f1_macro, all}",
     )
+    ap.add_argument(
+        "-l",
+        "--list",
+        type=bool,
+        required=False,
+        default=False,
+        help="List all results",
+    )
     args = ap.parse_args()
     return (
         args.score,
         args.model,
+        args.list,
     )


 (
     score,
     model,
+    list_results,
 ) = parse_arguments()

 all_metrics = ["accuracy", "f1-macro", "f1-micro"]
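One caveat with the new `-l`/`--list` flag (and the pre-existing `-f`/`--paramfile`): argparse's `type=bool` applies `bool()` to the raw string, and any non-empty string is truthy, so `-l False` still enables listing. `action="store_true"` is the usual idiom; a quick demonstration:

```python
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-l", "--list", type=bool, default=False)
print(ap.parse_args(["-l", "False"]).list)  # True: bool("False") is truthy

ap2 = argparse.ArgumentParser()
ap2.add_argument("-l", "--list", action="store_true")
print(ap2.parse_args([]).list)       # False
print(ap2.parse_args(["-l"]).list)   # True
```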
@@ -44,3 +59,5 @@ for metric in metrics:
     summary.show_result(
         summary.best_result(score=metric), title=f"BEST RESULT of {metric}"
     )
+if list_results:
+    summary.list()