import argparse

from Experiments import Experiment, Datasets
from Results import Report
from Utils import EnvDefault

"""Do the experiment and build the result file, optionally print a report
with the results.
"""
def parse_arguments():
    ap = argparse.ArgumentParser()
    # Options declared with action=EnvDefault can also take their value from
    # the environment variable named in ``envvar``.
    ap.add_argument(
        "-s",
        "--score",
        action=EnvDefault,
        envvar="score",
        type=str,
        required=True,
        help="score name {accuracy, f1_macro, ...}",
    )
    ap.add_argument(
        "-P",
        "--platform",
        action=EnvDefault,
        envvar="platform",
        type=str,
        required=True,
        help="Platform where the test is run",
    )
    ap.add_argument(
        "-m",
        "--model",
        type=str,
        required=True,
        help="model name",
    )
    ap.add_argument(
        "-n",
        "--n_folds",
        action=EnvDefault,
        envvar="n_folds",
        type=int,
        required=True,
        help="number of folds",
    )
    ap.add_argument(
        "-p", "--hyperparameters", type=str, required=False, default="{}"
    )
    # NOTE: with type=bool any non-empty string value is parsed as True.
    ap.add_argument(
        "-f", "--paramfile", type=bool, required=False, default=False
    )
    ap.add_argument(
        "--title", type=str, required=True, help="experiment title"
    )
    ap.add_argument(
        "-q",
        "--quiet",
        type=bool,
        default=False,
        required=False,
        help="Whether to show progress bar or not",
    )
    ap.add_argument(
        "-r",
        "--report",
        type=bool,
        default=False,
        required=False,
        help="Report results",
    )
    ap.add_argument(
        "-t",
        "--stratified",
        action=EnvDefault,
        envvar="stratified",
        type=str,
        required=True,
        help="Stratified",
    )
    args = ap.parse_args()
    return (
        args.stratified,
        args.score,
        args.model,
        args.n_folds,
        args.platform,
        args.quiet,
        args.hyperparameters,
        args.paramfile,
        args.report,
        args.title,
    )
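# Parse the command line, run the experiment on the Datasets() collection
# and, if requested, print a report of the generated result file.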
(
    stratified,
    score,
    model,
    folds,
    platform,
    quiet,
    hyperparameters,
    paramfile,
    report,
    experiment_title,
) = parse_arguments()
job = Experiment(
    score_name=score,
    model_name=model,
    stratified=stratified,
    datasets=Datasets(),
    hyperparams_dict=hyperparameters,
    hyperparams_file=paramfile,
    progress_bar=not quiet,
    platform=platform,
    title=experiment_title,
    folds=folds,
)
job.do_experiment()
if report:
    result_file = job.get_output_file()
    report = Report(result_file)
    report.report()