Mirror of https://github.com/Doctorado-ML/benchmark.git (synced 2025-08-15 15:35:52 +00:00)
Fix circular definition
@@ -11,137 +11,3 @@ ALL_METRICS = (
 )
-
-
-class EnvData:
-    @staticmethod
-    def load():
-        args = {}
-        with open(Files.dot_env) as f:
-            for line in f.read().splitlines():
-                if line == "" or line.startswith("#"):
-                    continue
-                key, value = line.split("=")
-                args[key] = value
-        return args
-
-
-class EnvDefault(argparse.Action):
-    # Thanks to https://stackoverflow.com/users/445507/russell-heilling
-    def __init__(self, envvar, required=True, default=None, **kwargs):
-        self._args = EnvData.load()
-        default = self._args[envvar]
-        required = False
-        super(EnvDefault, self).__init__(
-            default=default, required=required, **kwargs
-        )
-
-    def __call__(self, parser, namespace, values, option_string=None):
-        setattr(namespace, self.dest, values)
-
-
-class Arguments:
-    def __init__(self):
-        self.ap = argparse.ArgumentParser()
-        models_data = Models.define_models(random_state=0)
-        models = "{" + ", ".join(models_data) + "}"
-        self.parameters = {
-            "best": [
-                ("-b", "--best"),
-                {
-                    "type": str,
-                    "required": False,
-                    "help": "best results of models",
-                },
-            ],
-            "color": [],
-            "compare": [
-                ("-c", "--compare"),
-                {
-                    "type": bool,
-                    "required": False,
-                    "help": "Compare accuracy with best results",
-                },
-            ],
-            "dataset": [],
-            "excel": [
-                ("-x", "--excel"),
-                {
-                    "type": bool,
-                    "required": False,
-                    "default": False,
-                    "help": "Generate Excel File",
-                },
-            ],
-            "file": [
-                ("-f", "--file"),
-                {"type": str, "required": False, "help": "Result file"},
-            ],
-            "grid": [
-                ("-g", "--grid"),
-                {
-                    "type": str,
-                    "required": False,
-                    "help": "grid results of model",
-                },
-            ],
-            "grid_paramfile": [],
-            "hidden": [],
-            "hyperparameters": [],
-            "key": [],
-            "lose": [],
-            "model": [
-                ("-m", "--model"),
-                {
-                    "type": str,
-                    "required": True,
-                    "choices": list(models_data),
-                    "help": f"model name: {models}",
-                },
-            ],
-            "model1": [],
-            "model2": [],
-            "nan": [],
-            "number": [],
-            "n_folds": [],
-            "paramfile": [],
-            "platform": [],
-            "quiet": [],
-            "report": [],
-            "score": [
-                ("-s", "--score"),
-                {
-                    "action": EnvDefault,
-                    "envvar": "score",
-                    "type": str,
-                    "required": True,
-                    "choices": ALL_METRICS,
-                },
-            ],
-            "sql": [
-                ("-q", "--sql"),
-                {"type": bool, "required": False, "help": "Generate SQL File"},
-            ],
-            "stratified": [],
-            "tex_output": [
-                ("-t", "--tex-output"),
-                {
-                    "type": bool,
-                    "required": False,
-                    "default": False,
-                    "help": "Generate Tex file with the table",
-                },
-            ],
-            "title": [],
-            "win": [],
-        }
-
-    def xset(self, *arg_name, **kwargs):
-        print("parameters", arg_name[0])
-        names, default = self.parameters[arg_name[0]]
-        self.ap.add_argument(
-            *names,
-            **{**default, **kwargs},
-        )
-        return self
-
-    def parse(self):
-        return self.ap.parse_args()
@@ -14,8 +14,7 @@ from sklearn.model_selection import (
     cross_validate,
 )
 from .Utils import Folders, Files
-from .Arguments import EnvData
-from .Models import Models
+from .Models import Models, EnvData
 
 
 class Randomized:
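Aside: the import rewiring above is the heart of the fix. The old layout had Arguments.py importing Models (for Models.define_models) while other modules imported EnvData from Arguments; moving EnvData, EnvDefault and Arguments into Models.py makes the dependency one-way. The exact cycle is not fully visible in this diff, so the following is only a hedged, runnable sketch of the failure mode it removes, built with throwaway modules whose names mimic the repo but whose contents are invented:

import os
import sys
import tempfile

# Build a disposable package with two modules that import each other.
pkg = os.path.join(tempfile.mkdtemp(), "demo")
os.makedirs(pkg)
open(os.path.join(pkg, "__init__.py"), "w").close()
# demo.Arguments pulls Models in before it gets to define EnvData...
with open(os.path.join(pkg, "Arguments.py"), "w") as f:
    f.write("from demo.Models import Models\nclass EnvData:\n    pass\n")
# ...but demo.Models wants EnvData from the still-initializing demo.Arguments.
with open(os.path.join(pkg, "Models.py"), "w") as f:
    f.write("from demo.Arguments import EnvData\nclass Models:\n    pass\n")

sys.path.insert(0, os.path.dirname(pkg))
try:
    import demo.Arguments
except ImportError as e:
    print("circular import:", e)

Running it prints something like "circular import: cannot import name 'EnvData' from partially initialized module 'demo.Arguments'", the classic symptom the commit title refers to.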
@@ -1,3 +1,4 @@
+import argparse
 from statistics import mean
 from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier
 from sklearn.ensemble import (
@@ -11,6 +12,15 @@ from stree import Stree
 from wodt import Wodt
 from odte import Odte
 from xgboost import XGBClassifier
+from .Utils import Files
+
+ALL_METRICS = (
+    "accuracy",
+    "f1-macro",
+    "f1-micro",
+    "f1-weighted",
+    "roc-auc-ovr",
+)
 
 
 class Models:
@@ -89,3 +99,139 @@ class Models:
         nodes, leaves = result.nodes_leaves()
         depth = result.depth_ if hasattr(result, "depth_") else 0
         return nodes, leaves, depth
+
+
+class EnvData:
+    @staticmethod
+    def load():
+        args = {}
+        with open(Files.dot_env) as f:
+            for line in f.read().splitlines():
+                if line == "" or line.startswith("#"):
+                    continue
+                key, value = line.split("=")
+                args[key] = value
+        return args
+
+
+class EnvDefault(argparse.Action):
+    # Thanks to https://stackoverflow.com/users/445507/russell-heilling
+    def __init__(self, envvar, required=True, default=None, **kwargs):
+        self._args = EnvData.load()
+        default = self._args[envvar]
+        required = False
+        super(EnvDefault, self).__init__(
+            default=default, required=required, **kwargs
+        )
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        setattr(namespace, self.dest, values)
+
+
+class Arguments:
+    def __init__(self):
+        self.ap = argparse.ArgumentParser()
+        models_data = Models.define_models(random_state=0)
+        self.parameters = {
+            "best": [
+                ("-b", "--best"),
+                {
+                    "type": str,
+                    "required": False,
+                    "help": "best results of models",
+                },
+            ],
+            "color": [],
+            "compare": [
+                ("-c", "--compare"),
+                {
+                    "type": bool,
+                    "required": False,
+                    "help": "Compare accuracy with best results",
+                },
+            ],
+            "dataset": [],
+            "excel": [
+                ("-x", "--excel"),
+                {
+                    "type": bool,
+                    "required": False,
+                    "default": False,
+                    "help": "Generate Excel File",
+                },
+            ],
+            "file": [
+                ("-f", "--file"),
+                {"type": str, "required": False, "help": "Result file"},
+            ],
+            "grid": [
+                ("-g", "--grid"),
+                {
+                    "type": str,
+                    "required": False,
+                    "help": "grid results of model",
+                },
+            ],
+            "grid_paramfile": [],
+            "hidden": [],
+            "hyperparameters": [],
+            "key": [],
+            "lose": [],
+            "model": [
+                ("-m", "--model"),
+                {
+                    "type": str,
+                    "required": True,
+                    "choices": list(models_data),
+                    "action": EnvDefault,
+                    "envvar": "model",
+                    "help": f"model name",
+                },
+            ],
+            "model1": [],
+            "model2": [],
+            "nan": [],
+            "number": [],
+            "n_folds": [],
+            "paramfile": [],
+            "platform": [],
+            "quiet": [],
+            "report": [],
+            "score": [
+                ("-s", "--score"),
+                {
+                    "action": EnvDefault,
+                    "envvar": "score",
+                    "type": str,
+                    "required": True,
+                    "choices": ALL_METRICS,
+                },
+            ],
+            "sql": [
+                ("-q", "--sql"),
+                {"type": bool, "required": False, "help": "Generate SQL File"},
+            ],
+            "stratified": [],
+            "tex_output": [
+                ("-t", "--tex-output"),
+                {
+                    "type": bool,
+                    "required": False,
+                    "default": False,
+                    "help": "Generate Tex file with the table",
+                },
+            ],
+            "title": [],
+            "win": [],
+        }
+
+    def xset(self, *arg_name, **kwargs):
+        names, default = self.parameters[arg_name[0]]
+        self.ap.add_argument(
+            *names,
+            **{**default, **kwargs},
+        )
+        return self
+
+    def parse(self):
+        return self.ap.parse_args()
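A small note on the Arguments class that lands here: xset applies the stored per-flag spec but lets call-site keyword arguments win, because later keys override earlier ones in a dict literal. That is what allows the last hunk below to relax a preset with xset("model", required=False). A minimal sketch of the merge (values are illustrative):

default = {"type": str, "required": True}
kwargs = {"required": False}
# Later keys win, so the call-site override replaces the preset value.
print({**default, **kwargs})  # {'type': <class 'str'>, 'required': False}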
@@ -2,13 +2,6 @@ import os
 import subprocess
 
 BEST_ACCURACY_STREE = 40.282203
-ALL_METRICS = (
-    "accuracy",
-    "f1-macro",
-    "f1-micro",
-    "f1-weighted",
-    "roc-auc-ovr",
-)
 
 
 class Folders:
@@ -1,6 +1,6 @@
 from .Experiments import Experiment, Datasets, DatasetsSurcov, DatasetsTanveer
 from .Results import Report, Summary
-from .Arguments import EnvDefault
+from .Models import EnvDefault
 
 __author__ = "Ricardo Montañana Gómez"
 __copyright__ = "Copyright 2020-2022, Ricardo Montañana Gómez"
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 from benchmark.Results import Benchmark
 from benchmark.Utils import Files
-from benchmark.Arguments import Arguments
+from benchmark.Models import Arguments
 
 
 arguments = Arguments()
@@ -6,7 +6,7 @@ from benchmark.Utils import (
     Files,
     TextColor,
 )
-from benchmark.Arguments import Arguments
+from benchmark.Models import Arguments
 
 
 """Build report on screen of a result file, optionally generate excel and sql
@@ -44,7 +44,9 @@ def default_report():
 if __name__ == "__main__":
     arguments = Arguments()
     arguments.xset("file").xset("excel").xset("sql").xset("compare")
-    arguments.xset("best").xset("grid").xset("model").xset("score")
+    arguments.xset("best").xset("grid").xset("model", required=False).xset(
+        "score"
+    )
     args = arguments.parse()
 
     if args.grid:
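The EnvDefault action moved into Models.py is what lets a nominally required flag such as --score or --model be omitted on the command line: the action forces required=False and takes its default from the project's .env file. A self-contained sketch of the pattern follows; the temporary .env path and its contents are illustrative stand-ins for the repo's Files.dot_env:

import argparse
import os
import tempfile

# Illustrative .env; the real code reads Files.dot_env instead.
env_file = os.path.join(tempfile.mkdtemp(), ".env")
with open(env_file, "w") as f:
    f.write("score=accuracy\n")


def load_env(path):
    # Same parsing as EnvData.load(): skip blanks and comments, split on "=".
    args = {}
    with open(path) as f:
        for line in f.read().splitlines():
            if line == "" or line.startswith("#"):
                continue
            key, value = line.split("=")
            args[key] = value
    return args


class EnvDefault(argparse.Action):
    # A "required" flag becomes optional; its default comes from the .env file.
    def __init__(self, envvar, required=True, default=None, **kwargs):
        default = load_env(env_file)[envvar]
        super().__init__(default=default, required=False, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, values)


parser = argparse.ArgumentParser()
parser.add_argument("-s", "--score", action=EnvDefault, envvar="score", type=str)
print(parser.parse_args([]).score)                  # -> accuracy (from .env)
print(parser.parse_args(["-s", "f1-macro"]).score)  # -> f1-macro (CLI wins)

Run as-is it prints accuracy and then f1-macro: the .env value fills in when the flag is absent, and an explicit command-line value still wins.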