21 Commits

Author SHA1 Message Date
fea46834c8 Update bayesclass models 2022-11-24 00:20:29 +01:00
a94a33e028 Update actions 2022-11-23 22:33:22 +01:00
b05a62b2e8 Update requirements and github actions 2022-11-23 22:21:34 +01:00
2baaf753ef Add terminal support to debug github action 2022-11-23 12:58:00 +01:00
b01ee40df2 Update main.yml 2022-11-23 09:43:51 +01:00
ed308773ee Update main.yml 2022-11-23 09:34:43 +01:00
0782736338 Update tests be_init_project_tests 2022-11-23 01:31:01 +01:00
71a11110bd Update tests 2022-11-22 23:32:28 +01:00
3a2ec38671 Update be_list to new formats 2022-11-22 17:38:11 +01:00
f60d9365dd Refactor be_report and fix error in datasets 2022-11-22 16:47:03 +01:00
5d7ed6f1ed Fix be_list Results error 2022-11-22 16:26:24 +01:00
8aa76c27c3 Refactor Datasets 2022-11-22 16:26:04 +01:00
93f0db36fa Fix stratified default value from .env 2022-11-22 01:47:12 +01:00
4e0be95a00 Refactor be_list 2022-11-21 20:22:59 +01:00
e76366561c Add be_init_project to scripts 2022-11-21 00:07:29 +01:00
7e9bd7ae4a Begin refactor be_list arguments 2022-11-20 20:17:58 +01:00
3ade3f4022 Add incompatible hyparams to be_main 2022-11-20 19:10:28 +01:00
1b8a424ad3 Add subparser to be_report & tests 2022-11-20 18:23:26 +01:00
146304f4b5 Refactor Arguments to be child of ArgumentParser 2022-11-19 21:25:50 +01:00
07172b91c5 Add overrides to args parse for dataset/title in be_main 2022-11-19 21:16:29 +01:00
Ricardo Montañana Gómez 68d9cb776e Merge pull request #7 from Doctorado-ML:add_excel_belist (Add excel output of reports of be_list) 2022-11-18 23:37:17 +01:00
42 changed files with 646 additions and 344 deletions

View File

@@ -12,11 +12,8 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [macos-latest, ubuntu-latest]
python: ["3.10", "3.11"]
exclude:
- os: macos-latest
python: "3.11"
os: [ubuntu-latest]
python: ["3.10"]
steps:
- uses: actions/checkout@v3
@@ -46,6 +43,7 @@ jobs:
pip install -q --upgrade pip
pip install -q -r requirements.txt
pip install -q --upgrade codecov coverage black flake8
git clone https://github.com/Doctorado-ML/bayesclass.git
- name: Lint
run: |
black --check --diff benchmark

View File

@@ -36,6 +36,7 @@ class EnvDefault(argparse.Action):
self, envvar, required=True, default=None, mandatory=False, **kwargs
):
self._args = EnvData.load()
self._overrides = {}
if required and not mandatory:
default = self._args[envvar]
required = False
@@ -47,24 +48,27 @@ class EnvDefault(argparse.Action):
setattr(namespace, self.dest, values)
class Arguments:
def __init__(self):
self.ap = argparse.ArgumentParser()
class Arguments(argparse.ArgumentParser):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
models_data = Models.define_models(random_state=0)
self._overrides = {}
self._subparser = None
self.parameters = {
"best": [
("-b", "--best"),
"best_paramfile": [
("-b", "--best_paramfile"),
{
"type": str,
"action": "store_true",
"required": False,
"help": "best results of models",
"default": False,
"help": "Use best hyperparams file?",
},
],
"color": [
("-c", "--color"),
{
"type": bool,
"required": False,
"action": "store_true",
"default": False,
"help": "use colors for the tree",
},
@@ -72,8 +76,9 @@ class Arguments:
"compare": [
("-c", "--compare"),
{
"type": bool,
"action": "store_true",
"required": False,
"default": False,
"help": "Compare accuracy with best results",
},
],
@@ -81,6 +86,8 @@ class Arguments:
("-d", "--dataset"),
{
"type": str,
"envvar": "dataset", # for compatiblity with EnvDefault
"action": EnvDefault,
"required": False,
"help": "dataset to work with",
},
@@ -88,38 +95,26 @@ class Arguments:
"excel": [
("-x", "--excel"),
{
"type": bool,
"required": False,
"action": "store_true",
"default": False,
"help": "Generate Excel File",
},
],
"file": [
("-f", "--file"),
{"type": str, "required": False, "help": "Result file"},
],
"grid": [
("-g", "--grid"),
{
"type": str,
"required": False,
"help": "grid results of model",
},
],
"grid_paramfile": [
("-g", "--grid_paramfile"),
{
"type": bool,
"required": False,
"action": "store_true",
"default": False,
"help": "Use best hyperparams file?",
"help": "Use grid output hyperparams file?",
},
],
"hidden": [
("--hidden",),
{
"type": str,
"required": False,
"action": "store_true",
"default": False,
"help": "Show hidden results",
},
@@ -140,8 +135,8 @@ class Arguments:
"lose": [
("-l", "--lose"),
{
"type": bool,
"default": False,
"action": "store_true",
"required": False,
"help": "show lose results",
},
@@ -178,9 +173,10 @@ class Arguments:
"nan": [
("--nan",),
{
"type": bool,
"action": "store_true",
"required": False,
"help": "Move nan results to hidden folder",
"default": False,
"help": "List nan results to hidden folder",
},
],
"number": [
@@ -202,15 +198,6 @@ class Arguments:
"help": "number of folds",
},
],
"paramfile": [
("-f", "--paramfile"),
{
"type": bool,
"required": False,
"default": False,
"help": "Use best hyperparams file?",
},
],
"platform": [
("-P", "--platform"),
{
@@ -224,7 +211,7 @@ class Arguments:
"quiet": [
("-q", "--quiet"),
{
"type": bool,
"action": "store_true",
"required": False,
"default": False,
},
@@ -232,7 +219,7 @@ class Arguments:
"report": [
("-r", "--report"),
{
"type": bool,
"action": "store_true",
"default": False,
"required": False,
"help": "Report results",
@@ -250,14 +237,18 @@ class Arguments:
],
"sql": [
("-q", "--sql"),
{"type": bool, "required": False, "help": "Generate SQL File"},
{
"required": False,
"action": "store_true",
"default": False,
"help": "Generate SQL File",
},
],
"stratified": [
("-t", "--stratified"),
{
"action": EnvDefault,
"envvar": "stratified",
"type": str,
"required": True,
"help": "Stratified",
},
@@ -265,8 +256,8 @@ class Arguments:
"tex_output": [
("-t", "--tex-output"),
{
"type": bool,
"required": False,
"action": "store_true",
"default": False,
"help": "Generate Tex file with the table",
},
@@ -278,8 +269,8 @@ class Arguments:
"win": [
("-w", "--win"),
{
"type": bool,
"default": False,
"action": "store_true",
"required": False,
"help": "show win results",
},
@@ -287,12 +278,43 @@ class Arguments:
}
def xset(self, *arg_name, **kwargs):
names, default = self.parameters[arg_name[0]]
self.ap.add_argument(
names, parameters = self.parameters[arg_name[0]]
if "overrides" in kwargs:
self._overrides[names[0]] = (kwargs["overrides"], kwargs["const"])
del kwargs["overrides"]
self.add_argument(
*names,
**{**default, **kwargs},
**{**parameters, **kwargs},
)
return self
def add_subparser(
self, dest="subcommand", help_text="help for subcommand"
):
self._subparser = self.add_subparsers(dest=dest, help=help_text)
def add_subparsers_options(self, subparser, arguments):
command, help_text = subparser
parser = self._subparser.add_parser(command, help=help_text)
for name, args in arguments:
try:
names, parameters = self.parameters[name]
except KeyError:
names = (name,)
parameters = {}
# Order of args is important
parser.add_argument(*names, **{**args, **parameters})
def add_exclusive(self, hyperparameters, required=False):
group = self.add_mutually_exclusive_group(required=required)
for name in hyperparameters:
names, parameters = self.parameters[name]
group.add_argument(*names, **parameters)
def parse(self, args=None):
return self.ap.parse_args(args)
for key, (dest_key, value) in self._overrides.items():
if args is None:
args = sys.argv[1:]
if key in args:
args.extend((f"--{dest_key}", value))
return super().parse_args(args)
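
Arguments is now a subclass of argparse.ArgumentParser: boolean options use action="store_true" instead of type=bool, xset() can register an override that injects another option whenever a given flag is present, and parse() applies those overrides before delegating to parse_args(). A minimal usage sketch, assuming the benchmark package is importable and a .env file supplies the EnvDefault settings:

# Sketch of the refactored Arguments API (assumes a valid .env for the
# EnvDefault-backed options such as dataset).
from benchmark.Arguments import Arguments

arguments = Arguments(prog="example")
arguments.xset("quiet").xset("report")     # plain store_true flags now
arguments.xset("title")
# Passing -d also injects --title with the given const value
arguments.xset("dataset", overrides="title", const="Test with only one dataset")

args = arguments.parse(["-d", "iris", "-r"])
print(args.dataset, args.report, args.title)   # iris True Test with only one dataset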

View File

@@ -1,4 +1,5 @@
import os
from types import SimpleNamespace
import pandas as pd
import numpy as np
from scipy.io import arff
@@ -31,6 +32,7 @@ class DatasetsArff:
data = arff.loadarff(file_name)
df = pd.DataFrame(data[0])
df.dropna(axis=0, how="any", inplace=True)
self.dataset = df
X = df.drop(class_name, axis=1)
self.features = X.columns
self.class_name = class_name
@@ -55,8 +57,12 @@ class DatasetsTanveer:
sep="\t",
index_col=0,
)
X = data.drop("clase", axis=1).to_numpy()
X = data.drop("clase", axis=1)
self.features = X.columns
X = X.to_numpy()
y = data["clase"].to_numpy()
self.dataset = data
self.class_name = "clase"
return X, y
@@ -77,8 +83,11 @@ class DatasetsSurcov:
)
data.dropna(axis=0, how="any", inplace=True)
self.columns = data.columns
col_list = ["class"]
X = data.drop(col_list, axis=1).to_numpy()
X = data.drop(["class"], axis=1)
self.features = X.columns
self.class_name = "class"
self.dataset = data
X = X.to_numpy()
y = data["class"].to_numpy()
return X, y
@@ -86,50 +95,48 @@ class DatasetsSurcov:
class Datasets:
def __init__(self, dataset_name=None):
envData = EnvData.load()
class_name = getattr(
# DatasetsSurcov, DatasetsTanveer, DatasetsArff,...
source_name = getattr(
__import__(__name__),
f"Datasets{envData['source_data']}",
)
self.load = (
self.load_discretized
if envData["discretize"] == "1"
else self.load_continuous
)
self.dataset = class_name()
self.discretize = envData["discretize"] == "1"
self.dataset = source_name()
self.class_names = []
self._load_names()
if dataset_name is not None:
try:
class_name = self.class_names[
self.data_sets.index(dataset_name)
]
self.class_names = [class_name]
except ValueError:
raise ValueError(f"Unknown dataset: {dataset_name}")
self.data_sets = [dataset_name]
self.data_sets = []
# initialize self.class_names & self.data_sets
class_names, sets = self._init_names(dataset_name)
self.class_names = class_names
self.data_sets = sets
def _load_names(self):
def _init_names(self, dataset_name):
file_name = os.path.join(self.dataset.folder(), Files.index)
default_class = "class"
with open(file_name) as f:
self.data_sets = f.read().splitlines()
self.class_names = [default_class] * len(self.data_sets)
if "," in self.data_sets[0]:
sets = f.read().splitlines()
class_names = [default_class] * len(sets)
if "," in sets[0]:
result = []
class_names = []
for data in self.data_sets:
for data in sets:
name, class_name = data.split(",")
result.append(name)
class_names.append(class_name)
self.data_sets = result
self.class_names = class_names
sets = result
# Set as dataset list the dataset passed as argument
if dataset_name is None:
return class_names, sets
try:
class_name = class_names[sets.index(dataset_name)]
except ValueError:
raise ValueError(f"Unknown dataset: {dataset_name}")
return [class_name], [dataset_name]
def get_attributes(self, name):
class Attributes:
pass
X, y = self.load_continuous(name)
attr = Attributes()
tmp = self.discretize
self.discretize = False
X, y = self.load(name)
attr = SimpleNamespace()
values, counts = np.unique(y, return_counts=True)
comp = ""
sep = ""
@@ -140,6 +147,7 @@ class Datasets:
attr.classes = len(np.unique(y))
attr.samples = X.shape[0]
attr.features = X.shape[1]
self.discretize = tmp
return attr
def get_features(self):
@@ -148,14 +156,25 @@ class Datasets:
def get_class_name(self):
return self.dataset.class_name
def load_continuous(self, name):
def get_dataset(self):
return self.dataset.dataset
def load(self, name, dataframe=False):
try:
class_name = self.class_names[self.data_sets.index(name)]
return self.dataset.load(name, class_name)
X, y = self.dataset.load(name, class_name)
if self.discretize:
X = self.discretize_dataset(X, y)
dataset = pd.DataFrame(X, columns=self.get_features())
dataset[self.get_class_name()] = y
self.dataset.dataset = dataset
if dataframe:
return self.get_dataset()
return X, y
except (ValueError, FileNotFoundError):
raise ValueError(f"Unknown dataset: {name}")
def discretize(self, X, y):
def discretize_dataset(self, X, y):
"""Supervised discretization with Fayyad and Irani's MDLP algorithm.
Parameters
@@ -173,14 +192,5 @@ class Datasets:
Xdisc = discretiz.fit_transform(X, y)
return Xdisc
def load_discretized(self, name, dataframe=False):
X, yd = self.load_continuous(name)
Xd = self.discretize(X, yd)
dataset = pd.DataFrame(Xd, columns=self.get_features())
dataset[self.get_class_name()] = yd
if dataframe:
return dataset
return Xd, yd
def __iter__(self) -> Diterator:
return Diterator(self.data_sets)
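
load_continuous/load_discretized are folded into a single load(): whether the data is discretized is decided once from the discretize entry of .env, get_attributes() temporarily disables it, and the loaded frame is cached so load(name, dataframe=True) can return a pandas DataFrame. A short sketch of the consolidated API, assuming the class lives in benchmark.Datasets (like the other modules) and that .env plus the datasets index file are in place:

# Sketch of the consolidated Datasets API (the module path and the "iris"
# entry in the index file are assumptions of this example).
from benchmark.Datasets import Datasets

dt = Datasets()                            # or Datasets(dataset_name="iris")
X, y = dt.load("iris")                     # discretized or not, per .env
frame = dt.load("iris", dataframe=True)    # same data as a pandas DataFrame
attrs = dt.get_attributes("iris")          # SimpleNamespace: samples/features/classes
print(dt.get_class_name(), attrs.samples, attrs.features, attrs.classes)
for name in dt:                            # Diterator over the configured datasets
    print(name)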

View File

@@ -8,7 +8,7 @@ from sklearn.ensemble import (
)
from sklearn.svm import SVC
from stree import Stree
from bayesclass import TAN, KDB, AODE
from bayesclass.clfs import TAN, KDB, AODE
from wodt import Wodt
from odte import Odte
from xgboost import XGBClassifier

View File

@@ -1,6 +1,7 @@
import os
import sys
from operator import itemgetter
from types import SimpleNamespace
import math
import json
import abc
@@ -22,8 +23,8 @@ from .Utils import (
from ._version import __version__
def get_input(is_test):
return "test" if is_test else input()
def get_input(message="", is_test=False):
return "test" if is_test else input(message)
class BestResultsEver:
@@ -251,7 +252,7 @@ class ReportBest(BaseReport):
"Hyperparameters",
]
def __init__(self, score, model, best, grid):
def __init__(self, score, model, best):
name = (
Files.best_results(score, model)
if best
@@ -259,7 +260,6 @@ class ReportBest(BaseReport):
)
file_name = os.path.join(Folders.results, name)
self.best = best
self.grid = grid
self.score_name = score
self.model = model
super().__init__(file_name, best_file=True)
@@ -1353,13 +1353,14 @@ class StubReport(BaseReport):
class Summary:
def __init__(self, hidden=False) -> None:
def __init__(self, hidden=False, compare=False) -> None:
self.results = Files().get_all_results(hidden=hidden)
self.data = []
self.data_filtered = []
self.datasets = {}
self.models = set()
self.hidden = hidden
self.compare = compare
def get_models(self):
return sorted(self.models)
@@ -1402,18 +1403,15 @@ class Summary:
self.data.append(entry)
def get_results_criteria(
self,
score,
model,
input_data,
sort_key,
number,
self, score, model, input_data, sort_key, number, nan=False
):
data = self.data.copy() if input_data is None else input_data
if score:
data = [x for x in data if x["score"] == score]
if model:
data = [x for x in data if x["model"] == model]
if nan:
data = [x for x in data if x["metric"] != x["metric"]]
keys = (
itemgetter(sort_key, "time")
if sort_key == "date"
@@ -1431,11 +1429,12 @@ class Summary:
input_data=None,
sort_key="date",
number=0,
nan=False,
) -> None:
"""Print the list of results"""
if self.data_filtered == []:
self.data_filtered = self.get_results_criteria(
score, model, input_data, sort_key, number
score, model, input_data, sort_key, number, nan=nan
)
if self.data_filtered == []:
raise ValueError(NO_RESULTS)
@@ -1477,44 +1476,99 @@ class Summary:
)
)
def manage_results(self, excel, is_test):
def manage_results(self):
"""Manage results showed in the summary
return True if excel file is created False otherwise
"""
num = ""
book = None
while True:
print(
"Which result do you want to report? (q to quit, r to list "
"again, number to report): ",
end="",
def process_file(num, command, path):
num = int(num)
name = self.data_filtered[num]["file"]
file_name_result = os.path.join(path, name)
verb1, verb2 = (
("delete", "Deleting")
if command == cmd.delete
else (
"hide",
"Hiding",
)
)
num = get_input(is_test)
if num == "r":
conf_message = (
TextColor.RED
+ f"Are you sure to {verb1} {file_name_result} (y/n)? "
)
confirm = get_input(message=conf_message)
if confirm == "y":
print(TextColor.YELLOW + f"{verb2} {file_name_result}")
if command == cmd.delete:
os.unlink(file_name_result)
else:
os.rename(
os.path.join(Folders.results, name),
os.path.join(Folders.hidden_results, name),
)
self.data_filtered.pop(num)
get_input(message="Press enter to continue")
self.list_results()
if num == "q":
if excel:
cmd = SimpleNamespace(
quit="q", relist="r", delete="d", hide="h", excel="e"
)
message = (
TextColor.ENDC
+ f"Choose option {str(cmd).replace('namespace', '')}: "
)
path = Folders.hidden_results if self.hidden else Folders.results
book = None
max_value = len(self.data)
while True:
match get_input(message=message).split():
case [cmd.relist]:
self.list_results()
case [cmd.quit]:
if book is not None:
book.close()
return True
return False
if num.isdigit() and int(num) < len(self.data) and int(num) >= 0:
rep = Report(self.data_filtered[int(num)]["file"], self.hidden)
rep.report()
if excel and not self.hidden:
return False
case [cmd.hide, num] if num.isdigit() and int(num) < max_value:
if self.hidden:
print("Already hidden")
else:
process_file(num, path=path, command=cmd.hide)
case [cmd.delete, num] if num.isdigit() and int(
num
) < max_value:
process_file(num=num, path=path, command=cmd.delete)
case [cmd.excel, num] if num.isdigit() and int(
num
) < max_value:
# Add to excel file result #num
num = int(num)
file_name_result = os.path.join(
path, self.data_filtered[num]["file"]
)
if book is None:
file_name = Files.be_list_excel
book = xlsxwriter.Workbook(
file_name, {"nan_inf_to_errors": True}
)
excel = Excel(
file_name=self.data_filtered[int(num)]["file"],
file_name=file_name_result,
book=book,
compare=self.compare,
)
excel.report()
else:
if num not in ("r", "q"):
print(f"Invalid option {num}. Try again!")
print(f"Added {file_name_result} to {Files.be_list_excel}")
case [num] if num.isdigit() and int(num) < max_value:
# Report the result #num
num = int(num)
file_name_result = os.path.join(
path, self.data_filtered[num]["file"]
)
rep = Report(file_name_result, compare=self.compare)
rep.report()
case _:
print("Invalid option. Try again!")
def show_result(self, data: dict, title: str = "") -> None:
def whites(n: int) -> str:
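
manage_results() replaces the old "number or q/r" prompt with a small command language parsed by structural pattern matching: q quits (closing any open workbook), r relists, d <n> deletes after confirmation, h <n> moves a result to the hidden folder, e <n> appends result <n> to the be_list Excel book, and a bare number prints the report. A stand-alone sketch of the same dispatch idea, with the file operations reduced to return values:

# Stand-alone illustration of the match/case dispatch used above; it only
# returns labels instead of deleting/hiding/exporting files.
from types import SimpleNamespace

cmd = SimpleNamespace(quit="q", relist="r", delete="d", hide="h", excel="e")

def dispatch(line: str, max_value: int) -> str:
    match line.split():
        case [cmd.relist]:
            return "relist"
        case [cmd.quit]:
            return "quit"
        case [cmd.delete, num] if num.isdigit() and int(num) < max_value:
            return f"delete #{num}"
        case [cmd.hide, num] if num.isdigit() and int(num) < max_value:
            return f"hide #{num}"
        case [cmd.excel, num] if num.isdigit() and int(num) < max_value:
            return f"excel #{num}"
        case [num] if num.isdigit() and int(num) < max_value:
            return f"report #{num}"
        case _:
            return "invalid"

print(dispatch("e 2", 5))   # -> excel #2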

View File

@@ -146,3 +146,7 @@ class TextColor:
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
WHITE = "\033[97m"
GREY = "\033[90m"
BLACK = "\033[90m"
DEFAULT = "\033[99m"

View File

@@ -5,7 +5,7 @@ from benchmark.Arguments import Arguments
def main(args_test=None):
arguments = Arguments()
arguments = Arguments(prog="be_benchmark")
arguments.xset("score").xset("excel").xset("tex_output").xset("quiet")
args = arguments.parse(args_test)
benchmark = Benchmark(score=args.score, visualize=not args.quiet)

View File

@@ -21,5 +21,5 @@ def main(args_test=None):
print(e)
else:
if args.report:
report = ReportBest(args.score, args.model, best=True, grid=False)
report = ReportBest(args.score, args.model, best=True)
report.report()

View File

@@ -0,0 +1,33 @@
#!/usr/bin/env python
import os
from benchmark.Utils import Files, Folders
from benchmark.Arguments import Arguments
def main(args_test=None):
arguments = Arguments(prog="be_init_project")
arguments.add_argument("project_name", help="Project name")
args = arguments.parse(args_test)
folders = []
folders.append(args.project_name)
folders.append(os.path.join(args.project_name, Folders.results))
folders.append(os.path.join(args.project_name, Folders.hidden_results))
folders.append(os.path.join(args.project_name, Folders.exreport))
folders.append(os.path.join(args.project_name, Folders.report))
folders.append(os.path.join(args.project_name, Folders.img))
try:
for folder in folders:
print(f"Creating folder {folder}")
os.makedirs(folder)
except FileExistsError as e:
print(e)
exit(1)
env_src = os.path.join(Folders.src(), "..", f"{Files.dot_env}.dist")
env_to = os.path.join(args.project_name, Files.dot_env)
os.system(f"cp {env_src} {env_to}")
print("Done!")
print(
"Please, edit .env file with your settings and add a datasets folder"
)
print("with an all.txt file with the datasets you want to use.")
print("In that folder you have to include all the datasets you'll use.")

View File

@@ -1,7 +1,6 @@
#! /usr/bin/env python
import os
from benchmark.Results import Summary
from benchmark.Utils import Folders, Files
from benchmark.Utils import Files
from benchmark.Arguments import Arguments
"""List experiments of a model
@@ -9,12 +8,12 @@ from benchmark.Arguments import Arguments
def main(args_test=None):
arguments = Arguments()
arguments = Arguments(prog="be_list")
arguments.xset("number").xset("model", required=False).xset("key")
arguments.xset("hidden").xset("nan").xset("score", required=False)
arguments.xset("excel")
arguments.xset("score", required=False).xset("compare").xset("hidden")
arguments.xset("nan")
args = arguments.parse(args_test)
data = Summary(hidden=args.hidden)
data = Summary(hidden=args.hidden, compare=args.compare)
data.acquire()
try:
data.list_results(
@@ -22,40 +21,12 @@ def main(args_test=None):
model=args.model,
sort_key=args.key,
number=args.number,
nan=args.nan,
)
is_test = args_test is not None
if not args.nan:
excel_generated = data.manage_results(args.excel, is_test)
if args.excel and excel_generated:
print(f"Generated file: {Files.be_list_excel}")
Files.open(Files.be_list_excel, is_test)
except ValueError as e:
print(e)
return
if args.nan:
results_nan = []
results = data.get_results_criteria(
score=args.score,
model=args.model,
input_data=None,
sort_key=args.key,
number=args.number,
)
for result in results:
if result["metric"] != result["metric"]:
results_nan.append(result)
if results_nan != []:
print(
"\n"
+ "*" * 30
+ " Results with nan moved to hidden "
+ "*" * 30
)
data.data_filtered = []
data.list_results(input_data=results_nan)
for result in results_nan:
name = result["file"]
os.rename(
os.path.join(Folders.results, name),
os.path.join(Folders.hidden_results, name),
)
excel_generated = data.manage_results()
if excel_generated:
print(f"Generated file: {Files.be_list_excel}")
Files.open(Files.be_list_excel, test=args_test is not None)
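
be_list now exposes --compare, --hidden and --nan as plain flags, and everything that used to be flag-driven (Excel export, moving nan results) happens interactively inside Summary.manage_results(), which reports whether a workbook was written. A sketch of the flow the script follows, assuming a project with a results folder and a .env file:

# Sketch of the be_list flow (assumes results/ and .env exist; manage_results
# reads commands such as "e 2", "d 0", "h 1" or "q" interactively).
from benchmark.Results import Summary
from benchmark.Utils import Files

data = Summary(hidden=False, compare=True)
data.acquire()
try:
    data.list_results(score=None, model="STree", sort_key="date", number=0, nan=False)
except ValueError as e:     # NO_RESULTS when nothing matches
    print(e)
else:
    if data.manage_results():
        print(f"Generated file: {Files.be_list_excel}")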

View File

@@ -10,15 +10,20 @@ from benchmark.Arguments import Arguments
def main(args_test=None):
arguments = Arguments()
arguments = Arguments(prog="be_main")
arguments.xset("stratified").xset("score").xset("model", mandatory=True)
arguments.xset("n_folds").xset("platform").xset("quiet").xset("title")
arguments.xset("hyperparameters").xset("paramfile").xset("report")
arguments.xset("grid_paramfile").xset("dataset")
arguments.xset("report")
arguments.add_exclusive(
["grid_paramfile", "best_paramfile", "hyperparameters"]
)
arguments.xset(
"dataset", overrides="title", const="Test with only one dataset"
)
args = arguments.parse(args_test)
report = args.report or args.dataset is not None
if args.grid_paramfile:
args.paramfile = False
args.best_paramfile = False
try:
job = Experiment(
score_name=args.score,
@@ -26,7 +31,7 @@ def main(args_test=None):
stratified=args.stratified,
datasets=Datasets(dataset_name=args.dataset),
hyperparams_dict=args.hyperparameters,
hyperparams_file=args.paramfile,
hyperparams_file=args.best_paramfile,
grid_paramfile=args.grid_paramfile,
progress_bar=not args.quiet,
platform=args.platform,
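
On the command line this means -p/--hyperparameters, -b/--best_paramfile and -g/--grid_paramfile now reject each other, and passing -d/--dataset injects the title "Test with only one dataset" automatically. Hedged examples of the resulting argv lists, the same combinations the tests below exercise:

# Illustration only: argv lists for the refactored be_main.
valid = ["-s", "accuracy", "-m", "STree", "-d", "iris", "-r"]   # title injected
clashes = [
    ["-s", "accuracy", "-m", "SVC", "--title", "test", "-p", '{"C": 17}', "-b"],
    ["-s", "accuracy", "-m", "SVC", "--title", "test", "-b", "-g"],
]
# Each clash makes argparse exit with status 2, e.g.
# "error: argument -b/--best_paramfile: not allowed with argument -p/--hyperparameters"
print("be_main", " ".join(valid))
for argv in clashes:
    print("be_main", " ".join(argv), "-> exit 2")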

View File

@@ -5,46 +5,79 @@ from benchmark.Arguments import Arguments
"""Build report on screen of a result file, optionally generate excel and sql
file, and can compare results of report with best results obtained by model
If no argument is set, displays the datasets and its characteristics
"""
def main(args_test=None):
arguments = Arguments()
arguments.xset("file").xset("excel").xset("sql").xset("compare")
arguments.xset("best").xset("grid").xset("model", required=False)
arguments.xset("score", required=False)
is_test = args_test is not None
arguments = Arguments(prog="be_report")
arguments.add_subparser()
arguments.add_subparsers_options(
(
"best",
"Report best results obtained by any model/score. "
"See be_build_best",
),
[
("model", dict(required=False)),
("score", dict(required=False)),
],
)
arguments.add_subparsers_options(
(
"grid",
"Report grid results obtained by any model/score. "
"See be_build_grid",
),
[
("model", dict(required=False)),
("score", dict(required=False)),
],
)
arguments.add_subparsers_options(
("file", "Report file results"),
[
("file_name", {}),
("excel", {}),
("sql", {}),
("compare", {}),
],
)
arguments.add_subparsers_options(
("datasets", "Report datasets information"),
[
("excel", {}),
],
)
args = arguments.parse(args_test)
if args.best:
args.grid = None
if args.grid:
args.best = None
if args.file is None and args.best is None and args.grid is None:
report = ReportDatasets(args.excel)
report.report()
if args.excel:
is_test = args_test is not None
Files.open(report.get_file_name(), is_test)
else:
if args.best is not None or args.grid is not None:
report = ReportBest(args.score, args.model, args.best, args.grid)
match args.subcommand:
case "best" | "grid":
best = args.subcommand == "best"
report = ReportBest(args.score, args.model, best)
report.report()
else:
case "file":
try:
report = Report(args.file, args.compare)
report = Report(args.file_name, args.compare)
report.report()
except FileNotFoundError as e:
print(e)
else:
report.report()
if args.excel:
excel = Excel(
file_name=args.file,
compare=args.compare,
)
excel.report()
is_test = args_test is not None
Files.open(excel.get_file_name(), is_test)
if args.sql:
sql = SQL(args.file)
sql.report()
return
if args.sql:
sql = SQL(args.file_name)
sql.report()
if args.excel:
excel = Excel(
file_name=args.file_name,
compare=args.compare,
)
excel.report()
Files.open(excel.get_file_name(), is_test)
case "datasets":
report = ReportDatasets(args.excel)
report.report()
if args.excel:
Files.open(report.get_file_name(), is_test)
case _:
arguments.print_help()
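
be_report is reorganized around subcommands (best, grid, file, datasets) and dispatches on args.subcommand; running it without a subcommand prints the parser help. Examples of the new invocations, written as the argv lists the tests pass to main() (some_result.json is a placeholder name):

# New be_report argv forms (subcommand first, then its own options).
examples = [
    ["datasets", "-x"],                           # dataset report + Excel
    ["best", "-s", "accuracy", "-m", "STree"],    # best results of a model
    ["grid", "-s", "accuracy", "-m", "STree"],    # grid results of a model
    ["file", "some_result.json", "-x", "-c"],     # report a result file
]
for argv in examples:
    print("be_report", " ".join(argv))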

View File

@@ -24,13 +24,11 @@ class ArgumentsTest(TestBase):
def test_parameters(self):
expected_parameters = {
"best": ("-b", "--best"),
"best_paramfile": ("-b", "--best_paramfile"),
"color": ("-c", "--color"),
"compare": ("-c", "--compare"),
"dataset": ("-d", "--dataset"),
"excel": ("-x", "--excel"),
"file": ("-f", "--file"),
"grid": ("-g", "--grid"),
"grid_paramfile": ("-g", "--grid_paramfile"),
"hidden": ("--hidden",),
"hyperparameters": ("-p", "--hyperparameters"),
@@ -42,7 +40,6 @@ class ArgumentsTest(TestBase):
"nan": ("--nan",),
"number": ("-n", "--number"),
"n_folds": ("-n", "--n_folds"),
"paramfile": ("-f", "--paramfile"),
"platform": ("-P", "--platform"),
"quiet": ("-q", "--quiet"),
"report": ("-r", "--report"),
@@ -98,3 +95,27 @@ class ArgumentsTest(TestBase):
finally:
os.chdir(path)
self.assertEqual(stderr.getvalue(), f"{NO_ENV}\n")
@patch("sys.stderr", new_callable=StringIO)
def test_overrides(self, stderr):
arguments = self.build_args()
arguments.xset("title")
arguments.xset("dataset", overrides="title", const="sample text")
test_args = ["-n", "3", "-m", "SVC", "-k", "1", "-d", "dataset"]
args = arguments.parse(test_args)
self.assertEqual(stderr.getvalue(), "")
self.assertEqual(args.title, "sample text")
@patch("sys.stderr", new_callable=StringIO)
def test_overrides_no_args(self, stderr):
arguments = self.build_args()
arguments.xset("title")
arguments.xset("dataset", overrides="title", const="sample text")
test_args = None
with self.assertRaises(SystemExit):
arguments.parse(test_args)
self.assertRegexpMatches(
stderr.getvalue(),
r"error: the following arguments are required: -m/--model, "
"-k/--key, --title",
)

View File

@@ -33,8 +33,8 @@ class DatasetTest(TestBase):
def test_load_dataframe(self):
self.set_env(".env.arff")
dt = Datasets()
X, y = dt.load_discretized("iris", dataframe=False)
dataset = dt.load_discretized("iris", dataframe=True)
X, y = dt.load("iris", dataframe=False)
dataset = dt.load("iris", dataframe=True)
class_name = dt.get_class_name()
features = dt.get_features()
self.assertListEqual(y.tolist(), dataset[class_name].tolist())

View File

@@ -69,13 +69,13 @@ class ReportTest(TestBase):
_ = Report("unknown_file")
def test_report_best(self):
report = ReportBest("accuracy", "STree", best=True, grid=False)
report = ReportBest("accuracy", "STree", best=True)
with patch(self.output, new=StringIO()) as stdout:
report.report()
self.check_output_file(stdout, "report_best")
def test_report_grid(self):
report = ReportBest("accuracy", "STree", best=False, grid=True)
report = ReportBest("accuracy", "STree", best=False)
with patch(self.output, new=StringIO()) as stdout:
report.report()
file_name = "report_grid.test"
@@ -90,12 +90,6 @@ class ReportTest(TestBase):
self.assertEqual(line, output_text[index])
def test_report_best_both(self):
report = ReportBest("accuracy", "STree", best=True, grid=True)
with patch(self.output, new=StringIO()) as stdout:
report.report()
self.check_output_file(stdout, "report_best")
@patch("sys.stdout", new_callable=StringIO)
def test_report_datasets(self, mock_output):
report = ReportDatasets()

View File

@@ -13,6 +13,7 @@ from .PairCheck_test import PairCheckTest
from .Arguments_test import ArgumentsTest
from .scripts.Be_Pair_check_test import BePairCheckTest
from .scripts.Be_List_test import BeListTest
from .scripts.Be_Init_Project_test import BeInitProjectTest
from .scripts.Be_Report_test import BeReportTest
from .scripts.Be_Summary_test import BeSummaryTest
from .scripts.Be_Grid_test import BeGridTest

View File

@@ -25,7 +25,7 @@ class BeBenchmarkTest(TestBase):
def test_be_benchmark_complete(self):
stdout, stderr = self.execute_script(
"be_benchmark", ["-s", self.score, "-q", "1", "-t", "1", "-x", "1"]
"be_benchmark", ["-s", self.score, "-q", "-t", "-x"]
)
self.assertEqual(stderr.getvalue(), "")
# Check output
@@ -60,7 +60,7 @@ class BeBenchmarkTest(TestBase):
def test_be_benchmark_single(self):
stdout, stderr = self.execute_script(
"be_benchmark", ["-s", self.score, "-q", "1"]
"be_benchmark", ["-s", self.score, "-q"]
)
self.assertEqual(stderr.getvalue(), "")
# Check output

View File

@@ -67,7 +67,7 @@ class BeBestTest(TestBase):
def test_be_build_best_report(self):
stdout, _ = self.execute_script(
"be_build_best", ["-s", "accuracy", "-m", "ODTE", "-r", "1"]
"be_build_best", ["-s", "accuracy", "-m", "ODTE", "-r"]
)
expected_data = {
"balance-scale": [

View File

@@ -69,7 +69,7 @@ class BeGridTest(TestBase):
def test_be_grid_no_input(self):
stdout, stderr = self.execute_script(
"be_grid",
["-m", "ODTE", "-s", "f1-weighted", "-q", "1"],
["-m", "ODTE", "-s", "f1-weighted", "-q"],
)
self.assertEqual(stderr.getvalue(), "")
grid_file = os.path.join(

View File

@@ -0,0 +1,66 @@
import os
from io import StringIO
from unittest.mock import patch
from ..TestBase import TestBase
from ...Utils import Folders
class BeInitProjectTest(TestBase):
def setUp(self):
self.prepare_scripts_env()
def tearDown(self):
if os.path.exists("test_project"):
os.system("rm -rf test_project")
def assertIsFile(self, file_name):
if not os.path.isfile(file_name):
raise AssertionError(f"File {str(file_name)} does not exist")
def assertIsFolder(self, path):
if not os.path.exists(path):
raise AssertionError(f"Folder {str(path)} does not exist")
def test_be_init_project(self):
test_project = "test_project"
stdout, stderr = self.execute_script("be_init_project", [test_project])
self.assertEqual(stderr.getvalue(), "")
self.check_output_file(stdout, "be_init_project")
# check folders
expected = [
Folders.results,
Folders.hidden_results,
Folders.exreport,
Folders.report,
Folders.img,
]
for folder in expected:
self.assertIsFolder(os.path.join(test_project, folder))
self.assertIsFile(os.path.join(test_project, ".env"))
os.system(f"rm -rf {test_project}")
@patch("sys.stdout", new_callable=StringIO)
@patch("sys.stderr", new_callable=StringIO)
def test_be_init_project_no_arguments(self, stdout, stderr):
with self.assertRaises(SystemExit) as cm:
module = self.search_script("be_init_project")
module.main("")
self.assertEqual(cm.exception.code, 2)
self.check_output_file(stdout, "be_init_project_no_arguments")
self.assertEqual(stderr.getvalue(), "")
@patch("sys.stdout", new_callable=StringIO)
@patch("sys.stderr", new_callable=StringIO)
def test_be_init_project_twice(self, stdout, stderr):
test_project = "test_project"
self.execute_script("be_init_project", [test_project])
with self.assertRaises(SystemExit) as cm:
module = self.search_script("be_init_project")
module.main([test_project])
self.assertEqual(cm.exception.code, 1)
self.assertEqual(
stderr.getvalue(),
f"Creating folder {test_project}\n"
f"[Errno 17] File exists: '{test_project}'\n",
)
self.assertEqual(stdout.getvalue(), "")

View File

@@ -1,4 +1,5 @@
import os
import shutil
from unittest.mock import patch
from openpyxl import load_workbook
from ...Utils import Folders, Files, NO_RESULTS
@@ -27,36 +28,26 @@ class BeListTest(TestBase):
self.assertEqual(stderr.getvalue(), "")
self.check_output_file(stdout, "be_list_report")
@patch("benchmark.Results.get_input", side_effect=iter(["q"]))
def test_be_list_report_excel_none(self, input_data):
stdout, stderr = self.execute_script(
"be_list", ["-m", "STree", "-x", "1"]
)
self.assertEqual(stderr.getvalue(), "")
self.check_output_file(stdout, "be_list_model")
@patch("benchmark.Results.get_input", side_effect=iter(["r", "q"]))
def test_be_list_twice(self, input_data):
stdout, stderr = self.execute_script("be_list", ["-m", "STree"])
self.assertEqual(stderr.getvalue(), "")
self.check_output_file(stdout, "be_list_model_2")
@patch("benchmark.Results.get_input", side_effect=iter(["2", "q"]))
@patch("benchmark.Results.get_input", side_effect=iter(["e 2", "q"]))
def test_be_list_report_excel(self, input_data):
stdout, stderr = self.execute_script(
"be_list", ["-m", "STree", "-x", "1"]
)
stdout, stderr = self.execute_script("be_list", ["-m", "STree"])
self.assertEqual(stderr.getvalue(), "")
self.check_output_file(stdout, "be_list_report_excel")
book = load_workbook(Files.be_list_excel)
sheet = book["STree"]
self.check_excel_sheet(sheet, "excel")
@patch("benchmark.Results.get_input", side_effect=iter(["2", "1", "q"]))
@patch(
"benchmark.Results.get_input", side_effect=iter(["e 2", "e 1", "q"])
)
def test_be_list_report_excel_twice(self, input_data):
stdout, stderr = self.execute_script(
"be_list", ["-m", "STree", "-x", "1"]
)
stdout, stderr = self.execute_script("be_list", ["-m", "STree"])
self.assertEqual(stderr.getvalue(), "")
self.check_output_file(stdout, "be_list_report_excel_2")
book = load_workbook(Files.be_list_excel)
@@ -73,7 +64,36 @@ class BeListTest(TestBase):
self.assertEqual(stderr.getvalue(), "")
self.assertEqual(stdout.getvalue(), f"{NO_RESULTS}\n")
def test_be_list_nan(self):
@patch(
"benchmark.Results.get_input", side_effect=iter(["d 0", "y", "", "q"])
)
# @patch("benchmark.Results.get_input", side_effect=iter(["q"]))
def test_be_list_delete(self, input_data):
def copy_files(source_folder, target_folder, file_name):
source = os.path.join(source_folder, file_name)
target = os.path.join(target_folder, file_name)
shutil.copyfile(source, target)
file_name = (
"results_accuracy_XGBoost_MacBookpro16_2022-05-04_11:00:"
"35_0.json"
)
# move nan result from hidden to results
copy_files(Folders.hidden_results, Folders.results, file_name)
try:
# list and delete result
stdout, stderr = self.execute_script("be_list", "")
self.assertEqual(stderr.getvalue(), "")
self.check_output_file(stdout, "be_list_delete")
except Exception:
# delete the result copied if be_list couldn't
os.unlink(os.path.join(Folders.results, file_name))
self.fail("test_be_list_delete() should not raise exception")
@patch(
"benchmark.Results.get_input", side_effect=iter(["h 0", "y", "", "q"])
)
def test_be_list_hide(self, input_data):
def swap_files(source_folder, target_folder, file_name):
source = os.path.join(source_folder, file_name)
target = os.path.join(target_folder, file_name)
@@ -86,20 +106,38 @@ class BeListTest(TestBase):
# move nan result from hidden to results
swap_files(Folders.hidden_results, Folders.results, file_name)
try:
# list and move nan result to hidden
stdout, stderr = self.execute_script("be_list", ["--nan", "1"])
# list and move nan result to hidden again
stdout, stderr = self.execute_script("be_list", "")
self.assertEqual(stderr.getvalue(), "")
self.check_output_file(stdout, "be_list_nan")
self.check_output_file(stdout, "be_list_hide")
except Exception:
# move back nan result file if be_list couldn't
# delete the result copied if be_list couldn't
swap_files(Folders.results, Folders.hidden_results, file_name)
self.fail("test_be_list_nan() should not raise exception")
self.fail("test_be_list_hide() should not raise exception")
@patch("benchmark.Results.get_input", return_value="q")
def test_be_list_nan_no_nan(self, input_data):
stdout, stderr = self.execute_script("be_list", ["--nan", "1"])
@patch("benchmark.Results.get_input", side_effect=iter(["h 0", "q"]))
def test_be_list_already_hidden(self, input_data):
stdout, stderr = self.execute_script("be_list", ["--hidden"])
self.assertEqual(stderr.getvalue(), "")
self.check_output_file(stdout, "be_list_no_nan")
self.check_output_file(stdout, "be_list_already_hidden")
@patch("benchmark.Results.get_input", side_effect=iter(["h 0", "n", "q"]))
def test_be_list_dont_hide(self, input_data):
stdout, stderr = self.execute_script("be_list", "")
self.assertEqual(stderr.getvalue(), "")
self.check_output_file(stdout, "be_list_default")
@patch("benchmark.Results.get_input", side_effect=iter(["q"]))
def test_be_list_hidden_nan(self, input_data):
stdout, stderr = self.execute_script("be_list", ["--hidden", "--nan"])
self.assertEqual(stderr.getvalue(), "")
self.check_output_file(stdout, "be_list_hidden_nan")
@patch("benchmark.Results.get_input", side_effect=iter(["q"]))
def test_be_list_hidden(self, input_data):
stdout, stderr = self.execute_script("be_list", ["--hidden"])
self.assertEqual(stderr.getvalue(), "")
self.check_output_file(stdout, "be_list_hidden")
def test_be_no_env(self):
path = os.getcwd()

View File

@@ -1,4 +1,5 @@
import os
import json
from io import StringIO
from unittest.mock import patch
from ...Results import Report
@@ -30,7 +31,7 @@ class BeMainTest(TestBase):
def test_be_main_complete(self):
stdout, _ = self.execute_script(
"be_main",
["-s", self.score, "-m", "STree", "--title", "test", "-r", "1"],
["-s", self.score, "-m", "STree", "--title", "test", "-r"],
)
# keep the report name to delete it after
report_name = stdout.getvalue().splitlines()[-1].split("in ")[1]
@@ -66,10 +67,8 @@ class BeMainTest(TestBase):
"STree",
"--title",
"test",
"-f",
"1",
"-b",
"-r",
"1",
],
)
# keep the report name to delete it after
@@ -79,6 +78,48 @@ class BeMainTest(TestBase):
stdout, "be_main_best", [0, 2, 3, 5, 6, 7, 8, 9, 12, 13, 14]
)
@patch("sys.stdout", new_callable=StringIO)
@patch("sys.stderr", new_callable=StringIO)
def test_be_main_incompatible_params(self, stdout, stderr):
m1 = (
"be_main: error: argument -b/--best_paramfile: not allowed with "
"argument -p/--hyperparameters"
)
m2 = (
"be_main: error: argument -g/--grid_paramfile: not allowed with "
"argument -p/--hyperparameters"
)
m3 = (
"be_main: error: argument -g/--grid_paramfile: not allowed with "
"argument -p/--hyperparameters"
)
m4 = m1
p0 = [
"-s",
self.score,
"-m",
"SVC",
"--title",
"test",
]
pset = json.dumps(dict(C=17))
p1 = p0.copy()
p1.extend(["-p", pset, "-b"])
p2 = p0.copy()
p2.extend(["-p", pset, "-g"])
p3 = p0.copy()
p3.extend(["-p", pset, "-g", "-b"])
p4 = p0.copy()
p4.extend(["-b", "-g"])
parameters = [(p1, m1), (p2, m2), (p3, m3), (p4, m4)]
for parameter, message in parameters:
with self.assertRaises(SystemExit) as msg:
module = self.search_script("be_main")
module.main(parameter)
self.assertEqual(msg.exception.code, 2)
self.assertEqual(stderr.getvalue(), "")
self.assertRegexpMatches(stdout.getvalue(), message)
def test_be_main_best_params_non_existent(self):
model = "GBC"
stdout, stderr = self.execute_script(
@@ -90,10 +131,8 @@ class BeMainTest(TestBase):
model,
"--title",
"test",
"-f",
"1",
"-b",
"-r",
"1",
],
)
self.assertEqual(stderr.getvalue(), "")
@@ -117,9 +156,7 @@ class BeMainTest(TestBase):
"--title",
"test",
"-g",
"1",
"-r",
"1",
],
)
self.assertEqual(stderr.getvalue(), "")
@@ -142,9 +179,7 @@ class BeMainTest(TestBase):
"--title",
"test",
"-g",
"1",
"-r",
"1",
],
)
# keep the report name to delete it after

View File

@@ -18,7 +18,7 @@ class BePrintStrees(TestBase):
for name in self.datasets:
stdout, _ = self.execute_script(
"be_print_strees",
["-d", name, "-q", "1"],
["-d", name, "-q"],
)
file_name = os.path.join(Folders.img, f"stree_{name}.png")
self.files.append(file_name)
@@ -33,7 +33,7 @@ class BePrintStrees(TestBase):
for name in self.datasets:
stdout, _ = self.execute_script(
"be_print_strees",
["-d", name, "-q", "1", "-c", "1"],
["-d", name, "-q", "-c"],
)
file_name = os.path.join(Folders.img, f"stree_{name}.png")
self.files.append(file_name)

View File

@@ -1,5 +1,7 @@
import os
from openpyxl import load_workbook
from io import StringIO
from unittest.mock import patch
from ...Utils import Folders, Files
from ..TestBase import TestBase
from ..._version import __version__
@@ -23,25 +25,25 @@ class BeReportTest(TestBase):
"results",
"results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json",
)
stdout, stderr = self.execute_script("be_report", ["-f", file_name])
stdout, stderr = self.execute_script("be_report", ["file", file_name])
self.assertEqual(stderr.getvalue(), "")
self.check_output_file(stdout, "report")
def test_be_report_not_found(self):
stdout, stderr = self.execute_script("be_report", ["-f", "unknown"])
stdout, stderr = self.execute_script("be_report", ["file", "unknown"])
self.assertEqual(stderr.getvalue(), "")
self.assertEqual(stdout.getvalue(), "unknown does not exists!\n")
def test_be_report_compare(self):
file_name = "results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json"
stdout, stderr = self.execute_script(
"be_report", ["-f", file_name, "-c", "1"]
"be_report", ["file", file_name, "-c"]
)
self.assertEqual(stderr.getvalue(), "")
self.check_output_file(stdout, "report_compared")
def test_be_report_datatsets(self):
stdout, stderr = self.execute_script("be_report", [])
stdout, stderr = self.execute_script("be_report", ["datasets"])
self.assertEqual(stderr.getvalue(), "")
file_name = f"report_datasets{self.ext}"
with open(os.path.join(self.test_files, file_name)) as f:
@@ -54,7 +56,7 @@ class BeReportTest(TestBase):
self.assertEqual(line, output_text[index])
def test_be_report_datasets_excel(self):
stdout, stderr = self.execute_script("be_report", ["-x", "1"])
stdout, stderr = self.execute_script("be_report", ["datasets", "-x"])
self.assertEqual(stderr.getvalue(), "")
file_name = f"report_datasets{self.ext}"
with open(os.path.join(self.test_files, file_name)) as f:
@@ -77,14 +79,14 @@ class BeReportTest(TestBase):
def test_be_report_best(self):
stdout, stderr = self.execute_script(
"be_report", ["-s", "accuracy", "-m", "STree", "-b", "1"]
"be_report", ["best", "-s", "accuracy", "-m", "STree"]
)
self.assertEqual(stderr.getvalue(), "")
self.check_output_file(stdout, "report_best")
def test_be_report_grid(self):
stdout, stderr = self.execute_script(
"be_report", ["-s", "accuracy", "-m", "STree", "-g", "1"]
"be_report", ["grid", "-s", "accuracy", "-m", "STree"]
)
self.assertEqual(stderr.getvalue(), "")
file_name = "report_grid.test"
@@ -98,19 +100,24 @@ class BeReportTest(TestBase):
line = self.replace_STree_version(line, output_text, index)
self.assertEqual(line, output_text[index])
def test_be_report_best_both(self):
stdout, stderr = self.execute_script(
"be_report",
["-s", "accuracy", "-m", "STree", "-b", "1", "-g", "1"],
)
@patch("sys.stderr", new_callable=StringIO)
def test_be_report_unknown_subcommand(self, stderr):
with self.assertRaises(SystemExit) as msg:
module = self.search_script("be_report")
module.main(["unknown"])
self.assertEqual(msg.exception.code, 2)
self.check_output_file(stderr, "report_unknown_subcommand")
def test_be_report_without_subcommand(self):
stdout, stderr = self.execute_script("be_report", "")
self.assertEqual(stderr.getvalue(), "")
self.check_output_file(stdout, "report_best")
self.check_output_file(stdout, "report_without_subcommand")
def test_be_report_excel_compared(self):
file_name = "results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json"
stdout, stderr = self.execute_script(
"be_report",
["-f", file_name, "-x", "1", "-c", "1"],
["file", file_name, "-x", "-c"],
)
file_name = os.path.join(
Folders.results, file_name.replace(".json", ".xlsx")
@@ -125,7 +132,7 @@ class BeReportTest(TestBase):
file_name = "results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json"
stdout, stderr = self.execute_script(
"be_report",
["-f", file_name, "-x", "1"],
["file", file_name, "-x"],
)
file_name = os.path.join(
Folders.results, file_name.replace(".json", ".xlsx")
@@ -140,7 +147,7 @@ class BeReportTest(TestBase):
file_name = "results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.json"
stdout, stderr = self.execute_script(
"be_report",
["-f", file_name, "-q", "1"],
["file", file_name, "-q"],
)
file_name = os.path.join(
Folders.results, file_name.replace(".json", ".sql")

View File

@@ -0,0 +1,10 @@
Creating folder test_project
Creating folder test_project/results
Creating folder test_project/hidden_results
Creating folder test_project/exreport
Creating folder test_project/exreport/exreport_output
Creating folder test_project/img
Done!
Please, edit .env file with your settings and add a datasets folder
with an all.txt file with the datasets you want to use.
In that folder you have to include all the datasets you'll use.

View File

@@ -0,0 +1,2 @@
usage: be_init_project [-h] project_name
be_init_project: error: the following arguments are required: project_name

View File

@@ -0,0 +1,5 @@
 # Date File Score Time(h) Title
=== ========== ================================================================ ======== ======= =======================
 0 2022-05-04 results_accuracy_XGBoost_MacBookpro16_2022-05-04_11:00:35_0.json nan 3.091 Default hyperparameters
 1 2021-11-01 results_accuracy_STree_iMac27_2021-11-01_23:55:16_0.json 0.97446 0.098 default
Already hidden

View File

@@ -0,0 +1,16 @@
 # Date File Score Time(h) Title
=== ========== ================================================================ ======== ======= ============================================
 0 2022-05-04 results_accuracy_XGBoost_MacBookpro16_2022-05-04_11:00:35_0.json nan 3.091 Default hyperparameters
 1 2022-04-20 results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.json 0.04341 6.275 Gridsearched hyperparams v022.1b random_init
 2 2022-01-14 results_accuracy_RandomForest_iMac27_2022-01-14_12:39:30_0.json 0.03627 0.076 Test default paramters with RandomForest
 3 2021-11-01 results_accuracy_STree_macbook-pro_2021-11-01_19:17:07_0.json 0.03790 1.143 default B
 4 2021-10-27 results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json 0.04158 0.943 default A
 5 2021-09-30 results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json 0.04544 0.173 With gridsearched hyperparameters
Deleting results/results_accuracy_XGBoost_MacBookpro16_2022-05-04_11:00:35_0.json
 # Date File Score Time(h) Title
=== ========== =============================================================== ======== ======= ============================================
 0 2022-04-20 results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.json 0.04341 6.275 Gridsearched hyperparams v022.1b random_init
 1 2022-01-14 results_accuracy_RandomForest_iMac27_2022-01-14_12:39:30_0.json 0.03627 0.076 Test default paramters with RandomForest
 2 2021-11-01 results_accuracy_STree_macbook-pro_2021-11-01_19:17:07_0.json 0.03790 1.143 default B
 3 2021-10-27 results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json 0.04158 0.943 default A
 4 2021-09-30 results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json 0.04544 0.173 With gridsearched hyperparameters

View File

@@ -0,0 +1,4 @@
 # Date File Score Time(h) Title
=== ========== ================================================================ ======== ======= =======================
 0 2022-05-04 results_accuracy_XGBoost_MacBookpro16_2022-05-04_11:00:35_0.json nan 3.091 Default hyperparameters
 1 2021-11-01 results_accuracy_STree_iMac27_2021-11-01_23:55:16_0.json 0.97446 0.098 default

View File

@@ -0,0 +1,3 @@
 # Date File Score Time(h) Title
=== ========== ================================================================ ======== ======= =======================
 0 2022-05-04 results_accuracy_XGBoost_MacBookpro16_2022-05-04_11:00:35_0.json nan 3.091 Default hyperparameters

View File

@@ -6,8 +6,11 @@
 3 2021-11-01 results_accuracy_STree_macbook-pro_2021-11-01_19:17:07_0.json 0.03790 1.143 default B
 4 2021-10-27 results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json 0.04158 0.943 default A
 5 2021-09-30 results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json 0.04544 0.173 With gridsearched hyperparameters
****************************** Results with nan moved to hidden ******************************
 # Date File Score Time(h) Title
=== ========== ================================================================ ======== ======= =======================
 0 2022-05-04 results_accuracy_XGBoost_MacBookpro16_2022-05-04_11:00:35_0.json nan 3.091 Default hyperparameters
Hiding results/results_accuracy_XGBoost_MacBookpro16_2022-05-04_11:00:35_0.json
 # Date File Score Time(h) Title
=== ========== =============================================================== ======== ======= ============================================
 0 2022-04-20 results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.json 0.04341 6.275 Gridsearched hyperparams v022.1b random_init
 1 2022-01-14 results_accuracy_RandomForest_iMac27_2022-01-14_12:39:30_0.json 0.03627 0.076 Test default paramters with RandomForest
 2 2021-11-01 results_accuracy_STree_macbook-pro_2021-11-01_19:17:07_0.json 0.03790 1.143 default B
 3 2021-10-27 results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json 0.04158 0.943 default A
 4 2021-09-30 results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json 0.04544 0.173 With gridsearched hyperparameters

View File

@@ -3,4 +3,3 @@
 0 2021-11-01 results_accuracy_STree_macbook-pro_2021-11-01_19:17:07_0.json 0.03790 1.143 default B
 1 2021-10-27 results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json 0.04158 0.943 default A
 2 2021-09-30 results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json 0.04544 0.173 With gridsearched hyperparameters
Which result do you want to report? (q to quit, r to list again, number to report):

View File

@@ -3,9 +3,8 @@
 0 2021-11-01 results_accuracy_STree_macbook-pro_2021-11-01_19:17:07_0.json 0.03790 1.143 default B
 1 2021-10-27 results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json 0.04158 0.943 default A
 2 2021-09-30 results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json 0.04544 0.173 With gridsearched hyperparameters
Which result do you want to report? (q to quit, r to list again, number to report):  # Date File Score Time(h) Title
 # Date File Score Time(h) Title
=== ========== ============================================================= ======== ======= =================================
 0 2021-11-01 results_accuracy_STree_macbook-pro_2021-11-01_19:17:07_0.json 0.03790 1.143 default B
 1 2021-10-27 results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json 0.04158 0.943 default A
 2 2021-09-30 results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json 0.04544 0.173 With gridsearched hyperparameters
Which result do you want to report? (q to quit, r to list again, number to report):

View File

@@ -3,5 +3,4 @@
 0 2021-11-01 results_accuracy_STree_macbook-pro_2021-11-01_19:17:07_0.json 0.03790 1.143 default B
 1 2021-10-27 results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json 0.04158 0.943 default A
 2 2021-09-30 results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json 0.04544 0.173 With gridsearched hyperparameters
Which result do you want to report? (q to quit, r to list again, number to report): Invalid option x. Try again!
Which result do you want to report? (q to quit, r to list again, number to report):
Invalid option. Try again!

View File

@@ -3,7 +3,7 @@
 0 2021-11-01 results_accuracy_STree_macbook-pro_2021-11-01_19:17:07_0.json 0.03790 1.143 default B
 1 2021-10-27 results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json 0.04158 0.943 default A
 2 2021-09-30 results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json 0.04544 0.173 With gridsearched hyperparameters
Which result do you want to report? (q to quit, r to list again, number to report): *************************************************************************************************************************
*************************************************************************************************************************
* STree ver. 1.2.3 Python ver. 3.11x with 5 Folds cross validation and 10 random seeds. 2021-11-01 19:17:07 *
* default B *
* Random seeds: [57, 31, 1714, 17, 23, 79, 83, 97, 7, 1] Stratified: False *
@@ -18,4 +18,3 @@ Dataset Sampl. Feat. Cls Nodes Leaves Depth Score
*************************************************************************************************************************
* accuracy compared to STree_default (liblinear-ovr) .: 0.0379 *
*************************************************************************************************************************
Which result do you want to report? (q to quit, r to list again, number to report):

View File

@@ -3,19 +3,5 @@
 0 2021-11-01 results_accuracy_STree_macbook-pro_2021-11-01_19:17:07_0.json 0.03790 1.143 default B
 1 2021-10-27 results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json 0.04158 0.943 default A
 2 2021-09-30 results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json 0.04544 0.173 With gridsearched hyperparameters
Which result do you want to report? (q to quit, r to list again, number to report): *************************************************************************************************************************
* STree ver. 1.2.3 Python ver. 3.11x with 5 Folds cross validation and 10 random seeds. 2021-09-30 11:42:07 *
* With gridsearched hyperparameters *
* Random seeds: [57, 31, 1714, 17, 23, 79, 83, 97, 7, 1] Stratified: False *
* Execution took 624.25 seconds, 0.17 hours, on iMac27 *
* Score is accuracy *
*************************************************************************************************************************
Dataset Sampl. Feat. Cls Nodes Leaves Depth Score Time Hyperparameters
============================== ====== ===== === ======= ======= ======= =============== ================= ===============
balance-scale 625 4 3 7.00 4.00 3.00 0.970560±0.0150 0.014049±0.0020 {'C': 10000.0, 'gamma': 0.1, 'kernel': 'rbf', 'max_iter': 10000.0, 'multiclass_strategy': 'ovr'}
balloons 16 4 2 3.00 2.00 2.00 0.860000±0.2850 0.000854±0.0000 {'C': 7, 'gamma': 0.1, 'kernel': 'rbf', 'max_iter': 10000.0, 'multiclass_strategy': 'ovr'}
*************************************************************************************************************************
* accuracy compared to STree_default (liblinear-ovr) .: 0.0454 *
*************************************************************************************************************************
Which result do you want to report? (q to quit, r to list again, number to report): Generated file: some_results.xlsx
Added results/results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json to some_results.xlsx
Generated file: some_results.xlsx

View File

@@ -3,34 +3,6 @@
 0 2021-11-01 results_accuracy_STree_macbook-pro_2021-11-01_19:17:07_0.json 0.03790 1.143 default B
 1 2021-10-27 results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json 0.04158 0.943 default A
 2 2021-09-30 results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json 0.04544 0.173 With gridsearched hyperparameters
Which result do you want to report? (q to quit, r to list again, number to report): *************************************************************************************************************************
* STree ver. 1.2.3 Python ver. 3.11x with 5 Folds cross validation and 10 random seeds. 2021-09-30 11:42:07 *
* With gridsearched hyperparameters *
* Random seeds: [57, 31, 1714, 17, 23, 79, 83, 97, 7, 1] Stratified: False *
* Execution took 624.25 seconds, 0.17 hours, on iMac27 *
* Score is accuracy *
*************************************************************************************************************************
Dataset Sampl. Feat. Cls Nodes Leaves Depth Score Time Hyperparameters
============================== ====== ===== === ======= ======= ======= =============== ================= ===============
balance-scale 625 4 3 7.00 4.00 3.00 0.970560±0.0150 0.014049±0.0020 {'C': 10000.0, 'gamma': 0.1, 'kernel': 'rbf', 'max_iter': 10000.0, 'multiclass_strategy': 'ovr'}
balloons 16 4 2 3.00 2.00 2.00 0.860000±0.2850 0.000854±0.0000 {'C': 7, 'gamma': 0.1, 'kernel': 'rbf', 'max_iter': 10000.0, 'multiclass_strategy': 'ovr'}
*************************************************************************************************************************
* accuracy compared to STree_default (liblinear-ovr) .: 0.0454 *
*************************************************************************************************************************
Which result do you want to report? (q to quit, r to list again, number to report): *************************************************************************************************************************
* STree ver. 1.2.3 Python ver. 3.11x with 5 Folds cross validation and 10 random seeds. 2021-10-27 09:40:40 *
* default A *
* Random seeds: [57, 31, 1714, 17, 23, 79, 83, 97, 7, 1] Stratified: False *
* Execution took 3395.01 seconds, 0.94 hours, on iMac27 *
* Score is accuracy *
*************************************************************************************************************************
Dataset Sampl. Feat. Cls Nodes Leaves Depth Score Time Hyperparameters
============================== ====== ===== === ======= ======= ======= =============== ================= ===============
balance-scale 625 4 3 11.08 5.90 5.90 0.980000±0.0010 0.285207±0.0603 {'splitter': 'best', 'max_features': 'auto'}
balloons 16 4 2 4.12 2.56 2.56 0.695000±0.2757 0.021201±0.0035 {'splitter': 'best', 'max_features': 'auto'}
*************************************************************************************************************************
* accuracy compared to STree_default (liblinear-ovr) .: 0.0416 *
*************************************************************************************************************************
Which result do you want to report? (q to quit, r to list again, number to report): Generated file: some_results.xlsx
Added results/results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json to some_results.xlsx
Added results/results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json to some_results.xlsx
Generated file: some_results.xlsx

View File

@@ -1,6 +1,6 @@
*************************************************************************************************************************
* STree ver. 1.2.4 Python ver. 3.11x with 5 Folds cross validation and 10 random seeds. 2022-05-08 19:38:28 *
* test *
* Test with only one dataset *
* Random seeds: [57, 31, 1714, 17, 23, 79, 83, 97, 7, 1] Stratified: False *
* Execution took 0.06 seconds, 0.00 hours, on iMac27 *
* Score is accuracy *

View File

@@ -0,0 +1,2 @@
usage: be_report [-h] {best,grid,file,datasets} ...
be_report: error: argument subcommand: invalid choice: 'unknown' (choose from 'best', 'grid', 'file', 'datasets')

View File

@@ -0,0 +1,12 @@
usage: be_report [-h] {best,grid,file,datasets} ...
positional arguments:
{best,grid,file,datasets}
help for subcommand
best Report best results obtained by any model/score. See be_build_best
grid Report grid results obtained by any model/score. See be_build_grid
file Report file results
datasets Report datasets information
options:
-h, --help show this help message and exit

View File

@@ -38,6 +38,7 @@ def script_names():
"print_strees",
"report",
"summary",
"init_project",
]
result = []
for script in scripts:
@@ -60,8 +61,6 @@ setuptools.setup(
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: " + get_data("license"),
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Natural Language :: English",
"Topic :: Scientific/Engineering :: Artificial Intelligence",