Add benchmark tests

This commit is contained in:
2022-04-25 02:35:23 +02:00
parent a3b4b59b48
commit a17166ed31
14 changed files with 176 additions and 83 deletions

View File

@@ -597,7 +597,7 @@ class Benchmark:
self._datasets = set()
def get_result_file_name(self):
return os.path.join(Folders.results, Files.exreport(self._score))
return os.path.join(Folders.exreport, Files.exreport(self._score))
def compile_results(self):
summary = Summary()
@@ -644,7 +644,7 @@ class Benchmark:
print("*" * length)
print(message)
print("*" * length)
with open(os.path.join(Folders.results, file)) as f:
with open(os.path.join(Folders.exreport, file)) as f:
data = f.read().splitlines()
for line in data:
print(line)
@@ -659,17 +659,23 @@ class Benchmark:
print("Error: %s : %s" % (Folders.report, os_error.strerror))
# Compute Friedman & Holm Tests
fout = open(
os.path.join(Folders.results, Files.exreport_output(self._score)),
os.path.join(Folders.exreport, Files.exreport_output(self._score)),
"w",
)
ferr = open(
os.path.join(Folders.results, Files.exreport_err(self._score)), "w"
os.path.join(Folders.exreport, Files.exreport_err(self._score)),
"w",
)
print(
"*********************",
os.path.join(Folders.src(), Files.benchmark_r),
)
result = subprocess.run(
[
"Rscript",
os.path.join(Folders.src(), Files.benchmark_r),
self._score,
os.path.join(Folders.exreport, f"exreport_{self._score}"),
],
stdout=fout,
stderr=ferr,

View File

@@ -1,9 +1,9 @@
library(glue)
args = commandArgs(trailingOnly=TRUE)
if (length(args)!=1) {
stop("Only one argument must be supplied (score).n", call.=FALSE)
if (length(args)!=2) {
stop("Only two arguments must be supplied (score & input_file).n", call.=FALSE)
}
csv_file <- glue("results/exreport_{args[1]}.csv")
csv_file <- glue("{args[2]}.csv")
destination <- "exreport/"
results <- read.csv(csv_file)
library(exreport)

View File

@@ -0,0 +1,53 @@
import os
import unittest
import shutil
from io import StringIO
from unittest.mock import patch
from ..Utils import Folders
from ..Results import Benchmark
class BenchmarkTest(unittest.TestCase):
    """Integration tests for the Benchmark result pipeline.

    Exercises compiling results into the exreport CSV and running the
    exreport step, comparing output against recorded fixture files.
    """

    def __init__(self, *args, **kwargs):
        # Anchor the working directory at this module's folder so the
        # relative fixture paths ("test_files", Folders.exreport) resolve.
        os.chdir(os.path.dirname(os.path.abspath(__file__)))
        super().__init__(*args, **kwargs)

    def tearDown(self) -> None:
        # Drop the artifacts each test writes into the exreport folder.
        for name in (
            # "exreport_accuracy.csv",
            "exreport_accuracy.txt",
            "exreport_err_accuracy.txt",
        ):
            target = os.path.join(Folders.exreport, name)
            if os.path.exists(target):
                os.remove(target)
        # The report folder and R plot file are recreated per run.
        if os.path.exists(Folders.report):
            shutil.rmtree(Folders.report)
        if os.path.exists("Rplots.pdf"):
            os.remove("Rplots.pdf")
        return super().tearDown()

    def test_csv(self):
        # Compiling + saving should reproduce the recorded CSV exactly.
        benchmark = Benchmark("accuracy")
        benchmark.compile_results()
        benchmark.save_results()
        with open(benchmark.get_result_file_name()) as produced:
            computed = produced.readlines()
        with open(os.path.join("test_files", "exreport_csv.test")) as ref:
            expected = ref.readlines()
        self.assertEqual(computed, expected)

    def test_exreport(self):
        benchmark = Benchmark("accuracy")
        benchmark.compile_results()
        benchmark.save_results()
        # Capture what exreport() prints to stdout.
        with patch("sys.stdout", new=StringIO()) as fake_out:
            benchmark.exreport()
        with open(os.path.join("test_files", "exreport.test")) as ref:
            expected_lines = ref.read().split("\n")
        # Skip the first captured line (not part of the recorded fixture),
        # then compare line by line up to the shorter sequence.
        produced_lines = fake_out.getvalue().split("\n")[1:]
        for got, want in zip(produced_lines, expected_lines):
            self.assertEqual(got, want)

View File

@@ -38,44 +38,43 @@ class ExcelTest(unittest.TestCase):
self.assertEqual(sheet.cell(int(row), int(col)).value, value)
def test_report_excel_compared(self):
file_name = (
"results/results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json"
)
file_name = "results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json"
report = Excel(file_name, compare=True)
report.report()
file_output = report.get_file_name()
book = load_workbook(file_output)
sheet = book["STree"]
self.check_excel_sheet(sheet, "test_files/excel_compared.test")
self.check_excel_sheet(
sheet, os.path.join("test_files", "excel_compared.test")
)
def test_report_excel(self):
file_name = (
"results/results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json"
)
file_name = "results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json"
report = Excel(file_name, compare=False)
report.report()
file_output = report.get_file_name()
book = load_workbook(file_output)
sheet = book["STree"]
self.check_excel_sheet(sheet, "test_files/excel.test")
self.check_excel_sheet(sheet, os.path.join("test_files", "excel.test"))
def test_Excel_Add_sheet(self):
file_name = (
"results/results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json"
)
file_name = "results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json"
excel_file_name = file_name.replace(".json", ".xlsx")
book = Workbook(excel_file_name)
book = Workbook(os.path.join(Folders.results, excel_file_name))
excel = Excel(file_name=file_name, book=book)
excel.report()
report = Excel(
file_name="results/results_accuracy_ODTE_Galgo_2022-04-20_10:52:20"
"_0.json",
file_name="results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.json",
book=book,
)
report.report()
book.close()
book = load_workbook(excel_file_name)
book = load_workbook(os.path.join(Folders.results, excel_file_name))
sheet = book["STree"]
self.check_excel_sheet(sheet, "test_files/excel_add_STree.test")
self.check_excel_sheet(
sheet, os.path.join("test_files", "excel_add_STree.test")
)
sheet = book["ODTE"]
self.check_excel_sheet(sheet, "test_files/excel_add_ODTE.test")
self.check_excel_sheet(
sheet, os.path.join("test_files", "excel_add_ODTE.test")
)

View File

@@ -1,9 +1,6 @@
import os
import csv
import unittest
from openpyxl import load_workbook
from xlsxwriter import Workbook
from ..Results import Excel
from ..Results import SQL
from ..Utils import Folders
@@ -14,8 +11,6 @@ class SQLTest(unittest.TestCase):
def tearDown(self) -> None:
files = [
"results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.xlsx",
"results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.xlsx",
"results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.sql",
]
for file_name in files:
@@ -24,58 +19,16 @@ class SQLTest(unittest.TestCase):
os.remove(file_name)
return super().tearDown()
def check_excel_sheet(self, sheet, file_name):
with open(file_name, "r") as f:
expected = csv.reader(f, delimiter=";")
for row, col, value in expected:
if value.isdigit():
value = int(value)
else:
try:
value = float(value)
except ValueError:
pass
self.assertEqual(sheet.cell(int(row), int(col)).value, value)
def test_report_excel_compared(self):
file_name = (
"results/results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json"
)
report = Excel(file_name, compare=True)
def test_report_SQL(self):
file_name = "results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.json"
report = SQL(file_name)
report.report()
file_output = report.get_file_name()
book = load_workbook(file_output)
sheet = book["STree"]
self.check_excel_sheet(sheet, "test_files/excel_compared.test")
file_name = os.path.join(
Folders.results, file_name.replace(".json", ".sql")
)
def test_report_excel(self):
file_name = (
"results/results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json"
)
report = Excel(file_name, compare=False)
report.report()
file_output = report.get_file_name()
book = load_workbook(file_output)
sheet = book["STree"]
self.check_excel_sheet(sheet, "test_files/excel.test")
def test_Excel_Add_sheet(self):
file_name = (
"results/results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json"
)
excel_file_name = file_name.replace(".json", ".xlsx")
book = Workbook(excel_file_name)
excel = Excel(file_name=file_name, book=book)
excel.report()
report = Excel(
file_name="results/results_accuracy_ODTE_Galgo_2022-04-20_10:52:20"
"_0.json",
book=book,
)
report.report()
book.close()
book = load_workbook(excel_file_name)
sheet = book["STree"]
self.check_excel_sheet(sheet, "test_files/excel_add_STree.test")
sheet = book["ODTE"]
self.check_excel_sheet(sheet, "test_files/excel_add_ODTE.test")
with open(file_name, "r") as file:
computed = file.read()
with open(os.path.join("test_files", "sql.test")) as f_exp:
expected = f_exp.read()
self.assertEqual(computed, expected)

View File

@@ -122,6 +122,8 @@ class UtilTest(unittest.TestCase):
"results_accuracy_STree_macbook-pro_2021-11-01_19:17:07_0."
"json",
"results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.json",
"results_accuracy_RandomForest_iMac27_2022-01-14_12:39:30_0."
"json",
],
)
self.assertCountEqual(

View File

@@ -7,6 +7,7 @@ from .GridSearch_test import GridSearchTest
from .Report_test import ReportTest
from .Excel_test import ExcelTest
from .SQL_test import SQLTest
from .Benchmark_test import BenchmarkTest
all = [
"UtilTest",
@@ -18,4 +19,5 @@ all = [
"ReportTest",
"ExcelTest",
"SQLTest",
"BenchmarkTest",
]

2
benchmark/tests/exreport/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
*
!.gitignore

View File

@@ -0,0 +1,2 @@
Error: Only one argument must be supplied (score).n
Ejecución interrumpida

View File

@@ -0,0 +1,43 @@
{
"score_name": "accuracy",
"title": "Test default paramters with RandomForest",
"model": "RandomForest",
"version": "-",
"stratified": false,
"folds": 5,
"date": "2022-01-14",
"time": "12:39:30",
"duration": 272.7363500595093,
"seeds": [57, 31, 1714, 17, 23, 79, 83, 97, 7, 1],
"platform": "iMac27",
"results": [
{
"dataset": "balance-scale",
"samples": 625,
"features": 4,
"classes": 3,
"hyperparameters": {},
"nodes": 196.91440000000003,
"leaves": 98.42,
"depth": 10.681399999999998,
"score": 0.83616,
"score_std": 0.02649630917694009,
"time": 0.08222018241882324,
"time_std": 0.0013026326815120633
},
{
"dataset": "balloons",
"samples": 16,
"features": 4,
"classes": 2,
"hyperparameters": {},
"nodes": 9.110800000000001,
"leaves": 4.58,
"depth": 3.0982,
"score": 0.625,
"score_std": 0.24958298553119898,
"time": 0.07016648769378662,
"time_std": 0.002460508923990468
}
]
}

View File

@@ -0,0 +1,22 @@
****************************************************************************************************
Benchmark Ok
****************************************************************************************************
---------------------------------------------------------------------
Friedman test, objetive maximize output variable accuracy. Obtained p-value: 1.3534e-01
Chi squared with 2 degrees of freedom statistic: 4.0000
Test accepted: p-value: 1.3534e-01 >= 0.0500
---------------------------------------------------------------------
Control post hoc test for output accuracy
Adjust method: Holm
Control method: STree
p-values:
ODTE 0.3173
RandomForest 0.0910
---------------------------------------------------------------------
$testMultiple
classifier pvalue rank win tie loss
STree STree NA 1 NA NA NA
ODTE ODTE 0.31731051 2 2 0 0
RandomForest RandomForest 0.09100053 3 2 0 0

View File

@@ -0,0 +1,7 @@
classifier, dataset, accuracy, stdev, file_name
ODTE, balance-scale, 0.96352, 0.024949741481626608, results/results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.json
ODTE, balloons, 0.785, 0.2461311755051675, results/results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.json
RandomForest, balance-scale, 0.83616, 0.02649630917694009, results/results_accuracy_RandomForest_iMac27_2022-01-14_12:39:30_0.json
RandomForest, balloons, 0.625, 0.24958298553119898, results/results_accuracy_RandomForest_iMac27_2022-01-14_12:39:30_0.json
STree, balance-scale, 0.97056, 0.015046806970251203, results/results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json
STree, balloons, 0.86, 0.28501461950807594, results/results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json

View File

@@ -0,0 +1,2 @@
replace into results (date,time,type,title,stratified,score_name,score,score_std,dataset,classifier,version,norm,stand,time_spent,time_spent_std,parameters,nodes,leaves,depth,platform,nfolds,seeds) values('2022-04-20','10:52:20','crossval','Gridsearched hyperparams v022.1b random_init','0','accuracy','0.96352','0.024949741481626608','balance-scale','ODTE','0.3.2','0','1','0.31663217544555666','0.19918813895255585','{"base_estimator__C": 57, "base_estimator__gamma": 0.1, "base_estimator__kernel": "rbf", "base_estimator__multiclass_strategy": "ovr", "n_estimators": 100, "n_jobs": -1}','7.361199999999999','4.180599999999999','3.536','Galgo','5','[57, 31, 1714, 17, 23, 79, 83, 97, 7, 1]');
replace into results (date,time,type,title,stratified,score_name,score,score_std,dataset,classifier,version,norm,stand,time_spent,time_spent_std,parameters,nodes,leaves,depth,platform,nfolds,seeds) values('2022-04-20','10:52:20','crossval','Gridsearched hyperparams v022.1b random_init','0','accuracy','0.785','0.2461311755051675','balloons','ODTE','0.3.2','0','1','0.11560620784759522','0.012784241828599895','{"base_estimator__C": 5, "base_estimator__gamma": 0.14, "base_estimator__kernel": "rbf", "base_estimator__multiclass_strategy": "ovr", "n_estimators": 100, "n_jobs": -1}','2.9951999999999996','1.9975999999999998','1.9975999999999998','Galgo','5','[57, 31, 1714, 17, 23, 79, 83, 97, 7, 1]');