Add benchmark tests

This commit is contained in:
2022-04-25 02:35:23 +02:00
parent a3b4b59b48
commit a17166ed31
14 changed files with 176 additions and 83 deletions

View File

@@ -597,7 +597,7 @@ class Benchmark:
self._datasets = set() self._datasets = set()
def get_result_file_name(self): def get_result_file_name(self):
return os.path.join(Folders.results, Files.exreport(self._score)) return os.path.join(Folders.exreport, Files.exreport(self._score))
def compile_results(self): def compile_results(self):
summary = Summary() summary = Summary()
@@ -644,7 +644,7 @@ class Benchmark:
print("*" * length) print("*" * length)
print(message) print(message)
print("*" * length) print("*" * length)
with open(os.path.join(Folders.results, file)) as f: with open(os.path.join(Folders.exreport, file)) as f:
data = f.read().splitlines() data = f.read().splitlines()
for line in data: for line in data:
print(line) print(line)
@@ -659,17 +659,23 @@ class Benchmark:
print("Error: %s : %s" % (Folders.report, os_error.strerror)) print("Error: %s : %s" % (Folders.report, os_error.strerror))
# Compute Friedman & Holm Tests # Compute Friedman & Holm Tests
fout = open( fout = open(
os.path.join(Folders.results, Files.exreport_output(self._score)), os.path.join(Folders.exreport, Files.exreport_output(self._score)),
"w", "w",
) )
ferr = open( ferr = open(
os.path.join(Folders.results, Files.exreport_err(self._score)), "w" os.path.join(Folders.exreport, Files.exreport_err(self._score)),
"w",
)
print(
"*********************",
os.path.join(Folders.src(), Files.benchmark_r),
) )
result = subprocess.run( result = subprocess.run(
[ [
"Rscript", "Rscript",
os.path.join(Folders.src(), Files.benchmark_r), os.path.join(Folders.src(), Files.benchmark_r),
self._score, self._score,
os.path.join(Folders.exreport, f"exreport_{self._score}"),
], ],
stdout=fout, stdout=fout,
stderr=ferr, stderr=ferr,

View File

@@ -1,9 +1,9 @@
library(glue) library(glue)
args = commandArgs(trailingOnly=TRUE) args = commandArgs(trailingOnly=TRUE)
if (length(args)!=1) { if (length(args)!=2) {
stop("Only one argument must be supplied (score).n", call.=FALSE) stop("Only two arguments must be supplied (score & input_file).n", call.=FALSE)
} }
csv_file <- glue("results/exreport_{args[1]}.csv") csv_file <- glue("{args[2]}.csv")
destination <- "exreport/" destination <- "exreport/"
results <- read.csv(csv_file) results <- read.csv(csv_file)
library(exreport) library(exreport)

View File

@@ -0,0 +1,53 @@
import os
import unittest
import shutil
from io import StringIO
from unittest.mock import patch
from ..Utils import Folders
from ..Results import Benchmark
class BenchmarkTest(unittest.TestCase):
    """Tests for the Benchmark result-compilation and exreport pipeline.

    NOTE(review): relies on fixture files under ``test_files`` and, for
    ``test_exreport``, on an installed ``Rscript``/exreport toolchain —
    confirm the test environment provides both.
    """

    def __init__(self, *args, **kwargs):
        # Change into this module's directory so relative fixture paths
        # ("test_files", Folders.exreport, ...) resolve no matter which
        # working directory the test runner was launched from.
        os.chdir(os.path.dirname(os.path.abspath(__file__)))
        super().__init__(*args, **kwargs)

    def tearDown(self) -> None:
        """Remove artifacts produced by the benchmark runs, if present."""
        files = [
            # "exreport_accuracy.csv",
            "exreport_accuracy.txt",
            "exreport_err_accuracy.txt",
        ]
        for file_name in files:
            file_name = os.path.join(Folders.exreport, file_name)
            if os.path.exists(file_name):
                os.remove(file_name)
        # exreport() regenerates the whole report folder; drop it entirely.
        if os.path.exists(Folders.report):
            shutil.rmtree(Folders.report)
        # Rplots.pdf is a side effect of the R subprocess.
        if os.path.exists("Rplots.pdf"):
            os.remove("Rplots.pdf")
        return super().tearDown()

    def test_csv(self):
        """compile_results/save_results must write the expected exreport CSV."""
        benchmark = Benchmark("accuracy")
        benchmark.compile_results()
        benchmark.save_results()
        with open(benchmark.get_result_file_name()) as f:
            computed = f.readlines()
        with open(os.path.join("test_files", "exreport_csv.test")) as f_exp:
            expected = f_exp.readlines()
        self.assertEqual(computed, expected)

    def test_exreport(self):
        """exreport() stdout (minus its first line) must match the fixture."""
        benchmark = Benchmark("accuracy")
        benchmark.compile_results()
        benchmark.save_results()
        # Capture everything exreport() prints to stdout.
        with patch("sys.stdout", new=StringIO()) as fake_out:
            benchmark.exreport()
        with open(os.path.join("test_files", "exreport.test")) as f:
            expected_t = f.read()
        computed_t = fake_out.getvalue()
        computed_t = computed_t.split("\n")
        # The first stdout line is environment-dependent (the debug print
        # of the R script path), so drop it before comparing line by line.
        computed_t.pop(0)
        for computed, expected in zip(computed_t, expected_t.split("\n")):
            self.assertEqual(computed, expected)

View File

@@ -38,44 +38,43 @@ class ExcelTest(unittest.TestCase):
self.assertEqual(sheet.cell(int(row), int(col)).value, value) self.assertEqual(sheet.cell(int(row), int(col)).value, value)
def test_report_excel_compared(self): def test_report_excel_compared(self):
file_name = ( file_name = "results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json"
"results/results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json"
)
report = Excel(file_name, compare=True) report = Excel(file_name, compare=True)
report.report() report.report()
file_output = report.get_file_name() file_output = report.get_file_name()
book = load_workbook(file_output) book = load_workbook(file_output)
sheet = book["STree"] sheet = book["STree"]
self.check_excel_sheet(sheet, "test_files/excel_compared.test") self.check_excel_sheet(
sheet, os.path.join("test_files", "excel_compared.test")
)
def test_report_excel(self): def test_report_excel(self):
file_name = ( file_name = "results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json"
"results/results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json"
)
report = Excel(file_name, compare=False) report = Excel(file_name, compare=False)
report.report() report.report()
file_output = report.get_file_name() file_output = report.get_file_name()
book = load_workbook(file_output) book = load_workbook(file_output)
sheet = book["STree"] sheet = book["STree"]
self.check_excel_sheet(sheet, "test_files/excel.test") self.check_excel_sheet(sheet, os.path.join("test_files", "excel.test"))
def test_Excel_Add_sheet(self): def test_Excel_Add_sheet(self):
file_name = ( file_name = "results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json"
"results/results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json"
)
excel_file_name = file_name.replace(".json", ".xlsx") excel_file_name = file_name.replace(".json", ".xlsx")
book = Workbook(excel_file_name) book = Workbook(os.path.join(Folders.results, excel_file_name))
excel = Excel(file_name=file_name, book=book) excel = Excel(file_name=file_name, book=book)
excel.report() excel.report()
report = Excel( report = Excel(
file_name="results/results_accuracy_ODTE_Galgo_2022-04-20_10:52:20" file_name="results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.json",
"_0.json",
book=book, book=book,
) )
report.report() report.report()
book.close() book.close()
book = load_workbook(excel_file_name) book = load_workbook(os.path.join(Folders.results, excel_file_name))
sheet = book["STree"] sheet = book["STree"]
self.check_excel_sheet(sheet, "test_files/excel_add_STree.test") self.check_excel_sheet(
sheet, os.path.join("test_files", "excel_add_STree.test")
)
sheet = book["ODTE"] sheet = book["ODTE"]
self.check_excel_sheet(sheet, "test_files/excel_add_ODTE.test") self.check_excel_sheet(
sheet, os.path.join("test_files", "excel_add_ODTE.test")
)

View File

@@ -1,9 +1,6 @@
import os import os
import csv
import unittest import unittest
from openpyxl import load_workbook from ..Results import SQL
from xlsxwriter import Workbook
from ..Results import Excel
from ..Utils import Folders from ..Utils import Folders
@@ -14,8 +11,6 @@ class SQLTest(unittest.TestCase):
def tearDown(self) -> None: def tearDown(self) -> None:
files = [ files = [
"results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.xlsx",
"results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.xlsx",
"results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.sql", "results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.sql",
] ]
for file_name in files: for file_name in files:
@@ -24,58 +19,16 @@ class SQLTest(unittest.TestCase):
os.remove(file_name) os.remove(file_name)
return super().tearDown() return super().tearDown()
def check_excel_sheet(self, sheet, file_name): def test_report_SQL(self):
with open(file_name, "r") as f: file_name = "results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.json"
expected = csv.reader(f, delimiter=";") report = SQL(file_name)
for row, col, value in expected:
if value.isdigit():
value = int(value)
else:
try:
value = float(value)
except ValueError:
pass
self.assertEqual(sheet.cell(int(row), int(col)).value, value)
def test_report_excel_compared(self):
file_name = (
"results/results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json"
)
report = Excel(file_name, compare=True)
report.report() report.report()
file_output = report.get_file_name() file_name = os.path.join(
book = load_workbook(file_output) Folders.results, file_name.replace(".json", ".sql")
sheet = book["STree"] )
self.check_excel_sheet(sheet, "test_files/excel_compared.test")
def test_report_excel(self): with open(file_name, "r") as file:
file_name = ( computed = file.read()
"results/results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json" with open(os.path.join("test_files", "sql.test")) as f_exp:
) expected = f_exp.read()
report = Excel(file_name, compare=False) self.assertEqual(computed, expected)
report.report()
file_output = report.get_file_name()
book = load_workbook(file_output)
sheet = book["STree"]
self.check_excel_sheet(sheet, "test_files/excel.test")
def test_Excel_Add_sheet(self):
file_name = (
"results/results_accuracy_STree_iMac27_2021-10-27_09:40:40_0.json"
)
excel_file_name = file_name.replace(".json", ".xlsx")
book = Workbook(excel_file_name)
excel = Excel(file_name=file_name, book=book)
excel.report()
report = Excel(
file_name="results/results_accuracy_ODTE_Galgo_2022-04-20_10:52:20"
"_0.json",
book=book,
)
report.report()
book.close()
book = load_workbook(excel_file_name)
sheet = book["STree"]
self.check_excel_sheet(sheet, "test_files/excel_add_STree.test")
sheet = book["ODTE"]
self.check_excel_sheet(sheet, "test_files/excel_add_ODTE.test")

View File

@@ -122,6 +122,8 @@ class UtilTest(unittest.TestCase):
"results_accuracy_STree_macbook-pro_2021-11-01_19:17:07_0." "results_accuracy_STree_macbook-pro_2021-11-01_19:17:07_0."
"json", "json",
"results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.json", "results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.json",
"results_accuracy_RandomForest_iMac27_2022-01-14_12:39:30_0."
"json",
], ],
) )
self.assertCountEqual( self.assertCountEqual(

View File

@@ -7,6 +7,7 @@ from .GridSearch_test import GridSearchTest
from .Report_test import ReportTest from .Report_test import ReportTest
from .Excel_test import ExcelTest from .Excel_test import ExcelTest
from .SQL_test import SQLTest from .SQL_test import SQLTest
from .Benchmark_test import BenchmarkTest
all = [ all = [
"UtilTest", "UtilTest",
@@ -18,4 +19,5 @@ all = [
"ReportTest", "ReportTest",
"ExcelTest", "ExcelTest",
"SQLTest", "SQLTest",
"BenchmarkTest",
] ]

2
benchmark/tests/exreport/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
*
!.gitignore

View File

@@ -0,0 +1,2 @@
Error: Only one argument must be supplied (score).n
Ejecución interrumpida

View File

@@ -0,0 +1,43 @@
{
"score_name": "accuracy",
"title": "Test default paramters with RandomForest",
"model": "RandomForest",
"version": "-",
"stratified": false,
"folds": 5,
"date": "2022-01-14",
"time": "12:39:30",
"duration": 272.7363500595093,
"seeds": [57, 31, 1714, 17, 23, 79, 83, 97, 7, 1],
"platform": "iMac27",
"results": [
{
"dataset": "balance-scale",
"samples": 625,
"features": 4,
"classes": 3,
"hyperparameters": {},
"nodes": 196.91440000000003,
"leaves": 98.42,
"depth": 10.681399999999998,
"score": 0.83616,
"score_std": 0.02649630917694009,
"time": 0.08222018241882324,
"time_std": 0.0013026326815120633
},
{
"dataset": "balloons",
"samples": 16,
"features": 4,
"classes": 2,
"hyperparameters": {},
"nodes": 9.110800000000001,
"leaves": 4.58,
"depth": 3.0982,
"score": 0.625,
"score_std": 0.24958298553119898,
"time": 0.07016648769378662,
"time_std": 0.002460508923990468
}
]
}

View File

@@ -0,0 +1,22 @@
****************************************************************************************************
Benchmark Ok
****************************************************************************************************
---------------------------------------------------------------------
Friedman test, objetive maximize output variable accuracy. Obtained p-value: 1.3534e-01
Chi squared with 2 degrees of freedom statistic: 4.0000
Test accepted: p-value: 1.3534e-01 >= 0.0500
---------------------------------------------------------------------
Control post hoc test for output accuracy
Adjust method: Holm
Control method: STree
p-values:
ODTE 0.3173
RandomForest 0.0910
---------------------------------------------------------------------
$testMultiple
classifier pvalue rank win tie loss
STree STree NA 1 NA NA NA
ODTE ODTE 0.31731051 2 2 0 0
RandomForest RandomForest 0.09100053 3 2 0 0

View File

@@ -0,0 +1,7 @@
classifier, dataset, accuracy, stdev, file_name
ODTE, balance-scale, 0.96352, 0.024949741481626608, results/results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.json
ODTE, balloons, 0.785, 0.2461311755051675, results/results_accuracy_ODTE_Galgo_2022-04-20_10:52:20_0.json
RandomForest, balance-scale, 0.83616, 0.02649630917694009, results/results_accuracy_RandomForest_iMac27_2022-01-14_12:39:30_0.json
RandomForest, balloons, 0.625, 0.24958298553119898, results/results_accuracy_RandomForest_iMac27_2022-01-14_12:39:30_0.json
STree, balance-scale, 0.97056, 0.015046806970251203, results/results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json
STree, balloons, 0.86, 0.28501461950807594, results/results_accuracy_STree_iMac27_2021-09-30_11:42:07_0.json

View File

@@ -0,0 +1,2 @@
replace into results (date,time,type,title,stratified,score_name,score,score_std,dataset,classifier,version,norm,stand,time_spent,time_spent_std,parameters,nodes,leaves,depth,platform,nfolds,seeds) values('2022-04-20','10:52:20','crossval','Gridsearched hyperparams v022.1b random_init','0','accuracy','0.96352','0.024949741481626608','balance-scale','ODTE','0.3.2','0','1','0.31663217544555666','0.19918813895255585','{"base_estimator__C": 57, "base_estimator__gamma": 0.1, "base_estimator__kernel": "rbf", "base_estimator__multiclass_strategy": "ovr", "n_estimators": 100, "n_jobs": -1}','7.361199999999999','4.180599999999999','3.536','Galgo','5','[57, 31, 1714, 17, 23, 79, 83, 97, 7, 1]');
replace into results (date,time,type,title,stratified,score_name,score,score_std,dataset,classifier,version,norm,stand,time_spent,time_spent_std,parameters,nodes,leaves,depth,platform,nfolds,seeds) values('2022-04-20','10:52:20','crossval','Gridsearched hyperparams v022.1b random_init','0','accuracy','0.785','0.2461311755051675','balloons','ODTE','0.3.2','0','1','0.11560620784759522','0.012784241828599895','{"base_estimator__C": 5, "base_estimator__gamma": 0.14, "base_estimator__kernel": "rbf", "base_estimator__multiclass_strategy": "ovr", "n_estimators": 100, "n_jobs": -1}','2.9951999999999996','1.9975999999999998','1.9975999999999998','Galgo','5','[57, 31, 1714, 17, 23, 79, 83, 97, 7, 1]');