Add excel report in benchmark

2021-09-25 23:30:32 +02:00
parent cbcdae3a30
commit 008982e9fa
5 changed files with 117 additions and 29 deletions

.gitignore

@@ -133,4 +133,5 @@ results/*
 exreport/exreport_output/*
 exreport/.Rapp.history
 exreport/Rplots.pdf
+exreport/exreport.xlsx
 Rplots.pdf

Results.py

@@ -235,12 +235,15 @@ class Excel(BaseReport):
         super().__init__(file_name)
         self.compare = compare

+    def get_file_name(self):
+        return self.excel_file_name
+
     def header(self):
         if self.compare:
             self._load_best_results(self.data["model"])
         self._compare_totals = {}
-        file_name = self.file_name.replace(".json", ".xlsx")
-        self.book = xlsxwriter.Workbook(file_name)
+        self.excel_file_name = self.file_name.replace(".json", ".xlsx")
+        self.book = xlsxwriter.Workbook(self.excel_file_name)
         self.sheet = self.book.add_worksheet(self.data["model"])
         header = self.book.add_format()
         header.set_font_size(18)
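
The workbook path is now kept on the instance so callers can retrieve it once the report has been written. A minimal sketch of the intended use (the json file name below is made up for illustration; Files comes from Utils):

    excel = Excel("results/results_STree.json", compare=False)
    excel.report()
    Files.open(excel.get_file_name())  # opens results/results_STree.xlsx
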
@@ -453,9 +456,6 @@ class Benchmark:
         for line in data:
             print(line)

-        def is_exe(fpath):
-            return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
-
         # Remove previous results
         try:
             shutil.rmtree(Folders.report)
@@ -478,26 +478,25 @@ class Benchmark:
             end_message("Error computing benchmark", Files.exreport_err)
         else:
             end_message("Benchmark Ok", Files.exreport_output)
-        if is_exe(Files.cmd_open):
-            subprocess.run([Files.cmd_open, Files.exreport_pdf])
+        Files.open(Files.exreport_pdf)
+
+    @staticmethod
+    def build_results():
+        # Build results data structure
+        file_name = Benchmark.get_result_file_name()
+        results = {}
+        with open(file_name) as f:
+            data = f.read().splitlines()
+        data = data[1:]
+        for line in data:
+            model, dataset, accuracy, stdev = line.split(", ")
+            if model not in results:
+                results[model] = {}
+            results[model][dataset] = (accuracy, stdev)
+        return results

     @staticmethod
     def report():
-        def build():
-            # Build results data structure
-            file_name = Benchmark.get_result_file_name()
-            results = {}
-            with open(file_name) as f:
-                data = f.read().splitlines()
-            data = data[1:]
-            for line in data:
-                model, dataset, accuracy, stdev = line.split(", ")
-                if model not in results:
-                    results[model] = {}
-                results[model][dataset] = (accuracy, stdev)
-            return results
-
         def show(results):
             datasets = results[list(results)[0]]
             print(f"{'Dataset':30s} ", end="")
@@ -513,4 +512,67 @@ class Benchmark:
                 print(f"{float(results[model][dataset][1]):.3f} ", end="")
             print("")

-        show(build())
+        show(Benchmark.build_results())
+
+    @staticmethod
+    def get_excel_file_name():
+        return os.path.join(Folders.exreport, Files.exreport_excel)
+
+    @staticmethod
+    def excel():
+        results = Benchmark.build_results()
+        book = xlsxwriter.Workbook(Benchmark.get_excel_file_name())
+        sheet = book.add_worksheet("Benchmark")
+        datasets = results[list(results)[0]]
+        normal = book.add_format({"font_size": 14})
+        decimal = book.add_format({"num_format": "0.000000", "font_size": 14})
+        merge_format = book.add_format(
+            {
+                "bold": 1,
+                "align": "center",
+                "valign": "vcenter",
+                "font_size": 14,
+            }
+        )
+        sheet.merge_range(0, 0, 1, 0, "Benchmark of Models", merge_format)
+        row = 3
+        # Set column width
+        sheet.set_column(0, 0, 40)
+        for column in range(2 * len(results)):
+            sheet.set_column(column + 1, column + 1, 15)
+        # Set report header
+        # Merge 2 rows
+        sheet.merge_range(row, 0, row + 1, 0, "Dataset", merge_format)
+        column = 1
+        for model in results:
+            # Merge 2 columns
+            sheet.merge_range(
+                row, column, row, column + 1, model, merge_format
+            )
+            column += 2
+        row += 1
+        column = 1
+        for _ in range(len(results)):
+            sheet.write(row, column, "Accuracy", merge_format)
+            sheet.write(row, column + 1, "Stdev", merge_format)
+            column += 2
+        for dataset, _ in datasets.items():
+            row += 1
+            sheet.write(row, 0, f"{dataset:30s}", normal)
+            column = 1
+            for model in results:
+                sheet.write(
+                    row,
+                    column,
+                    float(results[model][dataset][0]),
+                    decimal,
+                )
+                column += 1
+                sheet.write(
+                    row,
+                    column,
+                    float(results[model][dataset][1]),
+                    decimal,
+                )
+                column += 1
+        book.close()
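
The new excel() method lays out a merged title, a "Dataset" column and an Accuracy/Stdev column pair per model. A minimal driving sketch, assuming the results have already been compiled into the exreport csv (names taken from this commit):

    from Results import Benchmark

    Benchmark.excel()                       # builds the workbook from the compiled results
    print(Benchmark.get_excel_file_name())  # exreport/exreport.xlsx
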

Utils.py

@@ -1,11 +1,13 @@
 import os
+import subprocess


 class Folders:
     data = "data"
     results = "results"
     src = "src"
-    report = os.path.join("exreport", "exreport_output")
+    exreport = "exreport"
+    report = os.path.join(exreport, "exreport_output")


 class Files:
@@ -13,7 +15,9 @@ class Files:
exreport = "exreport.csv" exreport = "exreport.csv"
exreport_output = "exreport.txt" exreport_output = "exreport.txt"
exreport_err = "exreport_err.txt" exreport_err = "exreport_err.txt"
cmd_open = "/usr/bin/open" exreport_excel = "exreport.xlsx"
cmd_open_macos = "/usr/bin/open"
cmd_open_linux = "/usr/bin/xdg-open"
exreport_pdf = "Rplots.pdf" exreport_pdf = "Rplots.pdf"
@staticmethod @staticmethod
@@ -35,6 +39,20 @@ class Files:
     def dataset(name):
         return f"{name}_R.dat"

+    @staticmethod
+    def is_exe(fpath):
+        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
+
+    @staticmethod
+    def open(name):
+        if os.path.isfile(name):
+            command = (
+                Files.cmd_open_macos
+                if Files.is_exe(Files.cmd_open_macos)
+                else Files.cmd_open_linux
+            )
+            subprocess.run([command, name])
+

 class Symbols:
     check_mark = "\N{heavy check mark}"

@@ -1,4 +1,6 @@
 from Results import Benchmark
+from Utils import Files
 import argparse
+

 def parse_arguments():
@@ -12,18 +14,21 @@ def parse_arguments():
         help="model name, dfault STree",
     )
     ap.add_argument(
-        "-r",
-        "--report",
+        "-x",
+        "--excel",
         type=bool,
         required=False,
-        help="Generate Report",
+        help="Generate Excel File",
     )
     args = ap.parse_args()
-    return (args.model, args.report)
+    return (args.model, args.excel)


-(model, report) = parse_arguments()
+(model, excel) = parse_arguments()
 benchmark = Benchmark()
 benchmark.compile_results()
 benchmark.report()
 benchmark.exreport()
+if excel:
+    benchmark.excel()
+    Files.open(benchmark.get_excel_file_name())

@@ -2,6 +2,7 @@ import argparse
 import numpy as np
 from Experiments import Datasets
 from Results import Report, Excel, SQL, ReportBest
+from Utils import Files

 """Build report on screen of a result file, optionally generate excel and sql
 file, and can compare results of report with best results obtained by model
@@ -76,6 +77,7 @@ else:
     if excel:
         excel = Excel(file, compare)
         excel.report()
+        Files.open(excel.get_file_name())
     if sql:
         sql = SQL(file)
         sql.report()