Mirror of https://github.com/Doctorado-ML/benchmark.git (synced 2025-08-15 23:45:54 +00:00)
Add multi-score support to exreport
.gitignore (vendored): 4 changed lines
@@ -135,6 +135,6 @@ results/*
 exreport/exreport_output/*
 exreport/.Rapp.history
 exreport/Rplots.pdf
-exreport/exreport.xlsx
-exreport/.~lock.exreport.xlsx#
+exreport/*.xlsx
+exreport/.~lock.*.xlsx#
 Rplots.pdf
@@ -279,15 +279,15 @@ class Excel(BaseReport):
         header_cols = [
             ("Dataset", 30),
             ("Samples", 10),
-            ("Variables", 7),
+            ("Features", 7),
             ("Classes", 7),
             ("Nodes", 7),
             ("Leaves", 7),
             ("Depth", 7),
-            ("Score", 10),
-            ("Score Std.", 10),
-            ("Time", 10),
-            ("Time Std.", 10),
+            ("Score", 12),
+            ("Score Std.", 12),
+            ("Time", 12),
+            ("Time Std.", 12),
             ("Parameters", 50),
         ]
         if self.compare:
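For context, these (label, width) pairs are the kind of header definition xlsxwriter consumes column by column. A minimal, self-contained sketch of that pattern follows; it is an illustration only, not the project's actual writing loop, which this diff does not show:

import xlsxwriter

# Hypothetical demo of how (label, width) tuples map onto a worksheet header.
header_cols = [("Dataset", 30), ("Samples", 10), ("Features", 7), ("Score", 12)]
book = xlsxwriter.Workbook("header_demo.xlsx")
sheet = book.add_worksheet("Results")
bold = book.add_format({"bold": True})
for col, (label, width) in enumerate(header_cols):
    sheet.write(0, col, label, bold)   # header label in row 0
    sheet.set_column(col, col, width)  # width is measured in character units
book.close()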
@@ -348,6 +348,7 @@ class Excel(BaseReport):
         self.sheet.write(self.row + 1, 0, message, bold)
         for c in range(self.row + 2):
             self.sheet.set_row(c, 20)
+        self.sheet.set_row(0, 25)
         self.book.close()
@@ -452,12 +453,12 @@ class Benchmark:
         Benchmark._process_dataset(results, data)

         with open(result_file_name, "w") as f:
-            f.write("classifier, dataset, accuracy, stdev\n")
+            f.write(f"classifier, dataset, {score}, stdev\n")
             for (model, dataset), (accuracy, stdev) in results.items():
                 f.write(f"{model}, {dataset}, {accuracy}, {stdev}\n")

     @staticmethod
-    def exreport():
+    def exreport(score):
         def end_message(message, file):
             length = 100
             print("*" * length)
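The practical effect is that the results CSV now names its third column after the chosen metric instead of hard-coding accuracy. A hedged sketch of the output, with an invented results entry for illustration:

# Illustration only; the dataset and the numbers are made up.
score = "f1-macro"
results = {("STree", "iris"): (0.9533, 0.0189)}
with open(f"results/exreport_{score}.csv", "w") as f:
    f.write(f"classifier, dataset, {score}, stdev\n")
    for (model, dataset), (value, stdev) in results.items():
        f.write(f"{model}, {dataset}, {value}, {stdev}\n")
# Produces:
#   classifier, dataset, f1-macro, stdev
#   STree, iris, 0.9533, 0.0189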
@@ -477,25 +478,29 @@ class Benchmark:
         except OSError as e:
             print("Error: %s : %s" % (Folders.report, e.strerror))
         # Compute Friedman & Holm Tests
-        fout = open(os.path.join(Folders.results, Files.exreport_output), "w")
-        ferr = open(os.path.join(Folders.results, Files.exreport_err), "w")
+        fout = open(
+            os.path.join(Folders.results, Files.exreport_output(score)), "w"
+        )
+        ferr = open(
+            os.path.join(Folders.results, Files.exreport_err(score)), "w"
+        )
         result = subprocess.run(
-            ["Rscript", os.path.join(Folders.src, Files.benchmark_r)],
+            ["Rscript", os.path.join(Folders.src, Files.benchmark_r), score],
             stdout=fout,
             stderr=ferr,
         )
         fout.close()
         ferr.close()
         if result.returncode != 0:
-            end_message("Error computing benchmark", Files.exreport_err)
+            end_message("Error computing benchmark", Files.exreport_err(score))
         else:
-            end_message("Benchmark Ok", Files.exreport_output)
+            end_message("Benchmark Ok", Files.exreport_output(score))
         Files.open(Files.exreport_pdf)

     @staticmethod
-    def build_results():
+    def build_results(score):
         # Build results data structure
-        file_name = Benchmark.get_result_file_name()
+        file_name = Benchmark.get_result_file_name(score)
         results = {}
         with open(file_name) as f:
             data = f.read().splitlines()
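Putting the exreport pieces together, a minimal sketch of what Benchmark.exreport(score) now ends up executing; the file names follow the Files helpers added in src/Utils.py below, the results/ and src/ folder names are assumed from context, and this is an illustration rather than the exact code:

import subprocess

score = "accuracy"
with open(f"results/exreport_{score}.txt", "w") as fout, open(
    f"results/exreport_err_{score}.txt", "w"
) as ferr:
    # The score is forwarded to R as a positional command-line argument.
    subprocess.run(
        ["Rscript", "src/benchmark.r", score], stdout=fout, stderr=ferr
    )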
@@ -508,7 +513,7 @@ class Benchmark:
         return results

     @staticmethod
-    def report():
+    def report(score):
         def show(results):
             datasets = results[list(results)[0]]
             print(f"{'Dataset':30s} ", end="")
@@ -524,18 +529,20 @@ class Benchmark:
                 print(f"{float(results[model][dataset][1]):.3f} ", end="")
             print("")

-        show(Benchmark.build_results())
+        print(f"* Score is: {score}")
+        show(Benchmark.build_results(score))

     @staticmethod
-    def get_excel_file_name():
-        return os.path.join(Folders.exreport, Files.exreport_excel)
+    def get_excel_file_name(score):
+        return os.path.join(Folders.exreport, Files.exreport_excel(score))

     @staticmethod
-    def excel():
-        results = Benchmark.build_results()
-        book = xlsxwriter.Workbook(Benchmark.get_excel_file_name())
+    def excel(score):
+        results = Benchmark.build_results(score)
+        book = xlsxwriter.Workbook(Benchmark.get_excel_file_name(score))
         sheet = book.add_worksheet("Benchmark")
         normal = book.add_format({"font_size": 14})
+        bold = book.add_format({"bold": True, "font_size": 14})
         decimal = book.add_format({"num_format": "0.000000", "font_size": 14})
         merge_format = book.add_format(
             {
@@ -545,12 +552,14 @@ class Benchmark:
                 "font_size": 14,
             }
         )
-        row = row_init = 3
+        row = row_init = 4

         def header():
             nonlocal row
             sheet.merge_range(0, 0, 1, 0, "Benchmark of Models", merge_format)
-            # Set column width
+            sheet.write(1, 2, f"Score is {score}", bold)
+            sheet.set_row(1, 20)
+            # Set columns width
             sheet.set_column(0, 0, 40)
             for column in range(2 * len(results)):
                 sheet.set_column(column + 1, column + 1, 15)
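The new header now records which metric the sheet reports. A self-contained sketch of just that title block, using the xlsxwriter calls from this hunk; the merge format options are partly assumed, and everything else about the Excel sheet is omitted:

import xlsxwriter

score = "accuracy"
book = xlsxwriter.Workbook("benchmark_header_demo.xlsx")
sheet = book.add_worksheet("Benchmark")
bold = book.add_format({"bold": True, "font_size": 14})
merge_format = book.add_format({"bold": True, "align": "center", "font_size": 14})
sheet.merge_range(0, 0, 1, 0, "Benchmark of Models", merge_format)  # title spans rows 0-1
sheet.write(1, 2, f"Score is {score}", bold)  # new: label the metric in the header
sheet.set_row(1, 20)                          # give the label row some height
sheet.set_column(0, 0, 40)                    # wide first column for dataset names
book.close()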
src/Utils.py: 15 changed lines
@@ -13,14 +13,23 @@ class Folders:
 class Files:
     index = "all.txt"

-    exreport_output = "exreport.txt"
-    exreport_err = "exreport_err.txt"
-    exreport_excel = "exreport.xlsx"
     cmd_open_macos = "/usr/bin/open"
     cmd_open_linux = "/usr/bin/xdg-open"
     exreport_pdf = "Rplots.pdf"
     benchmark_r = "benchmark.r"

+    @staticmethod
+    def exreport_output(score):
+        return f"exreport_{score}.txt"
+
+    @staticmethod
+    def exreport_err(score):
+        return f"exreport_err_{score}.txt"
+
+    @staticmethod
+    def exreport_excel(score):
+        return f"exreport_{score}.xlsx"
+
     @staticmethod
     def exreport(score):
         return f"exreport_{score}.csv"
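With the fixed file-name attributes turned into static methods, every exreport artifact now embeds the score in its name. For example, based on the f-strings above (metric names other than the default accuracy are assumptions):

# Assuming src/ is on the import path:
from Utils import Files

Files.exreport_output("accuracy")   # -> "exreport_accuracy.txt"
Files.exreport_err("f1-macro")      # -> "exreport_err_f1-macro.txt"
Files.exreport_excel("f1-macro")    # -> "exreport_f1-macro.xlsx"
Files.exreport("accuracy")          # -> "exreport_accuracy.csv"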
@@ -20,14 +20,14 @@ def parse_arguments():
         help="Generate Excel File",
     )
     args = ap.parse_args()
-    return (args, score, args.excel)
+    return (args.score, args.excel)


 (score, excel) = parse_arguments()
 benchmark = Benchmark()
 benchmark.compile_results(score)
-benchmark.report()
-benchmark.exreport()
+benchmark.report(score)
+benchmark.exreport(score)
 if excel:
-    benchmark.excel()
-    Files.open(benchmark.get_excel_file_name())
+    benchmark.excel(score)
+    Files.open(benchmark.get_excel_file_name(score))
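Read together, the driver threads the score through every step. An annotated restatement of the lines above, with an example metric and the per-score file names assumed from the rest of this commit:

(score, excel) = parse_arguments()   # e.g. score == "accuracy"
benchmark = Benchmark()
benchmark.compile_results(score)     # writes results/exreport_accuracy.csv
benchmark.report(score)              # prints "* Score is: accuracy" plus the table
benchmark.exreport(score)            # runs Rscript with the score as its argument
if excel:
    benchmark.excel(score)           # writes exreport/exreport_accuracy.xlsx
    Files.open(benchmark.get_excel_file_name(score))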
@@ -1,13 +1,18 @@
-csv_file <- "results/exreport.csv"
+library(glue)
+args = commandArgs(trailingOnly=TRUE)
+if (length(args)!=1) {
+  stop("Only one argument must be supplied (score).n", call.=FALSE)
+}
+csv_file <- glue("results/exreport_{args[1]}.csv")
 destination <- "exreport/"
 results <- read.csv(csv_file)
 library(exreport)
 experiment <- expCreate(results, method="classifier", problem="dataset", name="Stree")
-testAccuracy <- testMultipleControl(experiment, "accuracy", "max")
+testAccuracy <- testMultipleControl(experiment, args[1], "max")
 summary(testAccuracy)
 table1 <- tabularTestSummary(testAccuracy, columns = c("pvalue", "rank", "wtl"))
 table1
-plot1 <- plotExpSummary(experiment, "accuracy", columns = 3)
+plot1 <- plotExpSummary(experiment, args[1], columns = 3)
 plot2 <- plotCumulativeRank(testAccuracy)
 plot3 <- plotRankDistribution(testAccuracy)
@@ -26,7 +31,7 @@ report <- exreportAdd(report, list(plot1,plot2,table1,plot3))
 # We have decided to generate the table at this point of the tutorial to discuss some special formatting parameters of this function. Concretely, some of the tabular outputs generated by exreport have properties that are only useful when rendering the objects in a graphic report, and have no effect on the object representation in the R console. In this case, we will tell the function to boldface the method that maximizes the result for each column, and to split the table into two pieces when rendering.

 # We create the table:
-table2 <- tabularExpSummary(experiment, "accuracy", digits=4, format="f", boldfaceColumns="max", tableSplit=2)
+table2 <- tabularExpSummary(experiment, args[1], digits=4, format="f", boldfaceColumns="max", tableSplit=2)
 # And add it to the report:
 report <- exreportAdd(report, table2)
 # Now that we have finished adding elements to the report, it is time to render it. We want to generate an HTML report, so we call the appropriate function; by default it renders and opens the report in your browser using a temporary file, but you can optionally specify a folder in which the report will be saved for future use.
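One coupling to keep in mind: the column name written on the Python side must match the score string handed to Rscript, since testMultipleControl and plotExpSummary select the output column by that name. A small, purely illustrative check:

import csv

score = "accuracy"  # whatever was passed on the command line
with open(f"results/exreport_{score}.csv") as f:
    header = next(csv.reader(f, skipinitialspace=True))
# Column 3 of (classifier, dataset, <score>, stdev) must equal the score given to R.
assert header[2] == score, f"CSV column {header[2]!r} != score {score!r}"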
@@ -47,8 +47,23 @@ def parse_arguments():
         required=False,
         help="best results of models",
     )
+    ap.add_argument(
+        "-s",
+        "--score",
+        type=str,
+        required=False,
+        default="accuracy",
+        help="score used in best results model",
+    )
     args = ap.parse_args()
-    return (args.file, args.excel, args.sql, args.compare, args.best)
+    return (
+        args.file,
+        args.excel,
+        args.sql,
+        args.compare,
+        args.best,
+        args.score,
+    )


 def default_report():
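In isolation, the new -s/--score option behaves like this; only the relevant argument is reproduced, the rest of the parser is omitted:

import argparse

ap = argparse.ArgumentParser()
ap.add_argument(
    "-s", "--score", type=str, required=False, default="accuracy",
    help="score used in best results model",
)
print(ap.parse_args([]).score)                  # accuracy  (the default)
print(ap.parse_args(["-s", "f1-macro"]).score)  # f1-macro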
@@ -63,13 +78,13 @@ def default_report():
     )


-(file, excel, sql, compare, best) = parse_arguments()
+(file, excel, sql, compare, best, score) = parse_arguments()

 if file is None and best is None:
     default_report()
 else:
     if best is not None:
-        report = ReportBest(best)
+        report = ReportBest(score, best)
         report.report()
     else:
         report = Report(file, compare)