Compare commits

91 Commits — optimize_m ... bestResult

SHA1: 06de13df98, de4fa6a04f, 3a7bf4e672, cd0bc02a74, c8597a794e, b30416364d,
3a16589220, c4f9187e2a, c4d0a5b4e6, 7bfafe555f, 337b6f7e79, 5fa0b957dd,
67252fc41d, 94ae9456a0, 781993e326, 8257a6ae39, fc81730dfc, d8734ff082,
03533461c8, 68f22a673d, b9bc0088f3, c280e254ca, 3d0f29fda3, 20a6ebab7c,
925f71166c, f69f415b92, 1bdfbd1620, 06fb135526, 501ea0ab4e, 847c6761d7,
6030885fc3, 89df7f4db0, 41257ed566, 506369e46b, d908f389f5, 5a7c8f1818,
64fc7bd9dd, 0b7beda78c, 05b670dfc0, de62d42b74, edb957d22e, 4de5cb4c6c,
c35030f137, 182b07ed90, 7806f961e2, 7c3e315ae7, 284ef6dfd1, 1c6af619b5,
86ffdfd6f3, d82148079d, 067430fd1b, f5d0d16365, 97ca8ac084, 1c1385b768,
35432b6294, c59dd30e53, d2da0ddb88, 8066701c3c, 0f66ac73d0, 4370bf51d7,
2b7353b9e0, b686b3c9c3, 2dd04a6c44, 1da83662d0, 3ac9593c65, 6b317accf1,
4964aab722, 7a6ec73d63, 1a534888d6, 59ffd179f4, 9972738deb, bafcb26bb6,
2d7999d5f2, a6bb22dfb5, 704dc937be, a3e665eed6, 918a7b4180, 80b20f35b4,
4d4780c1d5, fa612c531e, 24b68f9ae2, a062ebf445, 2a3fc9aa45, 55d21294d5,
3691cb4a61, 054567c65a, 2729b92f06, f26ea1f0ac, af0419c9da, 90c92e5c56,
6679b90a82
31 .clang-uml Normal file

@@ -0,0 +1,31 @@
+compilation_database_dir: build
+output_directory: puml
+diagrams:
+  BayesNet:
+    type: class
+    glob:
+      - src/BayesNet/*.cc
+      - src/Platform/*.cc
+    using_namespace: bayesnet
+    include:
+      namespaces:
+        - bayesnet
+        - platform
+    plantuml:
+      after:
+        - "note left of {{ alias(\"MyProjectMain\") }}: Main class of myproject library."
+  sequence:
+    type: sequence
+    glob:
+      - src/Platform/main.cc
+    combine_free_functions_into_file_participants: true
+    using_namespace:
+      - std
+      - bayesnet
+      - platform
+    include:
+      paths:
+        - src/BayesNet
+        - src/Platform
+    start_from:
+      - function: main(int,const char **)
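The diagrams defined by this configuration are generated into the `puml/` output directory via the new `clang-uml` Makefile target added in this changeset (`clang-uml -p --add-compile-flag ...`); the `.gitignore` change below excludes `puml/**` accordingly.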
2 .gitignore vendored

@@ -35,3 +35,5 @@ build/
 *.dSYM/**
 cmake-build*/**
 .idea
+puml/**
+.vscode/settings.json
3 .gitmodules vendored

@@ -10,3 +10,6 @@
 [submodule "lib/json"]
 	path = lib/json
 	url = https://github.com/nlohmann/json.git
+[submodule "lib/libxlsxwriter"]
+	path = lib/libxlsxwriter
+	url = https://github.com/jmcnamara/libxlsxwriter.git
42 .vscode/launch.json vendored

@@ -10,7 +10,7 @@
             "-d",
             "iris",
             "-m",
-            "KDB",
+            "TANLd",
             "-s",
             "271",
             "-p",
@@ -25,15 +25,51 @@
             "program": "${workspaceFolder}/build/src/Platform/main",
             "args": [
                 "-m",
-                "SPODELd",
+                "BoostAODE",
                 "-p",
                 "/Users/rmontanana/Code/discretizbench/datasets",
                 "--stratified",
                 "-d",
-                "iris"
+                "mfeat-morphological",
+                "--discretize"
+                // "--hyperparameters",
+                // "{\"repeatSparent\": true, \"maxModels\": 12}"
             ],
             "cwd": "/Users/rmontanana/Code/discretizbench",
         },
+        {
+            "type": "lldb",
+            "request": "launch",
+            "name": "best",
+            "program": "${workspaceFolder}/build/src/Platform/best",
+            "args": [
+                "-m",
+                "BoostAODE",
+                "-s",
+                "accuracy",
+                "--build",
+            ],
+            "cwd": "/Users/rmontanana/Code/discretizbench",
+        },
+        {
+            "type": "lldb",
+            "request": "launch",
+            "name": "manage",
+            "program": "${workspaceFolder}/build/src/Platform/manage",
+            "args": [
+                "-n",
+                "20"
+            ],
+            "cwd": "/Users/rmontanana/Code/discretizbench",
+        },
+        {
+            "type": "lldb",
+            "request": "launch",
+            "name": "list",
+            "program": "${workspaceFolder}/build/src/Platform/list",
+            "args": [],
+            "cwd": "/Users/rmontanana/Code/discretizbench",
+        },
         {
             "name": "Build & debug active file",
             "type": "cppdbg",
109 .vscode/settings.json vendored

@@ -1,109 +0,0 @@
-{
-    "files.associations": {
-        "*.rmd": "markdown",
-        "*.py": "python",
-        "vector": "cpp",
-        "__bit_reference": "cpp",
-        "__bits": "cpp",
-        "__config": "cpp",
-        "__debug": "cpp",
-        "__errc": "cpp",
-        "__hash_table": "cpp",
-        "__locale": "cpp",
-        "__mutex_base": "cpp",
-        "__node_handle": "cpp",
-        "__nullptr": "cpp",
-        "__split_buffer": "cpp",
-        "__string": "cpp",
-        "__threading_support": "cpp",
-        "__tuple": "cpp",
-        "array": "cpp",
-        "atomic": "cpp",
-        "bitset": "cpp",
-        "cctype": "cpp",
-        "chrono": "cpp",
-        "clocale": "cpp",
-        "cmath": "cpp",
-        "compare": "cpp",
-        "complex": "cpp",
-        "concepts": "cpp",
-        "cstdarg": "cpp",
-        "cstddef": "cpp",
-        "cstdint": "cpp",
-        "cstdio": "cpp",
-        "cstdlib": "cpp",
-        "cstring": "cpp",
-        "ctime": "cpp",
-        "cwchar": "cpp",
-        "cwctype": "cpp",
-        "exception": "cpp",
-        "initializer_list": "cpp",
-        "ios": "cpp",
-        "iosfwd": "cpp",
-        "istream": "cpp",
-        "limits": "cpp",
-        "locale": "cpp",
-        "memory": "cpp",
-        "mutex": "cpp",
-        "new": "cpp",
-        "optional": "cpp",
-        "ostream": "cpp",
-        "ratio": "cpp",
-        "sstream": "cpp",
-        "stdexcept": "cpp",
-        "streambuf": "cpp",
-        "string": "cpp",
-        "string_view": "cpp",
-        "system_error": "cpp",
-        "tuple": "cpp",
-        "type_traits": "cpp",
-        "typeinfo": "cpp",
-        "unordered_map": "cpp",
-        "variant": "cpp",
-        "algorithm": "cpp",
-        "iostream": "cpp",
-        "iomanip": "cpp",
-        "numeric": "cpp",
-        "set": "cpp",
-        "__tree": "cpp",
-        "deque": "cpp",
-        "list": "cpp",
-        "map": "cpp",
-        "unordered_set": "cpp",
-        "any": "cpp",
-        "condition_variable": "cpp",
-        "forward_list": "cpp",
-        "fstream": "cpp",
-        "stack": "cpp",
-        "thread": "cpp",
-        "__memory": "cpp",
-        "filesystem": "cpp",
-        "*.toml": "toml",
-        "utility": "cpp",
-        "__verbose_abort": "cpp",
-        "bit": "cpp",
-        "random": "cpp",
-        "*.tcc": "cpp",
-        "functional": "cpp",
-        "iterator": "cpp",
-        "memory_resource": "cpp",
-        "format": "cpp",
-        "valarray": "cpp",
-        "regex": "cpp",
-        "span": "cpp",
-        "cfenv": "cpp",
-        "cinttypes": "cpp",
-        "csetjmp": "cpp",
-        "future": "cpp",
-        "queue": "cpp",
-        "typeindex": "cpp",
-        "shared_mutex": "cpp",
-        "*.ipp": "cpp",
-        "cassert": "cpp",
-        "charconv": "cpp",
-        "source_location": "cpp",
-        "ranges": "cpp"
-    },
-    "cmake.configureOnOpen": false,
-    "C_Cpp.default.configurationProvider": "ms-vscode.cmake-tools"
-}
23 .vscode/tasks.json vendored

@@ -32,6 +32,29 @@
             ],
             "group": "build",
             "detail": "Task generated by Debugger."
         },
+        {
+            "type": "cppbuild",
+            "label": "C/C++: g++ build active file",
+            "command": "/usr/bin/g++",
+            "args": [
+                "-fdiagnostics-color=always",
+                "-g",
+                "${file}",
+                "-o",
+                "${fileDirname}/${fileBasenameNoExtension}"
+            ],
+            "options": {
+                "cwd": "${fileDirname}"
+            },
+            "problemMatcher": [
+                "$gcc"
+            ],
+            "group": {
+                "kind": "build",
+                "isDefault": true
+            },
+            "detail": "Task generated by Debugger."
+        }
     ]
 }
@@ -1,7 +1,7 @@
 cmake_minimum_required(VERSION 3.20)
 
 project(BayesNet
-  VERSION 0.1.0
+  VERSION 0.2.0
   DESCRIPTION "Bayesian Network and basic classifiers Library."
   HOMEPAGE_URL "https://github.com/rmontanana/bayesnet"
   LANGUAGES CXX
@@ -30,7 +30,7 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")
 option(ENABLE_CLANG_TIDY "Enable to add clang tidy." OFF)
 option(ENABLE_TESTING "Unit testing build" OFF)
 option(CODE_COVERAGE "Collect coverage from test library" OFF)
-
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread")
 # CMakes modules
 # --------------
 set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules ${CMAKE_MODULE_PATH})
@@ -40,8 +40,7 @@ if (CODE_COVERAGE)
     enable_testing()
     include(CodeCoverage)
     MESSAGE("Code coverage enabled")
-    set(CMAKE_C_FLAGS " ${CMAKE_C_FLAGS} -fprofile-arcs -ftest-coverage")
-    set(CMAKE_CXX_FLAGS " ${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage")
+    set(CMAKE_CXX_FLAGS " ${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage -O0 -g")
     SET(GCC_COVERAGE_LINK_FLAGS " ${GCC_COVERAGE_LINK_FLAGS} -lgcov --coverage")
 endif (CODE_COVERAGE)
 
@@ -55,6 +54,7 @@ endif (ENABLE_CLANG_TIDY)
 add_git_submodule("lib/mdlp")
 add_git_submodule("lib/argparse")
 add_git_submodule("lib/json")
+find_library(XLSXWRITER_LIB libxlsxwriter.dylib PATHS /usr/local/lib)
 
 # Subdirectories
 # --------------
@@ -73,8 +73,7 @@ file(GLOB Platform_SOURCES CONFIGURE_DEPENDS ${BayesNet_SOURCE_DIR}/src/Platform
 
 if (ENABLE_TESTING)
     MESSAGE("Testing enabled")
-    add_git_submodule("lib/catch2")
-
+    add_git_submodule("lib/catch2")
     include(CTest)
     add_subdirectory(tests)
 endif (ENABLE_TESTING)
20 Makefile

@@ -11,23 +11,37 @@ setup: ## Install dependencies for tests and coverage
 		pip install gcovr; \
 	fi
 
+dest ?= ../discretizbench
+copy: ## Copy binary files to selected folder
+	@echo "Destination folder: $(dest)"
+	make build
+	@echo ">>> Copying files to $(dest)"
+	@cp build/src/Platform/main $(dest)
+	@cp build/src/Platform/list $(dest)
+	@cp build/src/Platform/manage $(dest)
+	@cp build/src/Platform/best $(dest)
+	@echo ">>> Done"
+
 dependency: ## Create a dependency graph diagram of the project (build/dependency.png)
 	cd build && cmake .. --graphviz=dependency.dot && dot -Tpng dependency.dot -o dependency.png
 
 build: ## Build the main and BayesNetSample
-	cmake --build build -t main -t BayesNetSample -j 32
+	cmake --build build -t main -t BayesNetSample -t manage -t list -t best -j 32
 
 clean: ## Clean the debug info
	@echo ">>> Cleaning Debug BayesNet ...";
 	find . -name "*.gcda" -print0 | xargs -0 rm
 	@echo ">>> Done";
 
+clang-uml: ## Create uml class and sequence diagrams
+	clang-uml -p --add-compile-flag -I /usr/lib/gcc/x86_64-redhat-linux/8/include/
+
 debug: ## Build a debug version of the project
 	@echo ">>> Building Debug BayesNet ...";
 	@if [ -d ./build ]; then rm -rf ./build; fi
 	@mkdir build;
 	cmake -S . -B build -D CMAKE_BUILD_TYPE=Debug -D ENABLE_TESTING=ON -D CODE_COVERAGE=ON; \
-	cmake --build build -j 32;
+	cmake --build build -t main -t BayesNetSample -t manage -t list -t best -t unit_tests -j 32;
 	@echo ">>> Done";
 
 release: ## Build a Release version of the project
@@ -35,7 +49,7 @@ release: ## Build a Release version of the project
 	@if [ -d ./build ]; then rm -rf ./build; fi
 	@mkdir build;
 	cmake -S . -B build -D CMAKE_BUILD_TYPE=Release; \
-	cmake --build build -t main -t BayesNetSample -j 32;
+	cmake --build build -t main -t BayesNetSample -t manage -t list -t best -j 32;
 	@echo ">>> Done";
 
 test: ## Run tests
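With the new `copy` target, `make copy dest=/path/to/folder` rebuilds the binaries and copies `main`, `list`, `manage`, and `best` into the given folder; `dest` defaults to `../discretizbench`.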
32 README.md

@@ -2,4 +2,36 @@
 
 Bayesian Network Classifier with libtorch from scratch
 
+## 0. Setup
+
+### libxlsxwriter
+
+Install the libxlsxwriter submodule before compiling BayesNet:
+
+```bash
+cd lib/libxlsxwriter
+make
+sudo make install
+```
+
+It has to be installed in /usr/local/lib; otherwise, CMakeLists.txt has to be modified accordingly.
+
+The following environment variable has to be set:
+
+```bash
+export LD_LIBRARY_PATH=/usr/local/lib
+```
+
+### Release
+
+```bash
+make release
+```
+
+### Debug & Tests
+
+```bash
+make debug
+```
+
 ## 1. Introduction
12 TAN_iris.dot

@@ -1,12 +0,0 @@
-digraph BayesNet {
-label=<BayesNet >
-fontsize=30
-fontcolor=blue
-labelloc=t
-layout=circo
-class [shape=circle, fontcolor=red, fillcolor=lightblue, style=filled ]
-class -> sepallength class -> sepalwidth class -> petallength class -> petalwidth petallength [shape=circle]
-petallength -> sepallength petalwidth [shape=circle]
-sepallength [shape=circle]
-sepallength -> sepalwidth sepalwidth [shape=circle]
-sepalwidth -> petalwidth }

@@ -1 +0,0 @@
-null
BIN diagrams/BayesNet.pdf Executable file
Binary file not shown.
@@ -1,2 +1 @@
 add_library(ArffFiles ArffFiles.cc)
-#target_link_libraries(BayesNet "${TORCH_LIBRARIES}")
Submodule lib/catch2 updated: 4acc51828f...9c541ca72e

1 lib/libxlsxwriter Submodule
Submodule lib/libxlsxwriter added at 44e72c5862
@@ -3,5 +3,6 @@ include_directories(${BayesNet_SOURCE_DIR}/src/BayesNet)
 include_directories(${BayesNet_SOURCE_DIR}/lib/Files)
 include_directories(${BayesNet_SOURCE_DIR}/lib/mdlp)
 include_directories(${BayesNet_SOURCE_DIR}/lib/argparse/include)
+include_directories(${BayesNet_SOURCE_DIR}/lib/json/include)
 add_executable(BayesNetSample sample.cc ${BayesNet_SOURCE_DIR}/src/Platform/Folding.cc ${BayesNet_SOURCE_DIR}/src/Platform/Models.cc)
 target_link_libraries(BayesNetSample BayesNet ArffFiles mdlp "${TORCH_LIBRARIES}")
159 sample/sample.cc

@@ -3,13 +3,14 @@
 #include <string>
 #include <map>
 #include <argparse/argparse.hpp>
+#include <nlohmann/json.hpp>
 #include "ArffFiles.h"
 #include "BayesMetrics.h"
 #include "CPPFImdlp.h"
 #include "Folding.h"
 #include "Models.h"
 #include "modelRegister.h"
 
 #include <fstream>
 
 using namespace std;
 
@@ -57,6 +58,52 @@ pair<vector<vector<int>>, vector<int>> extract_indices(vector<int> indices, vect
 
 int main(int argc, char** argv)
 {
+    torch::Tensor weights_ = torch::full({ 10 }, 1.0 / 10, torch::kFloat64);
+    torch::Tensor y_ = torch::tensor({ 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 }, torch::kInt32);
+    torch::Tensor ypred = torch::tensor({ 1, 1, 1, 0, 0, 1, 1, 1, 1, 0 }, torch::kInt32);
+    cout << "Initial weights_: " << endl;
+    for (int i = 0; i < 10; i++) {
+        cout << weights_.index({ i }).item<double>() << ", ";
+    }
+    cout << "end." << endl;
+    cout << "y_: " << endl;
+    for (int i = 0; i < 10; i++) {
+        cout << y_.index({ i }).item<int>() << ", ";
+    }
+    cout << "end." << endl;
+    cout << "ypred: " << endl;
+    for (int i = 0; i < 10; i++) {
+        cout << ypred.index({ i }).item<int>() << ", ";
+    }
+    cout << "end." << endl;
+    auto mask_wrong = ypred != y_;
+    auto mask_right = ypred == y_;
+    auto masked_weights = weights_ * mask_wrong.to(weights_.dtype());
+    double epsilon_t = masked_weights.sum().item<double>();
+    cout << "epsilon_t: " << epsilon_t << endl;
+    double wt = (1 - epsilon_t) / epsilon_t;
+    cout << "wt: " << wt << endl;
+    double alpha_t = epsilon_t == 0 ? 1 : 0.5 * log(wt);
+    cout << "alpha_t: " << alpha_t << endl;
+    // Step 3.2: Update weights for next classifier
+    // Step 3.2.1: Update weights of wrong samples
+    cout << "exp(alpha_t): " << exp(alpha_t) << endl;
+    cout << "exp(-alpha_t): " << exp(-alpha_t) << endl;
+    weights_ += mask_wrong.to(weights_.dtype()) * exp(alpha_t) * weights_;
+    // Step 3.2.2: Update weights of right samples
+    weights_ += mask_right.to(weights_.dtype()) * exp(-alpha_t) * weights_;
+    // Step 3.3: Normalise the weights
+    double totalWeights = torch::sum(weights_).item<double>();
+    cout << "totalWeights: " << totalWeights << endl;
+    cout << "Before normalization: " << endl;
+    for (int i = 0; i < 10; i++) {
+        cout << weights_.index({ i }).item<double>() << endl;
+    }
+    weights_ = weights_ / totalWeights;
+    cout << "After normalization: " << endl;
+    for (int i = 0; i < 10; i++) {
+        cout << weights_.index({ i }).item<double>() << endl;
+    }
     map<string, bool> datasets = {
         {"diabetes", true},
         {"ecoli", true},
@@ -178,59 +225,59 @@ int main(int argc, char** argv)
     cout << "end." << endl;
     auto score = clf->score(Xd, y);
     cout << "Score: " << score << endl;
-    // auto graph = clf->graph();
-    // auto dot_file = model_name + "_" + file_name;
-    // ofstream file(dot_file + ".dot");
-    // file << graph;
-    // file.close();
-    // cout << "Graph saved in " << model_name << "_" << file_name << ".dot" << endl;
-    // cout << "dot -Tpng -o " + dot_file + ".png " + dot_file + ".dot " << endl;
-    // string stratified_string = stratified ? " Stratified" : "";
-    // cout << nFolds << " Folds" << stratified_string << " Cross validation" << endl;
-    // cout << "==========================================" << endl;
-    // torch::Tensor Xt = torch::zeros({ static_cast<int>(Xd.size()), static_cast<int>(Xd[0].size()) }, torch::kInt32);
-    // torch::Tensor yt = torch::tensor(y, torch::kInt32);
-    // for (int i = 0; i < features.size(); ++i) {
-    //     Xt.index_put_({ i, "..." }, torch::tensor(Xd[i], torch::kInt32));
-    // }
-    // float total_score = 0, total_score_train = 0, score_train, score_test;
-    // Fold* fold;
-    // if (stratified)
-    //     fold = new StratifiedKFold(nFolds, y, seed);
-    // else
-    //     fold = new KFold(nFolds, y.size(), seed);
-    // for (auto i = 0; i < nFolds; ++i) {
-    //     auto [train, test] = fold->getFold(i);
-    //     cout << "Fold: " << i + 1 << endl;
-    //     if (tensors) {
-    //         auto ttrain = torch::tensor(train, torch::kInt64);
-    //         auto ttest = torch::tensor(test, torch::kInt64);
-    //         torch::Tensor Xtraint = torch::index_select(Xt, 1, ttrain);
-    //         torch::Tensor ytraint = yt.index({ ttrain });
-    //         torch::Tensor Xtestt = torch::index_select(Xt, 1, ttest);
-    //         torch::Tensor ytestt = yt.index({ ttest });
-    //         clf->fit(Xtraint, ytraint, features, className, states);
-    //         auto temp = clf->predict(Xtraint);
-    //         score_train = clf->score(Xtraint, ytraint);
-    //         score_test = clf->score(Xtestt, ytestt);
-    //     } else {
-    //         auto [Xtrain, ytrain] = extract_indices(train, Xd, y);
-    //         auto [Xtest, ytest] = extract_indices(test, Xd, y);
-    //         clf->fit(Xtrain, ytrain, features, className, states);
-    //         score_train = clf->score(Xtrain, ytrain);
-    //         score_test = clf->score(Xtest, ytest);
-    //     }
-    //     if (dump_cpt) {
-    //         cout << "--- CPT Tables ---" << endl;
-    //         clf->dump_cpt();
-    //     }
-    //     total_score_train += score_train;
-    //     total_score += score_test;
-    //     cout << "Score Train: " << score_train << endl;
-    //     cout << "Score Test : " << score_test << endl;
-    //     cout << "-------------------------------------------------------------------------------" << endl;
-    // }
-    // cout << "**********************************************************************************" << endl;
-    // cout << "Average Score Train: " << total_score_train / nFolds << endl;
-    // cout << "Average Score Test : " << total_score / nFolds << endl;return 0;
+    auto graph = clf->graph();
+    auto dot_file = model_name + "_" + file_name;
+    ofstream file(dot_file + ".dot");
+    file << graph;
+    file.close();
+    cout << "Graph saved in " << model_name << "_" << file_name << ".dot" << endl;
+    cout << "dot -Tpng -o " + dot_file + ".png " + dot_file + ".dot " << endl;
+    string stratified_string = stratified ? " Stratified" : "";
+    cout << nFolds << " Folds" << stratified_string << " Cross validation" << endl;
+    cout << "==========================================" << endl;
+    torch::Tensor Xt = torch::zeros({ static_cast<int>(Xd.size()), static_cast<int>(Xd[0].size()) }, torch::kInt32);
+    torch::Tensor yt = torch::tensor(y, torch::kInt32);
+    for (int i = 0; i < features.size(); ++i) {
+        Xt.index_put_({ i, "..." }, torch::tensor(Xd[i], torch::kInt32));
+    }
+    float total_score = 0, total_score_train = 0, score_train, score_test;
+    platform::Fold* fold;
+    if (stratified)
+        fold = new platform::StratifiedKFold(nFolds, y, seed);
+    else
+        fold = new platform::KFold(nFolds, y.size(), seed);
+    for (auto i = 0; i < nFolds; ++i) {
+        auto [train, test] = fold->getFold(i);
+        cout << "Fold: " << i + 1 << endl;
+        if (tensors) {
+            auto ttrain = torch::tensor(train, torch::kInt64);
+            auto ttest = torch::tensor(test, torch::kInt64);
+            torch::Tensor Xtraint = torch::index_select(Xt, 1, ttrain);
+            torch::Tensor ytraint = yt.index({ ttrain });
+            torch::Tensor Xtestt = torch::index_select(Xt, 1, ttest);
+            torch::Tensor ytestt = yt.index({ ttest });
+            clf->fit(Xtraint, ytraint, features, className, states);
+            auto temp = clf->predict(Xtraint);
+            score_train = clf->score(Xtraint, ytraint);
+            score_test = clf->score(Xtestt, ytestt);
+        } else {
+            auto [Xtrain, ytrain] = extract_indices(train, Xd, y);
+            auto [Xtest, ytest] = extract_indices(test, Xd, y);
+            clf->fit(Xtrain, ytrain, features, className, states);
+            score_train = clf->score(Xtrain, ytrain);
+            score_test = clf->score(Xtest, ytest);
+        }
+        if (dump_cpt) {
+            cout << "--- CPT Tables ---" << endl;
+            clf->dump_cpt();
+        }
+        total_score_train += score_train;
+        total_score += score_test;
+        cout << "Score Train: " << score_train << endl;
+        cout << "Score Test : " << score_test << endl;
+        cout << "-------------------------------------------------------------------------------" << endl;
+    }
+    cout << "**********************************************************************************" << endl;
+    cout << "Average Score Train: " << total_score_train / nFolds << endl;
+    cout << "Average Score Test : " << total_score / nFolds << endl;
+    return 0;
 }
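For the hard-coded vectors in the weight-update demo above, six of the ten predictions disagree with `y_`, so with uniform initial weights epsilon_t = 6 × 0.1 = 0.6, wt = 0.4 / 0.6 ≈ 0.667, and alpha_t = 0.5 · ln(0.667) ≈ -0.203: a negative amount of say, which is what this scheme assigns whenever the weighted error exceeds 0.5.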
@@ -2,12 +2,14 @@
 
 namespace bayesnet {
     AODE::AODE() : Ensemble() {}
-    void AODE::buildModel()
+    void AODE::buildModel(const torch::Tensor& weights)
     {
         models.clear();
         for (int i = 0; i < features.size(); ++i) {
             models.push_back(std::make_unique<SPODE>(i));
         }
         n_models = models.size();
+        significanceModels = vector<double>(n_models, 1.0);
     }
     vector<string> AODE::graph(const string& title) const
     {
@@ -5,7 +5,7 @@
 namespace bayesnet {
     class AODE : public Ensemble {
     protected:
-        void buildModel() override;
+        void buildModel(const torch::Tensor& weights) override;
     public:
         AODE();
         virtual ~AODE() {};
@@ -4,9 +4,9 @@
 namespace bayesnet {
     using namespace std;
     AODELd::AODELd() : Ensemble(), Proposal(dataset, features, className) {}
-    AODELd& AODELd::fit(torch::Tensor& X_, torch::Tensor& y_, vector<string>& features_, string className_, map<string, vector<int>>& states_)
+    AODELd& AODELd::fit(torch::Tensor& X_, torch::Tensor& y_, const vector<string>& features_, const string& className_, map<string, vector<int>>& states_)
     {
         // This first part should go in a Classifier method called fit_local_discretization or fit_float...
         checkInput(X_, y_);
         features = features_;
         className = className_;
         Xf = X_;
@@ -19,15 +19,16 @@ namespace bayesnet {
         return *this;
 
     }
-    void AODELd::buildModel()
+    void AODELd::buildModel(const torch::Tensor& weights)
     {
         models.clear();
         for (int i = 0; i < features.size(); ++i) {
             models.push_back(std::make_unique<SPODELd>(i));
         }
         n_models = models.size();
+        significanceModels = vector<double>(n_models, 1.0);
     }
-    void AODELd::trainModel()
+    void AODELd::trainModel(const torch::Tensor& weights)
     {
         for (const auto& model : models) {
             model->fit(Xf, y, features, className, states);
@@ -8,13 +8,13 @@ namespace bayesnet {
     using namespace std;
     class AODELd : public Ensemble, public Proposal {
     protected:
-        void trainModel() override;
-        void buildModel() override;
+        void trainModel(const torch::Tensor& weights) override;
+        void buildModel(const torch::Tensor& weights) override;
     public:
         AODELd();
-        AODELd& fit(torch::Tensor& X_, torch::Tensor& y_, vector<string>& features_, string className_, map<string, vector<int>>& states_) override;
+        AODELd& fit(torch::Tensor& X_, torch::Tensor& y_, const vector<string>& features_, const string& className_, map<string, vector<int>>& states_) override;
         virtual ~AODELd() = default;
-        vector<string> graph(const string& name = "AODE") const override;
+        vector<string> graph(const string& name = "AODELd") const override;
         static inline string version() { return "0.0.1"; };
     };
 }
@@ -1,21 +1,25 @@
 #ifndef BASE_H
 #define BASE_H
 #include <torch/torch.h>
+#include <nlohmann/json.hpp>
 #include <vector>
 namespace bayesnet {
     using namespace std;
+    enum status_t { NORMAL, WARNING, ERROR };
     class BaseClassifier {
     protected:
-        virtual void trainModel() = 0;
+        virtual void trainModel(const torch::Tensor& weights) = 0;
     public:
         // X is nxm vector, y is nx1 vector
-        virtual BaseClassifier& fit(vector<vector<int>>& X, vector<int>& y, vector<string>& features, string className, map<string, vector<int>>& states) = 0;
+        virtual BaseClassifier& fit(vector<vector<int>>& X, vector<int>& y, const vector<string>& features, const string& className, map<string, vector<int>>& states) = 0;
         // X is nxm tensor, y is nx1 tensor
-        virtual BaseClassifier& fit(torch::Tensor& X, torch::Tensor& y, vector<string>& features, string className, map<string, vector<int>>& states) = 0;
-        virtual BaseClassifier& fit(torch::Tensor& dataset, vector<string>& features, string className, map<string, vector<int>>& states) = 0;
+        virtual BaseClassifier& fit(torch::Tensor& X, torch::Tensor& y, const vector<string>& features, const string& className, map<string, vector<int>>& states) = 0;
+        virtual BaseClassifier& fit(torch::Tensor& dataset, const vector<string>& features, const string& className, map<string, vector<int>>& states) = 0;
+        virtual BaseClassifier& fit(torch::Tensor& dataset, const vector<string>& features, const string& className, map<string, vector<int>>& states, const torch::Tensor& weights) = 0;
         virtual ~BaseClassifier() = default;
         torch::Tensor virtual predict(torch::Tensor& X) = 0;
         vector<int> virtual predict(vector<vector<int>>& X) = 0;
+        status_t virtual getStatus() const = 0;
         float virtual score(vector<vector<int>>& X, vector<int>& y) = 0;
         float virtual score(torch::Tensor& X, torch::Tensor& y) = 0;
         int virtual getNumberOfNodes()const = 0;
@@ -23,9 +27,10 @@ namespace bayesnet {
         int virtual getNumberOfStates() const = 0;
         vector<string> virtual show() const = 0;
         vector<string> virtual graph(const string& title = "") const = 0;
-        const string inline getVersion() const { return "0.1.0"; };
+        const string inline getVersion() const { return "0.2.0"; };
         vector<string> virtual topological_order() = 0;
         void virtual dump_cpt()const = 0;
+        virtual void setHyperparameters(nlohmann::json& hyperparameters) = 0;
     };
 }
 #endif
@@ -21,6 +21,45 @@ namespace bayesnet {
     }
     samples.index_put_({ -1, "..." }, torch::tensor(labels, torch::kInt32));
 }
+vector<int> Metrics::SelectKBestWeighted(const torch::Tensor& weights, bool ascending, unsigned k)
+{
+    // Return the K Best features
+    auto n = samples.size(0) - 1;
+    if (k == 0) {
+        k = n;
+    }
+    // compute scores
+    scoresKBest.clear();
+    featuresKBest.clear();
+    auto label = samples.index({ -1, "..." });
+    for (int i = 0; i < n; ++i) {
+        scoresKBest.push_back(mutualInformation(label, samples.index({ i, "..." }), weights));
+        featuresKBest.push_back(i);
+    }
+    // sort & reduce scores and features
+    if (ascending) {
+        sort(featuresKBest.begin(), featuresKBest.end(), [&](int i, int j)
+            { return scoresKBest[i] < scoresKBest[j]; });
+        sort(scoresKBest.begin(), scoresKBest.end(), std::less<double>());
+        if (k < n) {
+            for (int i = 0; i < n - k; ++i) {
+                featuresKBest.erase(featuresKBest.begin());
+                scoresKBest.erase(scoresKBest.begin());
+            }
+        }
+    } else {
+        sort(featuresKBest.begin(), featuresKBest.end(), [&](int i, int j)
+            { return scoresKBest[i] > scoresKBest[j]; });
+        sort(scoresKBest.begin(), scoresKBest.end(), std::greater<double>());
+        featuresKBest.resize(k);
+        scoresKBest.resize(k);
+    }
+    return featuresKBest;
+}
+vector<double> Metrics::getScoresKBest() const
+{
+    return scoresKBest;
+}
 vector<pair<string, string>> Metrics::doCombinations(const vector<string>& source)
 {
     vector<pair<string, string>> result;
@@ -32,17 +71,17 @@ namespace bayesnet {
     }
     return result;
 }
-torch::Tensor Metrics::conditionalEdge()
+torch::Tensor Metrics::conditionalEdge(const torch::Tensor& weights)
 {
     auto result = vector<double>();
     auto source = vector<string>(features);
     source.push_back(className);
     auto combinations = doCombinations(source);
     // Compute class prior
-    auto margin = torch::zeros({ classNumStates });
+    auto margin = torch::zeros({ classNumStates }, torch::kFloat);
     for (int value = 0; value < classNumStates; ++value) {
         auto mask = samples.index({ -1, "..." }) == value;
-        margin[value] = mask.sum().item<float>() / samples.size(1);
+        margin[value] = mask.sum().item<double>() / samples.size(1);
     }
     for (auto [first, second] : combinations) {
         int index_first = find(features.begin(), features.end(), first) - features.begin();
@@ -52,8 +91,9 @@ namespace bayesnet {
             auto mask = samples.index({ -1, "..." }) == value;
             auto first_dataset = samples.index({ index_first, mask });
             auto second_dataset = samples.index({ index_second, mask });
-            auto mi = mutualInformation(first_dataset, second_dataset);
-            auto pb = margin[value].item<float>();
+            auto weights_dataset = weights.index({ mask });
+            auto mi = mutualInformation(first_dataset, second_dataset, weights_dataset);
+            auto pb = margin[value].item<double>();
             accumulated += pb * mi;
         }
         result.push_back(accumulated);
@@ -70,31 +110,32 @@ namespace bayesnet {
     return matrix;
 }
 // To use in Python
-vector<float> Metrics::conditionalEdgeWeights()
+vector<float> Metrics::conditionalEdgeWeights(vector<float>& weights_)
 {
-    auto matrix = conditionalEdge();
+    const torch::Tensor weights = torch::tensor(weights_);
+    auto matrix = conditionalEdge(weights);
     std::vector<float> v(matrix.data_ptr<float>(), matrix.data_ptr<float>() + matrix.numel());
     return v;
 }
-double Metrics::entropy(const torch::Tensor& feature)
+double Metrics::entropy(const torch::Tensor& feature, const torch::Tensor& weights)
 {
-    torch::Tensor counts = feature.bincount();
-    int totalWeight = counts.sum().item<int>();
+    torch::Tensor counts = feature.bincount(weights);
+    double totalWeight = counts.sum().item<double>();
     torch::Tensor probs = counts.to(torch::kFloat) / totalWeight;
     torch::Tensor logProbs = torch::log(probs);
     torch::Tensor entropy = -probs * logProbs;
     return entropy.nansum().item<double>();
 }
 // H(Y|X) = sum_{x in X} p(x) H(Y|X=x)
-double Metrics::conditionalEntropy(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature)
+double Metrics::conditionalEntropy(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& weights)
 {
     int numSamples = firstFeature.sizes()[0];
-    torch::Tensor featureCounts = secondFeature.bincount();
+    torch::Tensor featureCounts = secondFeature.bincount(weights);
     unordered_map<int, unordered_map<int, double>> jointCounts;
     double totalWeight = 0;
     for (auto i = 0; i < numSamples; i++) {
-        jointCounts[secondFeature[i].item<int>()][firstFeature[i].item<int>()] += 1;
-        totalWeight += 1;
+        jointCounts[secondFeature[i].item<int>()][firstFeature[i].item<int>()] += weights[i].item<double>();
+        totalWeight += weights[i].item<float>();
     }
     if (totalWeight == 0)
         return 0;
@@ -115,9 +156,9 @@ namespace bayesnet {
     return entropyValue;
 }
 // I(X;Y) = H(Y) - H(Y|X)
-double Metrics::mutualInformation(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature)
+double Metrics::mutualInformation(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& weights)
 {
-    return entropy(firstFeature) - conditionalEntropy(firstFeature, secondFeature);
+    return entropy(firstFeature, weights) - conditionalEntropy(firstFeature, secondFeature, weights);
 }
 /*
 Compute the maximum spanning tree considering the weights as distances
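In the weighted measures above, each sample i contributes its weight w_i instead of a unit count. A sketch of the formulas the `bincount(weights)` implementation corresponds to, with X the first feature and Y the second:

```latex
H_w(X) = -\sum_{v} p_w(v) \log p_w(v), \qquad p_w(v) = \frac{\sum_{i:\, x_i = v} w_i}{\sum_{i} w_i}
H_w(X \mid Y) = \sum_{u} p_w(u) \, H_w(X \mid Y = u), \qquad I_w(X;Y) = H_w(X) - H_w(X \mid Y)
```

With uniform weights w_i = 1/m these reduce to the unweighted definitions used before this change.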
|
@@ -12,16 +12,20 @@ namespace bayesnet {
         vector<string> features;
         string className;
         int classNumStates = 0;
+        vector<double> scoresKBest;
+        vector<int> featuresKBest; // sorted indices of the features
+        double entropy(const Tensor& feature, const Tensor& weights);
+        double conditionalEntropy(const Tensor& firstFeature, const Tensor& secondFeature, const Tensor& weights);
+        vector<pair<string, string>> doCombinations(const vector<string>&);
     public:
         Metrics() = default;
-        Metrics(const Tensor&, const vector<string>&, const string&, const int);
-        Metrics(const vector<vector<int>>&, const vector<int>&, const vector<string>&, const string&, const int);
-        double entropy(const Tensor&);
-        double conditionalEntropy(const Tensor&, const Tensor&);
-        double mutualInformation(const Tensor&, const Tensor&);
-        vector<float> conditionalEdgeWeights(); // To use in Python
-        Tensor conditionalEdge();
-        vector<pair<string, string>> doCombinations(const vector<string>&);
+        Metrics(const torch::Tensor& samples, const vector<string>& features, const string& className, const int classNumStates);
+        Metrics(const vector<vector<int>>& vsamples, const vector<int>& labels, const vector<string>& features, const string& className, const int classNumStates);
+        vector<int> SelectKBestWeighted(const torch::Tensor& weights, bool ascending=false, unsigned k = 0);
+        vector<double> getScoresKBest() const;
+        double mutualInformation(const Tensor& firstFeature, const Tensor& secondFeature, const Tensor& weights);
+        vector<float> conditionalEdgeWeights(vector<float>& weights); // To use in Python
+        Tensor conditionalEdge(const torch::Tensor& weights);
         vector<pair<int, int>> maximumSpanningTree(const vector<string>& features, const Tensor& weights, const int root);
     };
 }
148 src/BayesNet/BoostAODE.cc Normal file

@@ -0,0 +1,148 @@
+#include "BoostAODE.h"
+#include <set>
+#include "BayesMetrics.h"
+#include "Colors.h"
+#include "Folding.h"
+#include <limits.h>
+
+namespace bayesnet {
+    BoostAODE::BoostAODE() : Ensemble() {}
+    void BoostAODE::buildModel(const torch::Tensor& weights)
+    {
+        // Models shall be built in trainModel
+    }
+    void BoostAODE::setHyperparameters(nlohmann::json& hyperparameters)
+    {
+        // Check if hyperparameters are valid
+        const vector<string> validKeys = { "repeatSparent", "maxModels", "ascending", "convergence" };
+        checkHyperparameters(validKeys, hyperparameters);
+        if (hyperparameters.contains("repeatSparent")) {
+            repeatSparent = hyperparameters["repeatSparent"];
+        }
+        if (hyperparameters.contains("maxModels")) {
+            maxModels = hyperparameters["maxModels"];
+        }
+        if (hyperparameters.contains("ascending")) {
+            ascending = hyperparameters["ascending"];
+        }
+        if (hyperparameters.contains("convergence")) {
+            convergence = hyperparameters["convergence"];
+        }
+    }
+    void BoostAODE::validationInit()
+    {
+        auto y_ = dataset.index({ -1, "..." });
+        if (convergence) {
+            // Prepare train & validation sets from train data
+            auto fold = platform::StratifiedKFold(5, y_, 271);
+            dataset_ = torch::clone(dataset);
+            // save input dataset
+            auto [train, test] = fold.getFold(0);
+            auto train_t = torch::tensor(train);
+            auto test_t = torch::tensor(test);
+            // Get train and validation sets
+            X_train = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), train_t });
+            y_train = dataset.index({ -1, train_t });
+            X_test = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), test_t });
+            y_test = dataset.index({ -1, test_t });
+            dataset = X_train;
+            m = X_train.size(1);
+            auto n_classes = states.at(className).size();
+            metrics = Metrics(dataset, features, className, n_classes);
+            // Build dataset with train data
+            buildDataset(y_train);
+        } else {
+            // Use all data to train
+            X_train = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), "..." });
+            y_train = y_;
+        }
+    }
+    void BoostAODE::trainModel(const torch::Tensor& weights)
+    {
+        models.clear();
+        n_models = 0;
+        if (maxModels == 0)
+            maxModels = .1 * n > 10 ? .1 * n : n;
+        validationInit();
+        Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);
+        bool exitCondition = false;
+        unordered_set<int> featuresUsed;
+        // Variables to control the accuracy finish condition
+        double priorAccuracy = 0.0;
+        double delta = 1.0;
+        double threshold = 1e-4;
+        int tolerance = 5; // number of times the accuracy can be lower than the threshold
+        int count = 0; // number of times the accuracy is lower than the threshold
+        fitted = true; // to enable predict
+        // Step 0: Set the finish conditions
+        // - if not repeatSparent, a finish condition is running out of features
+        // - n_models == maxModels
+        // - epsilon_t > 0.5 => inverse the weights policy
+        // - validation error is not decreasing
+        while (!exitCondition) {
+            // Step 1: Build ranking with mutual information
+            auto featureSelection = metrics.SelectKBestWeighted(weights_, ascending, n); // Get all the features sorted
+            unique_ptr<Classifier> model;
+            auto feature = featureSelection[0];
+            if (!repeatSparent || featuresUsed.size() < featureSelection.size()) {
+                bool found = false;
+                for (auto feat : featureSelection) {
+                    if (find(featuresUsed.begin(), featuresUsed.end(), feat) != featuresUsed.end()) {
+                        continue;
+                    }
+                    found = true;
+                    feature = feat;
+                    break;
+                }
+                if (!found) {
+                    exitCondition = true;
+                    continue;
+                }
+            }
+            featuresUsed.insert(feature);
+            model = std::make_unique<SPODE>(feature);
+            model->fit(dataset, features, className, states, weights_);
+            auto ypred = model->predict(X_train);
+            // Step 3.1: Compute the classifier amount of say
+            auto mask_wrong = ypred != y_train;
+            auto mask_right = ypred == y_train;
+            auto masked_weights = weights_ * mask_wrong.to(weights_.dtype());
+            double epsilon_t = masked_weights.sum().item<double>();
+            double wt = (1 - epsilon_t) / epsilon_t;
+            double alpha_t = epsilon_t == 0 ? 1 : 0.5 * log(wt);
+            // Step 3.2: Update weights for next classifier
+            // Step 3.2.1: Update weights of wrong samples
+            weights_ += mask_wrong.to(weights_.dtype()) * exp(alpha_t) * weights_;
+            // Step 3.2.2: Update weights of right samples
+            weights_ += mask_right.to(weights_.dtype()) * exp(-alpha_t) * weights_;
+            // Step 3.3: Normalise the weights
+            double totalWeights = torch::sum(weights_).item<double>();
+            weights_ = weights_ / totalWeights;
+            // Step 3.4: Store classifier and its accuracy to weigh its future vote
+            models.push_back(std::move(model));
+            significanceModels.push_back(alpha_t);
+            n_models++;
+            if (convergence) {
+                auto y_val_predict = predict(X_test);
+                double accuracy = (y_val_predict == y_test).sum().item<double>() / (double)y_test.size(0);
+                if (priorAccuracy == 0) {
+                    priorAccuracy = accuracy;
+                } else {
+                    delta = accuracy - priorAccuracy;
+                }
+                if (delta < threshold) {
+                    count++;
+                }
+            }
+            exitCondition = n_models == maxModels && repeatSparent || epsilon_t > 0.5 || count > tolerance;
+        }
+        if (featuresUsed.size() != features.size()) {
+            status = WARNING;
+        }
+    }
+    vector<string> BoostAODE::graph(const string& title) const
+    {
+        return Ensemble::graph(title);
+    }
+}
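In the notation of the Step 3.x comments above, each boosting round computes the weighted error epsilon_t and the amount of say alpha_t, then rescales and renormalises the sample weights. A sketch of the exact update the code performs (note the in-place `+=`, which gives a factor 1 + e^{±alpha_t} rather than the textbook e^{±alpha_t}):

```latex
\varepsilon_t = \sum_{i:\, \hat{y}_i \neq y_i} w_i, \qquad
\alpha_t = \begin{cases} 1 & \text{if } \varepsilon_t = 0 \\ \tfrac{1}{2} \ln \frac{1 - \varepsilon_t}{\varepsilon_t} & \text{otherwise} \end{cases}
w_i \leftarrow w_i \, (1 + e^{\alpha_t}) \ \text{if } \hat{y}_i \neq y_i, \qquad
w_i \leftarrow w_i \, (1 + e^{-\alpha_t}) \ \text{otherwise}, \qquad
w_i \leftarrow w_i \Big/ \textstyle\sum_j w_j
```

The loop then exits when `n_models == maxModels` (with `repeatSparent`), when epsilon_t exceeds 0.5, or when the validation-accuracy improvement stays below `threshold` more than `tolerance` times.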
25 src/BayesNet/BoostAODE.h Normal file

@@ -0,0 +1,25 @@
+#ifndef BOOSTAODE_H
+#define BOOSTAODE_H
+#include "Ensemble.h"
+#include "SPODE.h"
+namespace bayesnet {
+    class BoostAODE : public Ensemble {
+    public:
+        BoostAODE();
+        virtual ~BoostAODE() {};
+        vector<string> graph(const string& title = "BoostAODE") const override;
+        void setHyperparameters(nlohmann::json& hyperparameters) override;
+    protected:
+        void buildModel(const torch::Tensor& weights) override;
+        void trainModel(const torch::Tensor& weights) override;
+    private:
+        torch::Tensor dataset_;
+        torch::Tensor X_train, y_train, X_test, y_test;
+        void validationInit();
+        bool repeatSparent = false;
+        int maxModels = 0;
+        bool ascending = false; //Process KBest features in ascending or descending order
+        bool convergence = false; //if true, stop when the model does not improve
+    };
+}
+#endif
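A minimal usage sketch for the new ensemble; the toy tensors, feature names, and states below are hypothetical, and the dataset layout follows the `Classifier::fit` convention of an (n+1) x m integer tensor with the class labels in the last row:

```cpp
#include <iostream>
#include <map>
#include <string>
#include <vector>
#include <torch/torch.h>
#include <nlohmann/json.hpp>
#include "BoostAODE.h"

int main()
{
    using namespace std;
    // Hypothetical toy dataset: two binary feature rows plus the class row, four samples.
    auto dataset = torch::tensor({ {0, 1, 0, 1},
                                   {0, 0, 1, 1},
                                   {0, 1, 1, 0} }, torch::kInt32);
    vector<string> features = { "f0", "f1" };
    string className = "class";
    map<string, vector<int>> states = { {"f0", {0, 1}}, {"f1", {0, 1}}, {"class", {0, 1}} };
    bayesnet::BoostAODE clf;
    nlohmann::json hyperparameters = { {"repeatSparent", true}, {"maxModels", 2} };
    clf.setHyperparameters(hyperparameters); // only the four validated keys are accepted
    clf.fit(dataset, features, className, states); // uniform 1/m weights are built internally
    auto X = dataset.index({ torch::indexing::Slice(0, 2), "..." }); // feature rows only
    auto y = dataset.index({ -1, "..." });
    cout << "Score: " << clf.score(X, y) << endl;
    return 0;
}
```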
@@ -1,7 +1,9 @@
 include_directories(${BayesNet_SOURCE_DIR}/lib/mdlp)
 include_directories(${BayesNet_SOURCE_DIR}/lib/Files)
+include_directories(${BayesNet_SOURCE_DIR}/lib/json/include)
 include_directories(${BayesNet_SOURCE_DIR}/src/BayesNet)
 include_directories(${BayesNet_SOURCE_DIR}/src/Platform)
 add_library(BayesNet bayesnetUtils.cc Network.cc Node.cc BayesMetrics.cc Classifier.cc
-    KDB.cc TAN.cc SPODE.cc Ensemble.cc AODE.cc TANLd.cc KDBLd.cc SPODELd.cc AODELd.cc Mst.cc Proposal.cc ${BayesNet_SOURCE_DIR}/src/Platform/Models.cc)
-target_link_libraries(BayesNet mdlp ArffFiles "${TORCH_LIBRARIES}")
+    KDB.cc TAN.cc SPODE.cc Ensemble.cc AODE.cc TANLd.cc KDBLd.cc SPODELd.cc AODELd.cc BoostAODE.cc
+    Mst.cc Proposal.cc ${BayesNet_SOURCE_DIR}/src/Platform/Models.cc)
+target_link_libraries(BayesNet mdlp "${TORCH_LIBRARIES}")
@@ -5,7 +5,7 @@ namespace bayesnet {
     using namespace torch;
 
     Classifier::Classifier(Network model) : model(model), m(0), n(0), metrics(Metrics()), fitted(false) {}
-    Classifier& Classifier::build(vector<string>& features, string className, map<string, vector<int>>& states)
+    Classifier& Classifier::build(const vector<string>& features, const string& className, map<string, vector<int>>& states, const torch::Tensor& weights)
     {
         this->features = features;
         this->className = className;
@@ -13,15 +13,14 @@ namespace bayesnet {
         m = dataset.size(1);
         n = dataset.size(0) - 1;
         checkFitParameters();
-        auto n_classes = states[className].size();
+        auto n_classes = states.at(className).size();
         metrics = Metrics(dataset, features, className, n_classes);
         model.initialize();
-        buildModel();
-        trainModel();
+        buildModel(weights);
+        trainModel(weights);
         fitted = true;
         return *this;
     }
 
     void Classifier::buildDataset(Tensor& ytmp)
     {
         try {
@@ -35,19 +34,20 @@ namespace bayesnet {
             exit(1);
         }
     }
-    void Classifier::trainModel()
+    void Classifier::trainModel(const torch::Tensor& weights)
     {
-        model.fit(dataset, features, className, states);
+        model.fit(dataset, weights, features, className, states);
     }
     // X is nxm where n is the number of features and m the number of samples
-    Classifier& Classifier::fit(torch::Tensor& X, torch::Tensor& y, vector<string>& features, string className, map<string, vector<int>>& states)
+    Classifier& Classifier::fit(torch::Tensor& X, torch::Tensor& y, const vector<string>& features, const string& className, map<string, vector<int>>& states)
     {
         dataset = X;
         buildDataset(y);
-        return build(features, className, states);
+        const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);
+        return build(features, className, states, weights);
     }
     // X is nxm where n is the number of features and m the number of samples
-    Classifier& Classifier::fit(vector<vector<int>>& X, vector<int>& y, vector<string>& features, string className, map<string, vector<int>>& states)
+    Classifier& Classifier::fit(vector<vector<int>>& X, vector<int>& y, const vector<string>& features, const string& className, map<string, vector<int>>& states)
     {
         dataset = torch::zeros({ static_cast<int>(X.size()), static_cast<int>(X[0].size()) }, kInt32);
         for (int i = 0; i < X.size(); ++i) {
@@ -55,17 +55,27 @@ namespace bayesnet {
         }
         auto ytmp = torch::tensor(y, kInt32);
         buildDataset(ytmp);
-        return build(features, className, states);
+        const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);
+        return build(features, className, states, weights);
     }
-    Classifier& Classifier::fit(torch::Tensor& dataset, vector<string>& features, string className, map<string, vector<int>>& states)
+    Classifier& Classifier::fit(torch::Tensor& dataset, const vector<string>& features, const string& className, map<string, vector<int>>& states)
     {
         this->dataset = dataset;
-        return build(features, className, states);
+        const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);
+        return build(features, className, states, weights);
     }
+    Classifier& Classifier::fit(torch::Tensor& dataset, const vector<string>& features, const string& className, map<string, vector<int>>& states, const torch::Tensor& weights)
+    {
+        this->dataset = dataset;
+        return build(features, className, states, weights);
+    }
     void Classifier::checkFitParameters()
     {
         if (torch::is_floating_point(dataset)) {
             throw invalid_argument("dataset (X, y) must be of type Integer");
         }
         if (n != features.size()) {
-            throw invalid_argument("X " + to_string(n) + " and features " + to_string(features.size()) + " must have the same number of features");
+            throw invalid_argument("Classifier: X " + to_string(n) + " and features " + to_string(features.size()) + " must have the same number of features");
         }
         if (states.find(className) == states.end()) {
             throw invalid_argument("className not found in states");
@@ -145,4 +155,18 @@ namespace bayesnet {
     {
         model.dump_cpt();
     }
+    void Classifier::checkHyperparameters(const vector<string>& validKeys, nlohmann::json& hyperparameters)
+    {
+        for (const auto& item : hyperparameters.items()) {
+            if (find(validKeys.begin(), validKeys.end(), item.key()) == validKeys.end()) {
+                throw invalid_argument("Hyperparameter " + item.key() + " is not valid");
+            }
+        }
+    }
+    void Classifier::setHyperparameters(nlohmann::json& hyperparameters)
+    {
+        // Check if hyperparameters are valid, default is no hyperparameters
+        const vector<string> validKeys = { };
+        checkHyperparameters(validKeys, hyperparameters);
+    }
 }
@@ -10,37 +10,42 @@ using namespace torch;
 namespace bayesnet {
     class Classifier : public BaseClassifier {
     private:
-        void buildDataset(torch::Tensor& y);
-        Classifier& build(vector<string>& features, string className, map<string, vector<int>>& states);
+        Classifier& build(const vector<string>& features, const string& className, map<string, vector<int>>& states, const torch::Tensor& weights);
     protected:
         bool fitted;
-        Network model;
         int m, n; // m: number of samples, n: number of features
-        Tensor dataset; // (n+1)xm tensor
+        Network model;
         Metrics metrics;
         vector<string> features;
         string className;
         map<string, vector<int>> states;
+        Tensor dataset; // (n+1)xm tensor
+        status_t status = NORMAL;
         void checkFitParameters();
-        virtual void buildModel() = 0;
-        void trainModel() override;
+        virtual void buildModel(const torch::Tensor& weights) = 0;
+        void trainModel(const torch::Tensor& weights) override;
+        void checkHyperparameters(const vector<string>& validKeys, nlohmann::json& hyperparameters);
+        void buildDataset(torch::Tensor& y);
     public:
         Classifier(Network model);
         virtual ~Classifier() = default;
-        Classifier& fit(vector<vector<int>>& X, vector<int>& y, vector<string>& features, string className, map<string, vector<int>>& states) override;
-        Classifier& fit(torch::Tensor& X, torch::Tensor& y, vector<string>& features, string className, map<string, vector<int>>& states) override;
-        Classifier& fit(torch::Tensor& dataset, vector<string>& features, string className, map<string, vector<int>>& states) override;
+        Classifier& fit(vector<vector<int>>& X, vector<int>& y, const vector<string>& features, const string& className, map<string, vector<int>>& states) override;
+        Classifier& fit(torch::Tensor& X, torch::Tensor& y, const vector<string>& features, const string& className, map<string, vector<int>>& states) override;
+        Classifier& fit(torch::Tensor& dataset, const vector<string>& features, const string& className, map<string, vector<int>>& states) override;
+        Classifier& fit(torch::Tensor& dataset, const vector<string>& features, const string& className, map<string, vector<int>>& states, const torch::Tensor& weights) override;
         void addNodes();
         int getNumberOfNodes() const override;
         int getNumberOfEdges() const override;
         int getNumberOfStates() const override;
         Tensor predict(Tensor& X) override;
+        status_t getStatus() const override { return status; }
         vector<int> predict(vector<vector<int>>& X) override;
         float score(Tensor& X, Tensor& y) override;
         float score(vector<vector<int>>& X, vector<int>& y) override;
         vector<string> show() const override;
         vector<string> topological_order() override;
         void dump_cpt() const override;
+        void setHyperparameters(nlohmann::json& hyperparameters) override;
     };
 }
 #endif
@@ -3,9 +3,9 @@
 namespace bayesnet {
     using namespace torch;
 
-    Ensemble::Ensemble() : Classifier(Network()) {}
+    Ensemble::Ensemble() : Classifier(Network()), n_models(0) {}
 
-    void Ensemble::trainModel()
+    void Ensemble::trainModel(const torch::Tensor& weights)
     {
         n_models = models.size();
         for (auto i = 0; i < n_models; ++i) {
@@ -17,10 +17,14 @@ namespace bayesnet {
     {
         auto y_pred_ = y_pred.accessor<int, 2>();
         vector<int> y_pred_final;
+        int numClasses = states.at(className).size();
+        // y_pred is m x n_models with the prediction of every model for each sample
         for (int i = 0; i < y_pred.size(0); ++i) {
-            vector<float> votes(y_pred.size(1), 0);
-            for (int j = 0; j < y_pred.size(1); ++j) {
-                votes[y_pred_[i][j]] += 1;
+            // votes stores, at each index (class value), the significance added by each model,
+            // i.e. votes[0] accumulates the significance the models' predictions assign to class value 0
+            vector<double> votes(numClasses, 0.0);
+            for (int j = 0; j < n_models; ++j) {
+                votes[y_pred_[i][j]] += significanceModels.at(j);
             }
             // argsort in descending order
             auto indices = argsort(votes);
@@ -34,7 +38,6 @@ namespace bayesnet {
             throw logic_error("Ensemble has not been fitted");
         }
         Tensor y_pred = torch::zeros({ X.size(1), n_models }, kInt32);
-        //Create a threadpool
         auto threads{ vector<thread>() };
         mutex mtx;
         for (auto i = 0; i < n_models; ++i) {
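With the vote accumulation above, the ensemble switches from plain majority voting to significance-weighted voting. Writing h_j(x) for model j's prediction on a sample x and alpha_j = `significanceModels[j]`, the predicted class is:

```latex
\hat{y}(x) = \arg\max_{c \in \{0, \dots, C-1\}} \; \sum_{j=1}^{n\_models} \alpha_j \, \mathbf{1}\left[ h_j(x) = c \right]
```

For the AODE-style ensembles every alpha_j is initialised to 1.0 in `buildModel`, so plain voting is recovered; BoostAODE stores alpha_t per model instead.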
@@ -14,7 +14,8 @@ namespace bayesnet {
     protected:
         unsigned n_models;
         vector<unique_ptr<Classifier>> models;
-        void trainModel() override;
+        vector<double> significanceModels;
+        void trainModel(const torch::Tensor& weights) override;
         vector<int> voting(Tensor& y_pred);
     public:
         Ensemble();
@@ -4,7 +4,19 @@ namespace bayesnet {
     using namespace torch;
 
     KDB::KDB(int k, float theta) : Classifier(Network()), k(k), theta(theta) {}
-    void KDB::buildModel()
+    void KDB::setHyperparameters(nlohmann::json& hyperparameters)
+    {
+        // Check if hyperparameters are valid
+        const vector<string> validKeys = { "k", "theta" };
+        checkHyperparameters(validKeys, hyperparameters);
+        if (hyperparameters.contains("k")) {
+            k = hyperparameters["k"];
+        }
+        if (hyperparameters.contains("theta")) {
+            theta = hyperparameters["theta"];
+        }
+    }
+    void KDB::buildModel(const torch::Tensor& weights)
     {
         /*
         1. For each feature Xi, compute mutual information, I(X;C),
@@ -29,13 +41,13 @@ namespace bayesnet {
         // where C is the class.
         addNodes();
         const Tensor& y = dataset.index({ -1, "..." });
-        vector<float> mi;
+        vector<double> mi;
         for (auto i = 0; i < features.size(); i++) {
             Tensor firstFeature = dataset.index({ i, "..." });
-            mi.push_back(metrics.mutualInformation(firstFeature, y));
+            mi.push_back(metrics.mutualInformation(firstFeature, y, weights));
         }
         // 2. Compute class conditional mutual information I(Xi;Xj|C), for each
-        auto conditionalEdgeWeights = metrics.conditionalEdge();
+        auto conditionalEdgeWeights = metrics.conditionalEdge(weights);
         // 3. Let the used variable list, S, be empty.
         vector<int> S;
         // 4. Let the DAG network being constructed, BN, begin with a single
|
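Given the new setHyperparameters above, configuring a KDB classifier would look roughly like this (a hypothetical call site, not code from this changeset):

    #include <nlohmann/json.hpp>
    // k bounds the number of parents per feature; theta is the significance
    // threshold used when adding edges.
    auto clf = bayesnet::KDB(2);
    nlohmann::json hyperparameters = { { "k", 3 }, { "theta", 0.1 } };
    clf.setHyperparameters(hyperparameters); // keys outside {k, theta} are rejected by checkHyperparameters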
@@ -1,5 +1,6 @@
 #ifndef KDB_H
 #define KDB_H
 #include <torch/torch.h>
 #include "Classifier.h"
 #include "bayesnetUtils.h"
 namespace bayesnet {
@@ -11,10 +12,11 @@ namespace bayesnet {
        float theta;
        void add_m_edges(int idx, vector<int>& S, Tensor& weights);
    protected:
-       void buildModel() override;
+       void buildModel(const torch::Tensor& weights) override;
    public:
        explicit KDB(int k, float theta = 0.03);
        virtual ~KDB() {};
+       void setHyperparameters(nlohmann::json& hyperparameters) override;
        vector<string> graph(const string& name = "KDB") const override;
    };
}
@@ -3,9 +3,9 @@
 namespace bayesnet {
    using namespace std;
    KDBLd::KDBLd(int k) : KDB(k), Proposal(dataset, features, className) {}
-   KDBLd& KDBLd::fit(torch::Tensor& X_, torch::Tensor& y_, vector<string>& features_, string className_, map<string, vector<int>>& states_)
+   KDBLd& KDBLd::fit(torch::Tensor& X_, torch::Tensor& y_, const vector<string>& features_, const string& className_, map<string, vector<int>>& states_)
    {
        // This first part should go in a Classifier method called fit_local_discretization or fit_float...
        checkInput(X_, y_);
        features = features_;
        className = className_;
        Xf = X_;
@@ -10,7 +10,7 @@ namespace bayesnet {
    public:
        explicit KDBLd(int k);
        virtual ~KDBLd() = default;
-       KDBLd& fit(torch::Tensor& X, torch::Tensor& y, vector<string>& features, string className, map<string, vector<int>>& states) override;
+       KDBLd& fit(torch::Tensor& X, torch::Tensor& y, const vector<string>& features, const string& className, map<string, vector<int>>& states) override;
        vector<string> graph(const string& name = "KDB") const override;
        Tensor predict(Tensor& X) override;
        static inline string version() { return "0.0.1"; };
@@ -3,9 +3,8 @@
 #include "Network.h"
 #include "bayesnetUtils.h"
 namespace bayesnet {
-   Network::Network() : features(vector<string>()), className(""), classNumStates(0), fitted(false) {}
-   Network::Network(float maxT) : features(vector<string>()), className(""), classNumStates(0), maxThreads(maxT), fitted(false) {}
-   Network::Network(float maxT, int smoothing) : laplaceSmoothing(smoothing), features(vector<string>()), className(""), classNumStates(0), maxThreads(maxT), fitted(false) {}
+   Network::Network() : features(vector<string>()), className(""), classNumStates(0), fitted(false), laplaceSmoothing(0) {}
+   Network::Network(float maxT) : features(vector<string>()), className(""), classNumStates(0), maxThreads(maxT), fitted(false), laplaceSmoothing(0) {}
    Network::Network(Network& other) : laplaceSmoothing(other.laplaceSmoothing), features(other.features), className(other.className), classNumStates(other.getClassNumStates()), maxThreads(other.getmaxThreads()), fitted(other.fitted)
    {
@@ -104,8 +103,11 @@ namespace bayesnet {
    {
        return nodes;
    }
-   void Network::checkFitData(int n_samples, int n_features, int n_samples_y, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states)
+   void Network::checkFitData(int n_samples, int n_features, int n_samples_y, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states, const torch::Tensor& weights)
    {
+       if (weights.size(0) != n_samples) {
+           throw invalid_argument("Weights (" + to_string(weights.size(0)) + ") must have the same number of elements as samples (" + to_string(n_samples) + ") in Network::fit");
+       }
        if (n_samples != n_samples_y) {
            throw invalid_argument("X and y must have the same number of samples in Network::fit (" + to_string(n_samples) + " != " + to_string(n_samples_y) + ")");
        }
@@ -130,34 +132,35 @@ namespace bayesnet {
    void Network::setStates(const map<string, vector<int>>& states)
    {
        // Set states to every Node in the network
-       for (int i = 0; i < features.size(); ++i) {
-           nodes[features[i]]->setNumStates(states.at(features[i]).size());
-       }
-       classNumStates = nodes[className]->getNumStates();
+       for_each(features.begin(), features.end(), [this, &states](const string& feature) {
+           nodes.at(feature)->setNumStates(states.at(feature).size());
+       });
+       classNumStates = nodes.at(className)->getNumStates();
    }
    // X comes in nxm, where n is the number of features and m the number of samples
-   void Network::fit(const torch::Tensor& X, const torch::Tensor& y, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states)
+   void Network::fit(const torch::Tensor& X, const torch::Tensor& y, const torch::Tensor& weights, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states)
    {
-       checkFitData(X.size(1), X.size(0), y.size(0), featureNames, className, states);
+       checkFitData(X.size(1), X.size(0), y.size(0), featureNames, className, states, weights);
        this->className = className;
        Tensor ytmp = torch::transpose(y.view({ y.size(0), 1 }), 0, 1);
        samples = torch::cat({ X , ytmp }, 0);
        for (int i = 0; i < featureNames.size(); ++i) {
            auto row_feature = X.index({ i, "..." });
        }
-       completeFit(states);
+       completeFit(states, weights);
    }
-   void Network::fit(const torch::Tensor& samples, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states)
+   void Network::fit(const torch::Tensor& samples, const torch::Tensor& weights, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states)
    {
-       checkFitData(samples.size(1), samples.size(0) - 1, samples.size(1), featureNames, className, states);
+       checkFitData(samples.size(1), samples.size(0) - 1, samples.size(1), featureNames, className, states, weights);
        this->className = className;
        this->samples = samples;
-       completeFit(states);
+       completeFit(states, weights);
    }
    // input_data comes in nxm, where n is the number of features and m the number of samples
-   void Network::fit(const vector<vector<int>>& input_data, const vector<int>& labels, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states)
+   void Network::fit(const vector<vector<int>>& input_data, const vector<int>& labels, const vector<float>& weights_, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states)
    {
-       checkFitData(input_data[0].size(), input_data.size(), labels.size(), featureNames, className, states);
+       const torch::Tensor weights = torch::tensor(weights_, torch::kFloat64);
+       checkFitData(input_data[0].size(), input_data.size(), labels.size(), featureNames, className, states, weights);
        this->className = className;
        // Build tensor of samples (nxm) (n+1 because of the class)
        samples = torch::zeros({ static_cast<int>(input_data.size() + 1), static_cast<int>(input_data[0].size()) }, torch::kInt32);
@@ -165,42 +168,17 @@ namespace bayesnet {
            samples.index_put_({ i, "..." }, torch::tensor(input_data[i], torch::kInt32));
        }
        samples.index_put_({ -1, "..." }, torch::tensor(labels, torch::kInt32));
-       completeFit(states);
+       completeFit(states, weights);
    }
-   void Network::completeFit(const map<string, vector<int>>& states)
+   void Network::completeFit(const map<string, vector<int>>& states, const torch::Tensor& weights)
    {
        setStates(states);
-       int maxThreadsRunning = static_cast<int>(std::thread::hardware_concurrency() * maxThreads);
-       if (maxThreadsRunning < 1) {
-           maxThreadsRunning = 1;
-       }
+       laplaceSmoothing = 1.0 / samples.size(1); // To use in CPT computation
        vector<thread> threads;
-       mutex mtx;
-       condition_variable cv;
-       int activeThreads = 0;
-       int nextNodeIndex = 0;
-       while (nextNodeIndex < nodes.size()) {
-           unique_lock<mutex> lock(mtx);
-           cv.wait(lock, [&activeThreads, &maxThreadsRunning]() { return activeThreads < maxThreadsRunning; });
-           threads.emplace_back([this, &nextNodeIndex, &mtx, &cv, &activeThreads]() {
-               while (true) {
-                   unique_lock<mutex> lock(mtx);
-                   if (nextNodeIndex >= nodes.size()) {
-                       break; // No more work remaining
-                   }
-                   auto& pair = *std::next(nodes.begin(), nextNodeIndex);
-                   ++nextNodeIndex;
-                   lock.unlock();
-                   pair.second->computeCPT(samples, features, laplaceSmoothing);
-                   lock.lock();
-                   nodes[pair.first] = std::move(pair.second);
-                   lock.unlock();
-               }
-               lock_guard<mutex> lock(mtx);
-               --activeThreads;
-               cv.notify_one();
+       for (auto& node : nodes) {
+           threads.emplace_back([this, &node, &weights]() {
+               node.second->computeCPT(samples, features, laplaceSmoothing, weights);
            });
-           ++activeThreads;
        }
        for (auto& thread : threads) {
            thread.join();
@@ -343,7 +321,7 @@ namespace bayesnet {
        }
        // Normalize result
        double sum = accumulate(result.begin(), result.end(), 0.0);
-       transform(result.begin(), result.end(), result.begin(), [sum](double& value) { return value / sum; });
+       transform(result.begin(), result.end(), result.begin(), [sum](const double& value) { return value / sum; });
        return result;
    }
    vector<string> Network::show() const
@@ -395,7 +373,6 @@ namespace bayesnet {
        auto result = features;
        result.erase(remove(result.begin(), result.end(), className), result.end());
        bool ending{ false };
-       int idx = 0;
        while (!ending) {
            ending = true;
            for (auto feature : features) {
@@ -431,6 +408,7 @@ namespace bayesnet {
    {
        for (auto& node : nodes) {
            cout << "* " << node.first << ": (" << node.second->getNumStates() << ") : " << node.second->getCPT().sizes() << endl;
+           cout << node.second->getCPT() << endl;
        }
    }
}
@@ -13,21 +13,21 @@ namespace bayesnet {
        int classNumStates;
        vector<string> features; // Including classname
        string className;
-       int laplaceSmoothing = 1;
+       double laplaceSmoothing;
        torch::Tensor samples; // nxm tensor used to fit the model
        bool isCyclic(const std::string&, std::unordered_set<std::string>&, std::unordered_set<std::string>&);
        vector<double> predict_sample(const vector<int>&);
        vector<double> predict_sample(const torch::Tensor&);
        vector<double> exactInference(map<string, int>&);
        double computeFactor(map<string, int>&);
-       void completeFit(const map<string, vector<int>>&);
-       void checkFitData(int n_features, int n_samples, int n_samples_y, const vector<string>& featureNames, const string& className, const map<string, vector<int>>&);
+       void completeFit(const map<string, vector<int>>& states, const torch::Tensor& weights);
+       void checkFitData(int n_features, int n_samples, int n_samples_y, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states, const torch::Tensor& weights);
        void setStates(const map<string, vector<int>>&);
    public:
        Network();
-       explicit Network(float, int);
        explicit Network(float);
        explicit Network(Network&);
        ~Network() = default;
        torch::Tensor& getSamples();
        float getmaxThreads();
        void addNode(const string&);
@@ -39,9 +39,9 @@ namespace bayesnet {
        int getNumEdges() const;
        int getClassNumStates() const;
        string getClassName() const;
-       void fit(const vector<vector<int>>&, const vector<int>&, const vector<string>&, const string&, const map<string, vector<int>>&);
-       void fit(const torch::Tensor&, const torch::Tensor&, const vector<string>&, const string&, const map<string, vector<int>>&);
-       void fit(const torch::Tensor&, const vector<string>&, const string&, const map<string, vector<int>>&);
+       void fit(const vector<vector<int>>& input_data, const vector<int>& labels, const vector<float>& weights, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states);
+       void fit(const torch::Tensor& X, const torch::Tensor& y, const torch::Tensor& weights, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states);
+       void fit(const torch::Tensor& samples, const torch::Tensor& weights, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states);
        vector<int> predict(const vector<vector<int>>&); // Return mx1 vector of predictions
        torch::Tensor predict(const torch::Tensor&); // Return mx1 tensor of predictions
        torch::Tensor predict_tensor(const torch::Tensor& samples, const bool proba);
@@ -53,7 +53,7 @@ namespace bayesnet {
        vector<string> graph(const string& title) const; // Returns a vector of strings representing the graph in graphviz format
        void initialize();
        void dump_cpt() const;
-       inline string version() { return "0.1.0"; }
+       inline string version() { return "0.2.0"; }
    };
}
#endif
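With the weighted overloads above, every fit call now takes a sample-weight tensor; uniform weights reproduce the old unweighted behaviour. A minimal sketch of a call site (the data variables are assumed to be in scope):

    // m samples, each weighted 1/m, matching the pattern used elsewhere in this changeset
    auto weights = torch::full({ X.size(1) }, 1.0 / X.size(1), torch::kDouble);
    network.fit(X, y, weights, featureNames, className, states);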
@@ -84,7 +84,7 @@ namespace bayesnet {
        }
        return result;
    }
-   void Node::computeCPT(const torch::Tensor& dataset, const vector<string>& features, const int laplaceSmoothing)
+   void Node::computeCPT(const torch::Tensor& dataset, const vector<string>& features, const double laplaceSmoothing, const torch::Tensor& weights)
    {
        dimensions.clear();
        // Get dimensions of the CPT
@@ -100,7 +100,7 @@ namespace bayesnet {
        }
        int name_index = pos - features.begin();
        for (int n_sample = 0; n_sample < dataset.size(1); ++n_sample) {
-           torch::List<c10::optional<torch::Tensor>> coordinates;
+           c10::List<c10::optional<at::Tensor>> coordinates;
            coordinates.push_back(dataset.index({ name_index, n_sample }));
            for (auto parent : parents) {
                pos = find(features.begin(), features.end(), parent->getName());
@@ -111,17 +111,17 @@ namespace bayesnet {
                coordinates.push_back(dataset.index({ parent_index, n_sample }));
            }
            // Increment the count of the corresponding coordinate
-           cpTable.index_put_({ coordinates }, cpTable.index({ coordinates }) + 1);
+           cpTable.index_put_({ coordinates }, cpTable.index({ coordinates }) + weights.index({ n_sample }).item<double>());
        }
        // Normalize the counts
        cpTable = cpTable / cpTable.sum(0);
    }
    float Node::getFactorValue(map<string, int>& evidence)
    {
-       torch::List<c10::optional<torch::Tensor>> coordinates;
+       c10::List<c10::optional<at::Tensor>> coordinates;
        // following predetermined order of indices in the cpTable (see Node.h)
-       coordinates.push_back(torch::tensor(evidence[name]));
-       transform(parents.begin(), parents.end(), back_inserter(coordinates), [&evidence](const auto& parent) { return torch::tensor(evidence[parent->getName()]); });
+       coordinates.push_back(at::tensor(evidence[name]));
+       transform(parents.begin(), parents.end(), back_inserter(coordinates), [&evidence](const auto& parent) { return at::tensor(evidence[parent->getName()]); });
        return cpTable.index({ coordinates }).item<float>();
    }
    vector<string> Node::graph(const string& className)
@@ -26,7 +26,7 @@ namespace bayesnet {
        vector<Node*>& getParents();
        vector<Node*>& getChildren();
        torch::Tensor& getCPT();
-       void computeCPT(const torch::Tensor&, const vector<string>&, const int);
+       void computeCPT(const torch::Tensor& dataset, const vector<string>& features, const double laplaceSmoothing, const torch::Tensor& weights);
        int getNumStates() const;
        void setNumStates(int);
        unsigned minFill();
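The computeCPT change makes each sample contribute its weight instead of a unit count to the CPT cell addressed by its own value and its parents' values, after which the columns are normalized. A scalar sketch of the same estimator without the tensor machinery (a hypothetical helper, not the library API):

    #include <vector>
    // Weighted distribution over the states of one variable for a fixed
    // parent configuration: accumulate weights, then normalize.
    std::vector<double> weightedColumn(const std::vector<int>& x, const std::vector<double>& w, int numStates)
    {
        std::vector<double> counts(numStates, 0.0);
        for (size_t i = 0; i < x.size(); ++i) {
            counts[x[i]] += w[i]; // weight instead of +1
        }
        double total = 0.0;
        for (double c : counts) total += c;
        for (double& c : counts) c /= total; // same role as cpTable / cpTable.sum(0)
        return counts;
    }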
@@ -9,6 +9,15 @@ namespace bayesnet {
            delete value;
        }
    }
+   void Proposal::checkInput(const torch::Tensor& X, const torch::Tensor& y)
+   {
+       if (!torch::is_floating_point(X)) {
+           throw std::invalid_argument("X must be a floating point tensor");
+       }
+       if (torch::is_floating_point(y)) {
+           throw std::invalid_argument("y must be an integer tensor");
+       }
+   }
    map<string, vector<int>> Proposal::localDiscretizationProposal(const map<string, vector<int>>& oldStates, Network& model)
    {
        // order of local discretization is important. no good 0, 1, 2...
@@ -44,15 +53,6 @@ namespace bayesnet {
        auto xvf_ptr = Xf.index({ index }).data_ptr<float>();
        auto xvf = vector<mdlp::precision_t>(xvf_ptr, xvf_ptr + Xf.size(1));
        discretizers[feature]->fit(xvf, yxv);
-       //
-       //
-       //
-       // auto tmp = discretizers[feature]->transform(xvf);
-       // Xv[index] = tmp;
-       // auto xStates = vector<int>(discretizers[pFeatures[index]]->getCutPoints().size() + 1);
-       // iota(xStates.begin(), xStates.end(), 0);
-       // //Update new states of the feature/node
-       // states[feature] = xStates;
    }
    if (upgrade) {
        // Discretize again X (only the affected indices) with the new fitted discretizers
@@ -65,7 +65,8 @@ namespace bayesnet {
            //Update new states of the feature/node
            states[pFeatures[index]] = xStates;
        }
-       model.fit(pDataset, pFeatures, pClassName, states);
+       const torch::Tensor weights = torch::full({ pDataset.size(1) }, 1.0 / pDataset.size(1), torch::kDouble);
+       model.fit(pDataset, weights, pFeatures, pClassName, states);
    }
    return states;
}
@@ -13,6 +13,7 @@ namespace bayesnet {
        Proposal(torch::Tensor& pDataset, vector<string>& features_, string& className_);
        virtual ~Proposal();
    protected:
+       void checkInput(const torch::Tensor& X, const torch::Tensor& y);
        torch::Tensor prepareX(torch::Tensor& X);
        map<string, vector<int>> localDiscretizationProposal(const map<string, vector<int>>& states, Network& model);
        map<string, vector<int>> fit_local_discretization(const torch::Tensor& y);
@@ -4,7 +4,7 @@ namespace bayesnet {

    SPODE::SPODE(int root) : Classifier(Network()), root(root) {}

-   void SPODE::buildModel()
+   void SPODE::buildModel(const torch::Tensor& weights)
    {
        // 0. Add all nodes to the model
        addNodes();
@@ -7,7 +7,7 @@ namespace bayesnet {
    private:
        int root;
    protected:
-       void buildModel() override;
+       void buildModel(const torch::Tensor& weights) override;
    public:
        explicit SPODE(int root);
        virtual ~SPODE() {};
@@ -3,9 +3,9 @@
 namespace bayesnet {
    using namespace std;
    SPODELd::SPODELd(int root) : SPODE(root), Proposal(dataset, features, className) {}
-   SPODELd& SPODELd::fit(torch::Tensor& X_, torch::Tensor& y_, vector<string>& features_, string className_, map<string, vector<int>>& states_)
+   SPODELd& SPODELd::fit(torch::Tensor& X_, torch::Tensor& y_, const vector<string>& features_, const string& className_, map<string, vector<int>>& states_)
    {
        // This first part should go in a Classifier method called fit_local_discretization or fit_float...
        checkInput(X_, y_);
        features = features_;
        className = className_;
        Xf = X_;
@@ -18,12 +18,13 @@ namespace bayesnet {
        states = localDiscretizationProposal(states, model);
        return *this;
    }
-   SPODELd& SPODELd::fit(torch::Tensor& dataset, vector<string>& features_, string className_, map<string, vector<int>>& states_)
+   SPODELd& SPODELd::fit(torch::Tensor& dataset, const vector<string>& features_, const string& className_, map<string, vector<int>>& states_)
    {
        if (!torch::is_floating_point(dataset)) {
            throw std::runtime_error("Dataset must be a floating point tensor");
        }
        Xf = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), "..." }).clone();
        cout << "Xf " << Xf.sizes() << " dtype: " << Xf.dtype() << endl;
        y = dataset.index({ -1, "..." }).clone();
        // This first part should go in a Classifier method called fit_local_discretization or fit_float...
        features = features_;
        className = className_;
        // Fills vectors Xv & yv with the data from tensors X_ (discretized) & y
@@ -9,8 +9,8 @@ namespace bayesnet {
    public:
        explicit SPODELd(int root);
        virtual ~SPODELd() = default;
-       SPODELd& fit(torch::Tensor& X, torch::Tensor& y, vector<string>& features, string className, map<string, vector<int>>& states) override;
-       SPODELd& fit(torch::Tensor& dataset, vector<string>& features, string className, map<string, vector<int>>& states) override;
+       SPODELd& fit(torch::Tensor& X, torch::Tensor& y, const vector<string>& features, const string& className, map<string, vector<int>>& states) override;
+       SPODELd& fit(torch::Tensor& dataset, const vector<string>& features, const string& className, map<string, vector<int>>& states) override;
        vector<string> graph(const string& name = "SPODE") const override;
        Tensor predict(Tensor& X) override;
        static inline string version() { return "0.0.1"; };
@@ -5,7 +5,7 @@ namespace bayesnet {

    TAN::TAN() : Classifier(Network()) {}

-   void TAN::buildModel()
+   void TAN::buildModel(const torch::Tensor& weights)
    {
        // 0. Add all nodes to the model
        addNodes();
@@ -15,15 +15,15 @@ namespace bayesnet {
        Tensor class_dataset = dataset.index({ -1, "..." });
        for (int i = 0; i < static_cast<int>(features.size()); ++i) {
            Tensor feature_dataset = dataset.index({ i, "..." });
-           auto mi_value = metrics.mutualInformation(class_dataset, feature_dataset);
+           auto mi_value = metrics.mutualInformation(class_dataset, feature_dataset, weights);
            mi.push_back({ i, mi_value });
        }
        sort(mi.begin(), mi.end(), [](const auto& left, const auto& right) {return left.second < right.second;});
        auto root = mi[mi.size() - 1].first;
        // 2. Compute mutual information between each feature and the class
-       auto weights = metrics.conditionalEdge();
+       auto weights_matrix = metrics.conditionalEdge(weights);
        // 3. Compute the maximum spanning tree
-       auto mst = metrics.maximumSpanningTree(features, weights, root);
+       auto mst = metrics.maximumSpanningTree(features, weights_matrix, root);
        // 4. Add edges from the maximum spanning tree to the model
        for (auto i = 0; i < mst.size(); ++i) {
            auto [from, to] = mst[i];
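The TAN construction above follows the classic recipe: rank features by (now weighted) mutual information with the class, take the best-ranked one as root, build a maximum spanning tree over the class-conditional mutual information matrix, and orient its edges from that root. A free-standing sketch of the root-selection step (illustrative names, not the library API):

    #include <algorithm>
    #include <utility>
    #include <vector>
    // mi holds (feature index, I(Xi; C)) pairs; the feature with the highest
    // mutual information with the class becomes the root of the tree.
    int selectRoot(std::vector<std::pair<int, double>> mi)
    {
        std::sort(mi.begin(), mi.end(),
                  [](const auto& l, const auto& r) { return l.second < r.second; });
        return mi.back().first; // same as mi[mi.size() - 1].first above
    }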
@@ -3,11 +3,10 @@
 #include "Classifier.h"
 namespace bayesnet {
    using namespace std;
-   using namespace torch;
    class TAN : public Classifier {
    private:
    protected:
-       void buildModel() override;
+       void buildModel(const torch::Tensor& weights) override;
    public:
        TAN();
        virtual ~TAN() {};
@@ -3,9 +3,9 @@
 namespace bayesnet {
    using namespace std;
    TANLd::TANLd() : TAN(), Proposal(dataset, features, className) {}
-   TANLd& TANLd::fit(torch::Tensor& X_, torch::Tensor& y_, vector<string>& features_, string className_, map<string, vector<int>>& states_)
+   TANLd& TANLd::fit(torch::Tensor& X_, torch::Tensor& y_, const vector<string>& features_, const string& className_, map<string, vector<int>>& states_)
    {
        // This first part should go in a Classifier method called fit_local_discretization or fit_float...
        checkInput(X_, y_);
        features = features_;
        className = className_;
        Xf = X_;
@@ -10,7 +10,7 @@ namespace bayesnet {
    public:
        TANLd();
        virtual ~TANLd() = default;
-       TANLd& fit(torch::Tensor& X, torch::Tensor& y, vector<string>& features, string className, map<string, vector<int>>& states) override;
+       TANLd& fit(torch::Tensor& X, torch::Tensor& y, const vector<string>& features, const string& className, map<string, vector<int>>& states) override;
        vector<string> graph(const string& name = "TAN") const override;
        Tensor predict(Tensor& X) override;
        static inline string version() { return "0.0.1"; };
@@ -4,7 +4,7 @@ namespace bayesnet {
    using namespace std;
    using namespace torch;
    // Return the indices in descending order
-   vector<int> argsort(vector<float>& nums)
+   vector<int> argsort(vector<double>& nums)
    {
        int n = nums.size();
        vector<int> indices(n);
@@ -5,7 +5,7 @@
 namespace bayesnet {
    using namespace std;
    using namespace torch;
-   vector<int> argsort(vector<float>& nums);
+   vector<int> argsort(vector<double>& nums);
    vector<vector<int>> tensorToVector(Tensor& tensor);
}
#endif //BAYESNET_UTILS_H
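argsort's body is cut off in this view; an implementation consistent with its declaration and the "Return the indices in descending order" comment would look like this (a sketch, not necessarily the exact library code):

    #include <algorithm>
    #include <numeric>
    #include <vector>
    // Returns the indices that would sort nums in descending order,
    // e.g. {0.2, 0.9, 0.5} -> {1, 2, 0}.
    std::vector<int> argsortDescending(const std::vector<double>& nums)
    {
        std::vector<int> indices(nums.size());
        std::iota(indices.begin(), indices.end(), 0);
        std::sort(indices.begin(), indices.end(),
                  [&nums](int a, int b) { return nums[a] > nums[b]; });
        return indices;
    }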
305
src/Platform/BestResults.cc
Normal file
@@ -0,0 +1,305 @@
#include <filesystem>
#include <fstream>
#include <iostream>
#include <sstream>
#include <set>
#include "BestResults.h"
#include "Result.h"
#include "Colors.h"

namespace fs = std::filesystem;
// function ftime_to_string, code taken from
// https://stackoverflow.com/a/58237530/1389271
template <typename TP>
std::string ftime_to_string(TP tp)
{
    using namespace std::chrono;
    auto sctp = time_point_cast<system_clock::duration>(tp - TP::clock::now() + system_clock::now());
    auto tt = system_clock::to_time_t(sctp);
    std::tm* gmt = std::gmtime(&tt);
    std::stringstream buffer;
    buffer << std::put_time(gmt, "%Y-%m-%d %H:%M");
    return buffer.str();
}

namespace platform {

    string BestResults::build()
    {
        auto files = loadResultFiles();
        if (files.size() == 0) {
            cerr << Colors::MAGENTA() << "No result files were found!" << Colors::RESET() << endl;
            exit(1);
        }
        json bests;
        for (const auto& file : files) {
            auto result = Result(path, file);
            auto data = result.load();
            for (auto const& item : data.at("results")) {
                bool update = false;
                if (bests.contains(item.at("dataset").get<string>())) {
                    if (item.at("score").get<double>() > bests[item.at("dataset").get<string>()].at(0).get<double>()) {
                        update = true;
                    }
                } else {
                    update = true;
                }
                if (update) {
                    bests[item.at("dataset").get<string>()] = { item.at("score").get<double>(), item.at("hyperparameters"), file };
                }
            }
        }
        string bestFileName = path + bestResultFile();
        if (FILE* fileTest = fopen(bestFileName.c_str(), "r")) {
            fclose(fileTest);
            cout << Colors::MAGENTA() << "File " << bestFileName << " already exists and it shall be overwritten." << Colors::RESET() << endl;
        }
        ofstream file(bestFileName);
        file << bests;
        file.close();
        return bestFileName;
    }
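For reference, build() writes one JSON object keyed by dataset name, each entry holding [best score, hyperparameters, source results file]. A hypothetical entry (values invented for illustration):

    #include <nlohmann/json.hpp>
    using json = nlohmann::json;
    json bests;
    bests["iris"] = { 0.973333, json{ { "k", 2 } }, "results_accuracy_KDB_hostname_date_time_1.json" };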
    string BestResults::bestResultFile()
    {
        return "best_results_" + score + "_" + model + ".json";
    }

    pair<string, string> getModelScore(string name)
    {
        // results_accuracy_BoostAODE_MacBookpro16_2023-09-06_12:27:00_1.json
        int i = 0;
        auto pos = name.find("_");
        auto pos2 = name.find("_", pos + 1);
        string score = name.substr(pos + 1, pos2 - pos - 1);
        pos = name.find("_", pos2 + 1);
        string model = name.substr(pos2 + 1, pos - pos2 - 1);
        return { model, score };
    }
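Worked example of the parsing above: the first two underscores delimit the score and the third delimits the model, so for the filename in the comment:

    auto [model, score] = getModelScore("results_accuracy_BoostAODE_MacBookpro16_2023-09-06_12:27:00_1.json");
    // model == "BoostAODE", score == "accuracy"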
    vector<string> BestResults::loadResultFiles()
    {
        vector<string> files;
        using std::filesystem::directory_iterator;
        string fileModel, fileScore;
        for (const auto& file : directory_iterator(path)) {
            auto fileName = file.path().filename().string();
            if (fileName.find(".json") != string::npos && fileName.find("results_") == 0) {
                tie(fileModel, fileScore) = getModelScore(fileName);
                if (score == fileScore && (model == fileModel || model == "any")) {
                    files.push_back(fileName);
                }
            }
        }
        return files;
    }

    json BestResults::loadFile(const string& fileName)
    {
        ifstream resultData(fileName);
        if (resultData.is_open()) {
            json data = json::parse(resultData);
            return data;
        }
        throw invalid_argument("Unable to open result file. [" + fileName + "]");
    }
    set<string> BestResults::getModels()
    {
        set<string> models;
        auto files = loadResultFiles();
        if (files.size() == 0) {
            cerr << Colors::MAGENTA() << "No result files were found!" << Colors::RESET() << endl;
            exit(1);
        }
        string fileModel, fileScore;
        for (const auto& file : files) {
            // extract the model from the file name
            tie(fileModel, fileScore) = getModelScore(file);
            // add the model to the set of models
            models.insert(fileModel);
        }
        return models;
    }

    void BestResults::buildAll()
    {
        auto models = getModels();
        for (const auto& model : models) {
            cout << "Building best results for model: " << model << endl;
            this->model = model;
            build();
        }
        model = "any";
    }

    void BestResults::reportSingle()
    {
        string bestFileName = path + bestResultFile();
        if (FILE* fileTest = fopen(bestFileName.c_str(), "r")) {
            fclose(fileTest);
        } else {
            cerr << Colors::MAGENTA() << "File " << bestFileName << " doesn't exist." << Colors::RESET() << endl;
            exit(1);
        }
        auto date = ftime_to_string(filesystem::last_write_time(bestFileName));
        auto data = loadFile(bestFileName);
        cout << Colors::GREEN() << "Best results for " << model << " and " << score << " as of " << date << endl;
        cout << "--------------------------------------------------------" << endl;
        cout << Colors::GREEN() << " #  Dataset                   Score       File                                                               Hyperparameters" << endl;
        cout << "=== ========================= =========== ================================================================== =================================================" << endl;
        auto i = 0;
        bool odd = true;
        for (auto const& item : data.items()) {
            auto color = odd ? Colors::BLUE() : Colors::CYAN();
            cout << color << setw(3) << fixed << right << i++ << " ";
            cout << setw(25) << left << item.key() << " ";
            cout << setw(11) << setprecision(9) << fixed << item.value().at(0).get<double>() << " ";
            cout << setw(66) << item.value().at(2).get<string>() << " ";
            cout << item.value().at(1) << " ";
            cout << endl;
            odd = !odd;
        }
    }
    json BestResults::buildTableResults(set<string> models)
    {
        int numberOfDatasets = 0;
        bool first = true;
        json origin;
        json table;
        auto maxDate = filesystem::file_time_type::max();
        for (const auto& model : models) {
            this->model = model;
            string bestFileName = path + bestResultFile();
            if (FILE* fileTest = fopen(bestFileName.c_str(), "r")) {
                fclose(fileTest);
            } else {
                cerr << Colors::MAGENTA() << "File " << bestFileName << " doesn't exist." << Colors::RESET() << endl;
                exit(1);
            }
            auto dateWrite = filesystem::last_write_time(bestFileName);
            if (dateWrite < maxDate) {
                maxDate = dateWrite;
            }
            auto data = loadFile(bestFileName);
            if (first) {
                // Get the number of datasets of the first file and check that it is the same for all the models
                first = false;
                numberOfDatasets = data.size();
                origin = data;
            } else {
                if (numberOfDatasets != data.size()) {
                    cerr << Colors::MAGENTA() << "The number of datasets in the best results files is not the same for all the models." << Colors::RESET() << endl;
                    exit(1);
                }
            }
            table[model] = data;
        }
        table["dateTable"] = ftime_to_string(maxDate);
        return table;
    }
    void BestResults::printTableResults(set<string> models, json table)
    {
        cout << Colors::GREEN() << "Best results for " << score << " as of " << table.at("dateTable").get<string>() << endl;
        cout << "------------------------------------------------" << endl;
        cout << Colors::GREEN() << " #  Dataset                   ";
        for (const auto& model : models) {
            cout << setw(12) << left << model << " ";
        }
        cout << endl;
        cout << "=== ========================= ";
        for (const auto& model : models) {
            cout << "============ ";
        }
        cout << endl;
        auto i = 0;
        bool odd = true;
        map<string, double> totals;
        map<string, int> ranks;
        for (const auto& model : models) {
            totals[model] = 0.0;
        }
        json origin = table.begin().value();
        for (auto const& item : origin.items()) {
            auto color = odd ? Colors::BLUE() : Colors::CYAN();
            cout << color << setw(3) << fixed << right << i++ << " ";
            cout << setw(25) << left << item.key() << " ";
            double maxValue = 0;
            vector<pair<string, double>> ranksOrder;
            // Find out the max value for this dataset
            for (const auto& model : models) {
                double value = table[model].at(item.key()).at(0).get<double>();
                if (value > maxValue) {
                    maxValue = value;
                }
                ranksOrder.push_back({ model, value });
            }
            // sort the ranksOrder vector by value
            sort(ranksOrder.begin(), ranksOrder.end(), [](const pair<string, double>& a, const pair<string, double>& b) {
                return a.second > b.second;
            });
            // Assign the ranks
            for (int i = 0; i < ranksOrder.size(); i++) {
                ranks[ranksOrder[i].first] = i + 1;
            }
            // Print the row with red colors on max values
            for (const auto& model : models) {
                string efectiveColor = color;
                double value = table[model].at(item.key()).at(0).get<double>();
                if (value == maxValue) {
                    efectiveColor = Colors::RED();
                }
                totals[model] += value;
                cout << efectiveColor << setw(12) << setprecision(10) << fixed << value << " ";
            }
            cout << endl;
            odd = !odd;
        }
        cout << Colors::GREEN() << "=== ========================= ";
        for (const auto& model : models) {
            cout << "============ ";
        }
        cout << endl;
        cout << Colors::GREEN() << setw(30) << " Totals...................";
        double max = 0.0;
        for (const auto& total : totals) {
            if (total.second > max) {
                max = total.second;
            }
        }
        for (const auto& model : models) {
            string efectiveColor = Colors::GREEN();
            if (totals[model] == max) {
                efectiveColor = Colors::RED();
            }
            cout << efectiveColor << setw(12) << setprecision(9) << fixed << totals[model] << " ";
        }
        // Output the averaged ranks
        cout << endl;
        int min = 1;
        for (const auto& rank : ranks) {
            if (rank.second < min) {
                min = rank.second;
            }
        }
        cout << Colors::GREEN() << setw(30) << " Averaged ranks...........";
        for (const auto& model : models) {
            string efectiveColor = Colors::GREEN();
            if (ranks[model] == min) {
                efectiveColor = Colors::RED();
            }
            cout << efectiveColor << setw(12) << setprecision(10) << fixed << (double)ranks[model] / (double)origin.size() << " ";
        }
        cout << endl;
    }
    void BestResults::reportAll()
    {
        auto models = getModels();
        // Build the table of results
        json table = buildTableResults(models);
        // Print the table of results
        printTableResults(models, table);
    }
}
28
src/Platform/BestResults.h
Normal file
@@ -0,0 +1,28 @@
#ifndef BESTRESULTS_H
#define BESTRESULTS_H
#include <string>
#include <set>
#include <nlohmann/json.hpp>
using namespace std;
using json = nlohmann::json;
namespace platform {
    class BestResults {
    public:
        explicit BestResults(const string& path, const string& score, const string& model) : path(path), score(score), model(model) {}
        string build();
        void reportSingle();
        void reportAll();
        void buildAll();
    private:
        set<string> getModels();
        vector<string> loadResultFiles();
        json buildTableResults(set<string> models);
        void printTableResults(set<string> models, json table);
        string bestResultFile();
        json loadFile(const string& fileName);
        string path;
        string score;
        string model;
    };
}
#endif //BESTRESULTS_H
10
src/Platform/BestScore.h
Normal file
@@ -0,0 +1,10 @@
#ifndef BESTSCORE_H
#define BESTSCORE_H
#include <string>
class BestScore {
public:
    static std::string title() { return "STree_default (linear-ovo)"; }
    static double score() { return 22.109799; }
    static std::string scoreName() { return "accuracy"; }
};
#endif
@@ -4,5 +4,16 @@ include_directories(${BayesNet_SOURCE_DIR}/lib/Files)
 include_directories(${BayesNet_SOURCE_DIR}/lib/mdlp)
 include_directories(${BayesNet_SOURCE_DIR}/lib/argparse/include)
 include_directories(${BayesNet_SOURCE_DIR}/lib/json/include)
-add_executable(main main.cc Folding.cc platformUtils.cc Experiment.cc Datasets.cc Models.cc Report.cc)
-target_link_libraries(main BayesNet ArffFiles mdlp "${TORCH_LIBRARIES}")
+include_directories(${BayesNet_SOURCE_DIR}/lib/libxlsxwriter/include)
+add_executable(main main.cc Folding.cc platformUtils.cc Experiment.cc Datasets.cc Models.cc ReportConsole.cc ReportBase.cc)
+add_executable(manage manage.cc Results.cc Result.cc ReportConsole.cc ReportExcel.cc ReportBase.cc Datasets.cc platformUtils.cc)
+add_executable(list list.cc platformUtils Datasets.cc)
+add_executable(best best.cc BestResults.cc Result.cc)
+target_link_libraries(main BayesNet ArffFiles mdlp "${TORCH_LIBRARIES}")
+if (${CMAKE_HOST_SYSTEM_NAME} MATCHES "Linux")
+    target_link_libraries(manage "${TORCH_LIBRARIES}" libxlsxwriter.so ArffFiles mdlp stdc++fs)
+    target_link_libraries(best stdc++fs)
+else()
+    target_link_libraries(manage "${TORCH_LIBRARIES}" "${XLSXWRITER_LIB}" ArffFiles mdlp)
+endif()
+target_link_libraries(list ArffFiles mdlp "${TORCH_LIBRARIES}")
14
src/Platform/Colors.h
Normal file
@@ -0,0 +1,14 @@
#ifndef COLORS_H
#define COLORS_H
class Colors {
public:
    static std::string MAGENTA() { return "\033[1;35m"; }
    static std::string BLUE() { return "\033[1;34m"; }
    static std::string CYAN() { return "\033[1;36m"; }
    static std::string GREEN() { return "\033[1;32m"; }
    static std::string YELLOW() { return "\033[1;33m"; }
    static std::string RED() { return "\033[1;31m"; }
    static std::string WHITE() { return "\033[1;37m"; }
    static std::string RESET() { return "\033[0m"; }
};
#endif // COLORS_H
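These are the standard ANSI bold-color escape sequences; typical usage brackets a message between a color and RESET:

    #include <iostream>
    // Prints "done" in bold green, then restores the terminal's default color.
    std::cout << Colors::GREEN() << "done" << Colors::RESET() << std::endl;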
@@ -1,6 +1,7 @@
 #include "Datasets.h"
 #include "platformUtils.h"
 #include "ArffFiles.h"
 #include <fstream>
 namespace platform {
    void Datasets::load()
    {
@@ -24,75 +25,110 @@ namespace platform {
        transform(datasets.begin(), datasets.end(), back_inserter(result), [](const auto& d) { return d.first; });
        return result;
    }
-   vector<string> Datasets::getFeatures(string name)
+   vector<string> Datasets::getFeatures(const string& name) const
    {
-       if (datasets[name]->isLoaded()) {
-           return datasets[name]->getFeatures();
+       if (datasets.at(name)->isLoaded()) {
+           return datasets.at(name)->getFeatures();
        } else {
            throw invalid_argument("Dataset not loaded.");
        }
    }
-   map<string, vector<int>> Datasets::getStates(string name)
+   map<string, vector<int>> Datasets::getStates(const string& name) const
    {
-       if (datasets[name]->isLoaded()) {
-           return datasets[name]->getStates();
+       if (datasets.at(name)->isLoaded()) {
+           return datasets.at(name)->getStates();
        } else {
            throw invalid_argument("Dataset not loaded.");
        }
    }
-   string Datasets::getClassName(string name)
+   void Datasets::loadDataset(const string& name) const
    {
-       if (datasets[name]->isLoaded()) {
-           return datasets[name]->getClassName();
+       if (datasets.at(name)->isLoaded()) {
+           return;
        } else {
+           datasets.at(name)->load();
        }
    }
+   string Datasets::getClassName(const string& name) const
+   {
+       if (datasets.at(name)->isLoaded()) {
+           return datasets.at(name)->getClassName();
+       } else {
            throw invalid_argument("Dataset not loaded.");
        }
    }
-   int Datasets::getNSamples(string name)
+   int Datasets::getNSamples(const string& name) const
    {
-       if (datasets[name]->isLoaded()) {
-           return datasets[name]->getNSamples();
+       if (datasets.at(name)->isLoaded()) {
+           return datasets.at(name)->getNSamples();
        } else {
            throw invalid_argument("Dataset not loaded.");
        }
    }
-   pair<vector<vector<float>>&, vector<int>&> Datasets::getVectors(string name)
+   int Datasets::getNClasses(const string& name)
+   {
+       if (datasets.at(name)->isLoaded()) {
+           auto className = datasets.at(name)->getClassName();
+           if (discretize) {
+               auto states = getStates(name);
+               return states.at(className).size();
+           }
+           auto [Xv, yv] = getVectors(name);
+           return *max_element(yv.begin(), yv.end()) + 1;
+       } else {
+           throw invalid_argument("Dataset not loaded.");
+       }
+   }
+   vector<int> Datasets::getClassesCounts(const string& name) const
+   {
+       if (datasets.at(name)->isLoaded()) {
+           auto [Xv, yv] = datasets.at(name)->getVectors();
+           vector<int> counts(*max_element(yv.begin(), yv.end()) + 1);
+           for (auto y : yv) {
+               counts[y]++;
+           }
+           return counts;
+       } else {
+           throw invalid_argument("Dataset not loaded.");
+       }
+   }
+   pair<vector<vector<float>>&, vector<int>&> Datasets::getVectors(const string& name)
    {
        if (!datasets[name]->isLoaded()) {
            datasets[name]->load();
        }
        return datasets[name]->getVectors();
    }
-   pair<vector<vector<int>>&, vector<int>&> Datasets::getVectorsDiscretized(string name)
+   pair<vector<vector<int>>&, vector<int>&> Datasets::getVectorsDiscretized(const string& name)
    {
        if (!datasets[name]->isLoaded()) {
            datasets[name]->load();
        }
        return datasets[name]->getVectorsDiscretized();
    }
-   pair<torch::Tensor&, torch::Tensor&> Datasets::getTensors(string name)
+   pair<torch::Tensor&, torch::Tensor&> Datasets::getTensors(const string& name)
    {
        if (!datasets[name]->isLoaded()) {
            datasets[name]->load();
        }
        return datasets[name]->getTensors();
    }
-   bool Datasets::isDataset(const string& name)
+   bool Datasets::isDataset(const string& name) const
    {
        return datasets.find(name) != datasets.end();
    }
    Dataset::Dataset(const Dataset& dataset) : path(dataset.path), name(dataset.name), className(dataset.className), n_samples(dataset.n_samples), n_features(dataset.n_features), features(dataset.features), states(dataset.states), loaded(dataset.loaded), discretize(dataset.discretize), X(dataset.X), y(dataset.y), Xv(dataset.Xv), Xd(dataset.Xd), yv(dataset.yv), fileType(dataset.fileType)
    {
    }
-   string Dataset::getName()
+   string Dataset::getName() const
    {
        return name;
    }
-   string Dataset::getClassName()
+   string Dataset::getClassName() const
    {
        return className;
    }
-   vector<string> Dataset::getFeatures()
+   vector<string> Dataset::getFeatures() const
    {
        if (loaded) {
            return features;
@@ -100,7 +136,7 @@ namespace platform {
            throw invalid_argument("Dataset not loaded.");
        }
    }
-   int Dataset::getNFeatures()
+   int Dataset::getNFeatures() const
    {
        if (loaded) {
            return n_features;
@@ -108,7 +144,7 @@ namespace platform {
            throw invalid_argument("Dataset not loaded.");
        }
    }
-   int Dataset::getNSamples()
+   int Dataset::getNSamples() const
    {
        if (loaded) {
            return n_samples;
@@ -116,7 +152,7 @@ namespace platform {
            throw invalid_argument("Dataset not loaded.");
        }
    }
-   map<string, vector<int>> Dataset::getStates()
+   map<string, vector<int>> Dataset::getStates() const
    {
        if (loaded) {
            return states;
@@ -177,10 +213,11 @@ namespace platform {
    {
        for (int i = 0; i < features.size(); ++i) {
            states[features[i]] = vector<int>(*max_element(Xd[i].begin(), Xd[i].end()) + 1);
-           iota(begin(states[features[i]]), end(states[features[i]]), 0);
+           auto item = states.at(features[i]);
+           iota(begin(item), end(item), 0);
        }
        states[className] = vector<int>(*max_element(yv.begin(), yv.end()) + 1);
-       iota(begin(states[className]), end(states[className]), 0);
+       iota(begin(states.at(className)), end(states.at(className)), 0);
    }
    void Dataset::load_arff()
    {
@@ -29,15 +29,15 @@ namespace platform {
    public:
        Dataset(const string& path, const string& name, const string& className, bool discretize, fileType_t fileType) : path(path), name(name), className(className), discretize(discretize), loaded(false), fileType(fileType) {};
        explicit Dataset(const Dataset&);
-       string getName();
-       string getClassName();
-       vector<string> getFeatures();
-       map<string, vector<int>> getStates();
+       string getName() const;
+       string getClassName() const;
+       vector<string> getFeatures() const;
+       map<string, vector<int>> getStates() const;
        pair<vector<vector<float>>&, vector<int>&> getVectors();
        pair<vector<vector<int>>&, vector<int>&> getVectorsDiscretized();
        pair<torch::Tensor&, torch::Tensor&> getTensors();
-       int getNFeatures();
-       int getNSamples();
+       int getNFeatures() const;
+       int getNSamples() const;
        void load();
        const bool inline isLoaded() const { return loaded; };
    };
@@ -51,14 +51,17 @@ namespace platform {
    public:
        explicit Datasets(const string& path, bool discretize = false, fileType_t fileType = ARFF) : path(path), discretize(discretize), fileType(fileType) { load(); };
        vector<string> getNames();
-       vector<string> getFeatures(string name);
-       int getNSamples(string name);
-       string getClassName(string name);
-       map<string, vector<int>> getStates(string name);
-       pair<vector<vector<float>>&, vector<int>&> getVectors(string name);
-       pair<vector<vector<int>>&, vector<int>&> getVectorsDiscretized(string name);
-       pair<torch::Tensor&, torch::Tensor&> getTensors(string name);
-       bool isDataset(const string& name);
+       vector<string> getFeatures(const string& name) const;
+       int getNSamples(const string& name) const;
+       string getClassName(const string& name) const;
+       int getNClasses(const string& name);
+       vector<int> getClassesCounts(const string& name) const;
+       map<string, vector<int>> getStates(const string& name) const;
+       pair<vector<vector<float>>&, vector<int>&> getVectors(const string& name);
+       pair<vector<vector<int>>&, vector<int>&> getVectorsDiscretized(const string& name);
+       pair<torch::Tensor&, torch::Tensor&> getTensors(const string& name);
+       bool isDataset(const string& name) const;
+       void loadDataset(const string& name) const;
    };
};
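A sketch of how the extended Datasets API above can be driven (path and dataset name are placeholders):

    // Discretized loading, so class states are available without scanning labels.
    auto datasets = platform::Datasets("/path/to/datasets", true, platform::ARFF);
    if (datasets.isDataset("iris")) {
        datasets.loadDataset("iris");
        auto nClasses = datasets.getNClasses("iris");    // from states when discretized
        auto counts = datasets.getClassesCounts("iris"); // samples per class label
    }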
@@ -1,8 +1,8 @@
 #include "Experiment.h"
 #include "Datasets.h"
 #include "Models.h"
-#include "Report.h"
+#include "ReportConsole.h"
 #include <fstream>
 namespace platform {
    using json = nlohmann::json;
    string get_date()
@@ -25,6 +25,7 @@ namespace platform {
        oss << std::put_time(timeinfo, "%H:%M:%S");
        return oss.str();
    }
+   Experiment::Experiment() : hyperparameters(json::parse("{}")) {}
    string Experiment::get_file_name()
    {
        string result = "results_" + score_name + "_" + model + "_" + platform + "_" + get_date() + "_" + get_time() + "_" + (stratified ? "1" : "0") + ".json";
@@ -90,7 +91,7 @@ namespace platform {
    void Experiment::report()
    {
        json data = build_json();
-       Report report(data);
+       ReportConsole report(data);
        report.show();
    }
@@ -110,6 +111,26 @@ namespace platform {
        }
    }

+   string getColor(bayesnet::status_t status)
+   {
+       switch (status) {
+           case bayesnet::NORMAL:
+               return Colors::GREEN();
+           case bayesnet::WARNING:
+               return Colors::YELLOW();
+           case bayesnet::ERROR:
+               return Colors::RED();
+           default:
+               return Colors::RESET();
+       }
+   }
+   void showProgress(int fold, const string& color, const string& phase)
+   {
+       string prefix = phase == "a" ? "" : "\b\b\b\b";
+       cout << prefix << color << fold << Colors::RESET() << "(" << color << phase << Colors::RESET() << ")" << flush;
+   }
    void Experiment::cross_validation(const string& path, const string& fileName)
    {
        auto datasets = platform::Datasets(path, discretized, platform::ARFF);
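Worked example of the progress output above: "a" is printed before training, "b" right after training and "c" before test scoring, and the four backspaces let each phase overwrite the previous one, so fold 3 renders in place as 3(a), then 3(b), then 3(c):

    showProgress(3, Colors::GREEN(), "a"); // prints "3(a)"
    showProgress(3, Colors::GREEN(), "b"); // backspaces over "3(a)" and prints "3(b)"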
@@ -124,6 +145,8 @@ namespace platform {
        auto result = Result();
        auto [values, counts] = at::_unique(y);
        result.setSamples(X.size(1)).setFeatures(X.size(0)).setClasses(values.size(0));
+       result.setHyperparameters(hyperparameters);
        // Initialize results vectors
        int nResults = nfolds * static_cast<int>(randomSeeds.size());
        auto accuracy_test = torch::zeros({ nResults }, torch::kFloat64);
        auto accuracy_train = torch::zeros({ nResults }, torch::kFloat64);
@@ -144,6 +167,10 @@ namespace platform {
        for (int nfold = 0; nfold < nfolds; nfold++) {
            auto clf = Models::instance()->create(model);
            setModelVersion(clf->getVersion());
+           if (hyperparameters.size() != 0) {
+               clf->setHyperparameters(hyperparameters);
+           }
            // Split train - test dataset
            train_timer.start();
            auto [train, test] = fold->getFold(nfold);
            auto train_t = torch::tensor(train);
@@ -152,24 +179,31 @@ namespace platform {
            auto y_train = y.index({ train_t });
            auto X_test = X.index({ "...", test_t });
            auto y_test = y.index({ test_t });
-           cout << nfold + 1 << ", " << flush;
+           showProgress(nfold + 1, getColor(clf->getStatus()), "a");
            // Train model
            clf->fit(X_train, y_train, features, className, states);
+           showProgress(nfold + 1, getColor(clf->getStatus()), "b");
            nodes[item] = clf->getNumberOfNodes();
            edges[item] = clf->getNumberOfEdges();
            num_states[item] = clf->getNumberOfStates();
            train_time[item] = train_timer.getDuration();
            // Score train
            auto accuracy_train_value = clf->score(X_train, y_train);
            // Test model
+           showProgress(nfold + 1, getColor(clf->getStatus()), "c");
            test_timer.start();
            auto accuracy_test_value = clf->score(X_test, y_test);
            test_time[item] = test_timer.getDuration();
            accuracy_train[item] = accuracy_train_value;
            accuracy_test[item] = accuracy_test_value;
-           cout << "\b\b\b, " << flush;
            // Store results and times in vector
            result.addScoreTrain(accuracy_train_value);
            result.addScoreTest(accuracy_test_value);
            result.addTimeTrain(train_time[item].item<double>());
            result.addTimeTest(test_time[item].item<double>());
            item++;
+           clf.reset();
        }
        cout << "end. " << flush;
        delete fold;
@@ -177,6 +211,7 @@ namespace platform {
        result.setScoreTest(torch::mean(accuracy_test).item<double>()).setScoreTrain(torch::mean(accuracy_train).item<double>());
        result.setScoreTestStd(torch::std(accuracy_test).item<double>()).setScoreTrainStd(torch::std(accuracy_train).item<double>());
        result.setTrainTime(torch::mean(train_time).item<double>()).setTestTime(torch::mean(test_time).item<double>());
        result.setTestTimeStd(torch::std(test_time).item<double>()).setTrainTimeStd(torch::std(train_time).item<double>());
        result.setNodes(torch::mean(nodes).item<double>()).setLeaves(torch::mean(edges).item<double>()).setDepth(torch::mean(num_states).item<double>());
        result.setDataset(fileName);
        addResult(result);
@@ -29,7 +29,8 @@ namespace platform {
    };
    class Result {
    private:
-       string dataset, hyperparameters, model_version;
+       string dataset, model_version;
+       json hyperparameters;
        int samples{ 0 }, features{ 0 }, classes{ 0 };
        double score_train{ 0 }, score_test{ 0 }, score_train_std{ 0 }, score_test_std{ 0 }, train_time{ 0 }, train_time_std{ 0 }, test_time{ 0 }, test_time_std{ 0 };
        float nodes{ 0 }, leaves{ 0 }, depth{ 0 };
@@ -37,7 +38,7 @@ namespace platform {
    public:
        Result() = default;
        Result& setDataset(const string& dataset) { this->dataset = dataset; return *this; }
-       Result& setHyperparameters(const string& hyperparameters) { this->hyperparameters = hyperparameters; return *this; }
+       Result& setHyperparameters(const json& hyperparameters) { this->hyperparameters = hyperparameters; return *this; }
        Result& setSamples(int samples) { this->samples = samples; return *this; }
        Result& setFeatures(int features) { this->features = features; return *this; }
        Result& setClasses(int classes) { this->classes = classes; return *this; }
@@ -59,7 +60,7 @@ namespace platform {
        const float get_score_train() const { return score_train; }
        float get_score_test() { return score_test; }
        const string& getDataset() const { return dataset; }
-       const string& getHyperparameters() const { return hyperparameters; }
+       const json& getHyperparameters() const { return hyperparameters; }
        const int getSamples() const { return samples; }
        const int getFeatures() const { return features; }
        const int getClasses() const { return classes; }
@@ -85,11 +86,12 @@ namespace platform {
        bool discretized{ false }, stratified{ false };
        vector<Result> results;
        vector<int> randomSeeds;
+       json hyperparameters = "{}";
        int nfolds{ 0 };
        float duration{ 0 };
        json build_json();
    public:
-       Experiment() = default;
+       Experiment();
        Experiment& setTitle(const string& title) { this->title = title; return *this; }
        Experiment& setModel(const string& model) { this->model = model; return *this; }
        Experiment& setPlatform(const string& platform) { this->platform = platform; return *this; }
@@ -103,6 +105,7 @@ namespace platform {
        Experiment& addResult(Result result) { results.push_back(result); return *this; }
        Experiment& addRandomSeed(int randomSeed) { randomSeeds.push_back(randomSeed); return *this; }
        Experiment& setDuration(float duration) { this->duration = duration; return *this; }
+       Experiment& setHyperparameters(const json& hyperparameters) { this->hyperparameters = hyperparameters; return *this; }
        string get_file_name();
        void save(const string& path);
        void cross_validation(const string& path, const string& fileName);
@@ -1,95 +1,97 @@
 #include "Folding.h"
 #include <algorithm>
 #include <map>
+namespace platform {
 Fold::Fold(int k, int n, int seed) : k(k), n(n), seed(seed)
 {
     random_device rd;
     random_seed = default_random_engine(seed == -1 ? rd() : seed);
     srand(seed == -1 ? time(0) : seed);
 }
 KFold::KFold(int k, int n, int seed) : Fold(k, n, seed), indices(vector<int>(n))
 {
     iota(begin(indices), end(indices), 0); // fill with 0, 1, ..., n - 1
     shuffle(indices.begin(), indices.end(), random_seed);
 }
 pair<vector<int>, vector<int>> KFold::getFold(int nFold)
 {
     if (nFold >= k || nFold < 0) {
         throw out_of_range("nFold (" + to_string(nFold) + ") must be less than k (" + to_string(k) + ")");
     }
     int nTest = n / k;
     auto train = vector<int>();
     auto test = vector<int>();
     for (int i = 0; i < n; i++) {
         if (i >= nTest * nFold && i < nTest * (nFold + 1)) {
             test.push_back(indices[i]);
         } else {
             train.push_back(indices[i]);
         }
     }
     return { train, test };
 }
 StratifiedKFold::StratifiedKFold(int k, torch::Tensor& y, int seed) : Fold(k, y.numel(), seed)
 {
     n = y.numel();
     this->y = vector<int>(y.data_ptr<int>(), y.data_ptr<int>() + n);
     build();
 }
 StratifiedKFold::StratifiedKFold(int k, const vector<int>& y, int seed)
     : Fold(k, y.size(), seed)
 {
     this->y = y;
     n = y.size();
     build();
 }
 void StratifiedKFold::build()
 {
     stratified_indices = vector<vector<int>>(k);
     int fold_size = n / k;
     // Compute class counts and indices
     auto class_indices = map<int, vector<int>>();
     vector<int> class_counts(*max_element(y.begin(), y.end()) + 1, 0);
     for (auto i = 0; i < n; ++i) {
         class_counts[y[i]]++;
         class_indices[y[i]].push_back(i);
     }
     // Shuffle class indices
     for (auto& [cls, indices] : class_indices) {
         shuffle(indices.begin(), indices.end(), random_seed);
     }
     // Assign indices to folds
     for (auto label = 0; label < class_counts.size(); ++label) {
         auto num_samples_to_take = class_counts[label] / k;
         if (num_samples_to_take == 0)
             continue;
         auto remainder_samples_to_take = class_counts[label] % k;
         for (auto fold = 0; fold < k; ++fold) {
             auto it = next(class_indices[label].begin(), num_samples_to_take);
             move(class_indices[label].begin(), it, back_inserter(stratified_indices[fold])); // ##
             class_indices[label].erase(class_indices[label].begin(), it);
         }
         while (remainder_samples_to_take > 0) {
             int fold = (rand() % static_cast<int>(k));
             if (stratified_indices[fold].size() == fold_size + 1) {
                 continue;
             }
             auto it = next(class_indices[label].begin(), 1);
             stratified_indices[fold].push_back(*class_indices[label].begin());
             class_indices[label].erase(class_indices[label].begin(), it);
             remainder_samples_to_take--;
         }
     }
 }
 pair<vector<int>, vector<int>> StratifiedKFold::getFold(int nFold)
 {
     if (nFold >= k || nFold < 0) {
         throw out_of_range("nFold (" + to_string(nFold) + ") must be less than k (" + to_string(k) + ")");
     }
     vector<int> test_indices = stratified_indices[nFold];
     vector<int> train_indices;
     for (int i = 0; i < k; ++i) {
         if (i == nFold) continue;
         train_indices.insert(train_indices.end(), stratified_indices[i].begin(), stratified_indices[i].end());
     }
     return { train_indices, test_indices };
 }
+}
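Usage sketch (editor's addition, not part of the changeset): both folding strategies share the Fold interface, so a caller can switch between plain and stratified splits without touching the loop. The labels and seed below are made up for illustration.

// Iterate a stratified 3-fold split through the common Fold interface.
#include "Folding.h"
#include <iostream>
int main()
{
    std::vector<int> y = { 0, 0, 0, 1, 1, 1, 2, 2, 2 }; // hypothetical labels
    platform::StratifiedKFold fold(3, y, 271);          // fixed seed => reproducible folds
    for (int i = 0; i < fold.getNumberOfFolds(); ++i) {
        auto [train, test] = fold.getFold(i);
        std::cout << "fold " << i << ": " << train.size() << " train / " << test.size() << " test\n";
    }
}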
@@ -4,34 +4,35 @@
 #include <vector>
 #include <random>
 using namespace std;
-
+namespace platform {
 class Fold {
 protected:
     int k;
     int n;
     int seed;
     default_random_engine random_seed;
 public:
     Fold(int k, int n, int seed = -1);
     virtual pair<vector<int>, vector<int>> getFold(int nFold) = 0;
     virtual ~Fold() = default;
     int getNumberOfFolds() { return k; }
 };
 class KFold : public Fold {
 private:
     vector<int> indices;
 public:
     KFold(int k, int n, int seed = -1);
     pair<vector<int>, vector<int>> getFold(int nFold) override;
 };
 class StratifiedKFold : public Fold {
 private:
     vector<int> y;
     vector<vector<int>> stratified_indices;
     void build();
 public:
     StratifiedKFold(int k, const vector<int>& y, int seed = -1);
     StratifiedKFold(int k, torch::Tensor& y, int seed = -1);
     pair<vector<int>, vector<int>> getFold(int nFold) override;
 };
+}
 #endif
@@ -26,7 +26,7 @@ namespace platform {
         instance = it->second();
         // wrap instance in a shared ptr and return
         if (instance != nullptr)
-            return shared_ptr<bayesnet::BaseClassifier>(instance);
+            return unique_ptr<bayesnet::BaseClassifier>(instance);
         else
             return nullptr;
     }
@@ -10,6 +10,7 @@
 #include "KDBLd.h"
 #include "SPODELd.h"
 #include "AODELd.h"
+#include "BoostAODE.h"
 namespace platform {
     class Models {
     private:
12
src/Platform/Paths.h
Normal file
@@ -0,0 +1,12 @@
#ifndef PATHS_H
#define PATHS_H
#include <string>
namespace platform {
    class Paths {
    public:
        static std::string datasets() { return "datasets/"; }
        static std::string results() { return "results/"; }
        static std::string excel() { return "excel/"; }
    };
}
#endif
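These helpers replace the path literals that were previously scattered through the tools (see the PATH_DATASETS constant removed from main.cc at the end of this compare). A trivial sketch of the intent, not part of the changeset:

#include "Paths.h"
#include <iostream>
int main()
{
    // All tools derive artifact locations from one place instead of ad-hoc literals.
    std::cout << platform::Paths::datasets() << " "
              << platform::Paths::results() << " "
              << platform::Paths::excel() << std::endl;
}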
@@ -1,67 +0,0 @@
#include "Report.h"

namespace platform {
    string headerLine(const string& text)
    {
        int n = MAXL - text.length() - 3;
        n = n < 0 ? 0 : n;
        return "* " + text + string(n, ' ') + "*\n";
    }
    string Report::fromVector(const string& key)
    {
        string result = "";

        for (auto& item : data[key]) {
            result += to_string(item) + ", ";
        }
        return "[" + result.substr(0, result.size() - 2) + "]";
    }
    string fVector(const json& data)
    {
        string result = "";
        for (const auto& item : data) {
            result += to_string(item) + ", ";
        }
        return "[" + result.substr(0, result.size() - 2) + "]";
    }
    void Report::show()
    {
        header();
        body();
    }
    void Report::header()
    {
        cout << string(MAXL, '*') << endl;
        cout << headerLine("Report " + data["model"].get<string>() + " ver. " + data["version"].get<string>() + " with " + to_string(data["folds"].get<int>()) + " Folds cross validation and " + to_string(data["seeds"].size()) + " random seeds. " + data["date"].get<string>() + " " + data["time"].get<string>());
        cout << headerLine(data["title"].get<string>());
        cout << headerLine("Random seeds: " + fromVector("seeds") + " Stratified: " + (data["stratified"].get<bool>() ? "True" : "False"));
        cout << headerLine("Execution took " + to_string(data["duration"].get<float>()) + " seconds, " + to_string(data["duration"].get<float>() / 3600) + " hours, on " + data["platform"].get<string>());
        cout << headerLine("Score is " + data["score_name"].get<string>());
        cout << string(MAXL, '*') << endl;
        cout << endl;
    }
    void Report::body()
    {
        cout << "Dataset Sampl. Feat. Cls Nodes Edges States Score Time Hyperparameters" << endl;
        cout << "============================== ====== ===== === ======= ======= ======= =============== ================= ===============" << endl;
        for (const auto& r : data["results"]) {
            cout << setw(30) << left << r["dataset"].get<string>() << " ";
            cout << setw(6) << right << r["samples"].get<int>() << " ";
            cout << setw(5) << right << r["features"].get<int>() << " ";
            cout << setw(3) << right << r["classes"].get<int>() << " ";
            cout << setw(7) << setprecision(2) << fixed << r["nodes"].get<float>() << " ";
            cout << setw(7) << setprecision(2) << fixed << r["leaves"].get<float>() << " ";
            cout << setw(7) << setprecision(2) << fixed << r["depth"].get<float>() << " ";
            cout << setw(8) << right << setprecision(6) << fixed << r["score_test"].get<double>() << "±" << setw(6) << setprecision(4) << fixed << r["score_test_std"].get<double>() << " ";
            cout << setw(10) << right << setprecision(6) << fixed << r["test_time"].get<double>() << "±" << setw(6) << setprecision(4) << fixed << r["test_time_std"].get<double>() << " ";
            cout << " " << r["hyperparameters"].get<string>();
            cout << endl;
            cout << string(MAXL, '*') << endl;
            cout << headerLine("Train scores: " + fVector(r["scores_train"]));
            cout << headerLine("Test scores: " + fVector(r["scores_test"]));
            cout << headerLine("Train times: " + fVector(r["times_train"]));
            cout << headerLine("Test times: " + fVector(r["times_test"]));
            cout << string(MAXL, '*') << endl;
        }
    }
}
@@ -1,23 +0,0 @@
#ifndef REPORT_H
#define REPORT_H
#include <string>
#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;
const int MAXL = 121;
namespace platform {
    using namespace std;
    class Report {
    public:
        explicit Report(json data_) { data = data_; };
        virtual ~Report() = default;
        void show();
    private:
        void header();
        void body();
        string fromVector(const string& key);
        json data;
    };
};
#endif
114
src/Platform/ReportBase.cc
Normal file
@@ -0,0 +1,114 @@
#include <sstream>
#include <locale>
#include "Datasets.h"
#include "ReportBase.h"
#include "BestScore.h"

namespace platform {
    ReportBase::ReportBase(json data_, bool compare) : data(data_), compare(compare), margin(0.1)
    {
        stringstream oss;
        oss << "Better than ZeroR + " << setprecision(1) << fixed << margin * 100 << "%";
        meaning = {
            {Symbols::equal_best, "Equal to best"},
            {Symbols::better_best, "Better than best"},
            {Symbols::cross, "Less than or equal to ZeroR"},
            {Symbols::upward_arrow, oss.str()}
        };
    }
    string ReportBase::fromVector(const string& key)
    {
        stringstream oss;
        string sep = "";
        oss << "[";
        for (auto& item : data[key]) {
            oss << sep << item.get<double>();
            sep = ", ";
        }
        oss << "]";
        return oss.str();
    }
    string ReportBase::fVector(const string& title, const json& data, const int width, const int precision)
    {
        stringstream oss;
        string sep = "";
        oss << title << "[";
        for (const auto& item : data) {
            oss << sep << fixed << setw(width) << setprecision(precision) << item.get<double>();
            sep = ", ";
        }
        oss << "]";
        return oss.str();
    }
    void ReportBase::show()
    {
        header();
        body();
    }
    string ReportBase::compareResult(const string& dataset, double result)
    {
        string status = " ";
        if (compare) {
            double best = bestResult(dataset, data["model"].get<string>());
            if (result == best) {
                status = Symbols::equal_best;
            } else if (result > best) {
                status = Symbols::better_best;
            }
        } else {
            if (data["score_name"].get<string>() == "accuracy") {
                auto dt = Datasets(Paths::datasets(), false);
                dt.loadDataset(dataset);
                auto numClasses = dt.getNClasses(dataset);
                if (numClasses == 2) {
                    vector<int> distribution = dt.getClassesCounts(dataset);
                    double nSamples = dt.getNSamples(dataset);
                    vector<int>::iterator maxValue = max_element(distribution.begin(), distribution.end());
                    double mark = *maxValue / nSamples * (1 + margin);
                    if (mark > 1) {
                        mark = 0.9995;
                    }
                    status = result < mark ? Symbols::cross : result > mark ? Symbols::upward_arrow : "=";
                }
            }
        }
        if (status != " ") {
            auto item = summary.find(status);
            if (item != summary.end()) {
                summary[status]++;
            } else {
                summary[status] = 1;
            }
        }
        return status;
    }
    double ReportBase::bestResult(const string& dataset, const string& model)
    {
        double value = 0.0;
        if (bestResults.size() == 0) {
            // try to load the best results
            string score = data["score_name"];
            replace(score.begin(), score.end(), '_', '-');
            string fileName = "best_results_" + score + "_" + model + ".json";
            ifstream resultData(Paths::results() + "/" + fileName);
            if (resultData.is_open()) {
                bestResults = json::parse(resultData);
            } else {
                existBestFile = false;
            }
        }
        try {
            value = bestResults.at(dataset).at(0);
        }
        catch (exception) {
            value = 1.0;
        }
        return value;
    }
    bool ReportBase::getExistBestFile()
    {
        return existBestFile;
    }
}
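When no best-results file is in play, compareResult falls back to a ZeroR check on binary accuracy datasets: a score earns the upward arrow only if it beats the majority-class rate by the 10% margin. A worked sketch with made-up numbers (editor's addition):

// Hypothetical 70/30 binary dataset, margin = 0.1 as in ReportBase.
#include <iostream>
int main()
{
    double majority = 70.0, nSamples = 100.0, margin = 0.1;
    double mark = majority / nSamples * (1 + margin); // 0.7 * 1.1 = 0.77
    if (mark > 1) mark = 0.9995;                      // same cap as compareResult
    double result = 0.75;                             // hypothetical accuracy
    std::cout << (result < mark ? "cross" : result > mark ? "upward arrow" : "=") << std::endl;
}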
46
src/Platform/ReportBase.h
Normal file
@@ -0,0 +1,46 @@
#ifndef REPORTBASE_H
#define REPORTBASE_H
#include <string>
#include <iostream>
#include "Paths.h"
#include <nlohmann/json.hpp>

using json = nlohmann::json;
namespace platform {
    using namespace std;
    class Symbols {
    public:
        inline static const string check_mark{ "\u2714" };
        inline static const string exclamation{ "\u2757" };
        inline static const string black_star{ "\u2605" };
        inline static const string cross{ "\u2717" };
        inline static const string upward_arrow{ "\u27B6" };
        inline static const string down_arrow{ "\u27B4" };
        inline static const string equal_best{ check_mark };
        inline static const string better_best{ black_star };
    };
    class ReportBase {
    public:
        explicit ReportBase(json data_, bool compare);
        virtual ~ReportBase() = default;
        void show();
    protected:
        json data;
        string fromVector(const string& key);
        string fVector(const string& title, const json& data, const int width, const int precision);
        bool getExistBestFile();
        virtual void header() = 0;
        virtual void body() = 0;
        virtual void showSummary() = 0;
        string compareResult(const string& dataset, double result);
        map<string, int> summary;
        double margin;
        map<string, string> meaning;
        bool compare;
    private:
        double bestResult(const string& dataset, const string& model);
        json bestResults;
        bool existBestFile = true;
    };
};
#endif
112
src/Platform/ReportConsole.cc
Normal file
@@ -0,0 +1,112 @@
#include <sstream>
#include <locale>
#include "ReportConsole.h"
#include "BestScore.h"

namespace platform {
    struct separated : numpunct<char> {
        char do_decimal_point() const { return ','; }
        char do_thousands_sep() const { return '.'; }
        string do_grouping() const { return "\03"; }
    };

    string ReportConsole::headerLine(const string& text, int utf = 0)
    {
        int n = MAXL - text.length() - 3;
        n = n < 0 ? 0 : n;
        return "* " + text + string(n + utf, ' ') + "*\n";
    }

    void ReportConsole::header()
    {
        locale mylocale(cout.getloc(), new separated);
        locale::global(mylocale);
        cout.imbue(mylocale);
        stringstream oss;
        cout << Colors::MAGENTA() << string(MAXL, '*') << endl;
        cout << headerLine("Report " + data["model"].get<string>() + " ver. " + data["version"].get<string>() + " with " + to_string(data["folds"].get<int>()) + " Folds cross validation and " + to_string(data["seeds"].size()) + " random seeds. " + data["date"].get<string>() + " " + data["time"].get<string>());
        cout << headerLine(data["title"].get<string>());
        cout << headerLine("Random seeds: " + fromVector("seeds") + " Stratified: " + (data["stratified"].get<bool>() ? "True" : "False"));
        oss << "Execution took " << setprecision(2) << fixed << data["duration"].get<float>() << " seconds, " << data["duration"].get<float>() / 3600 << " hours, on " << data["platform"].get<string>();
        cout << headerLine(oss.str());
        cout << headerLine("Score is " + data["score_name"].get<string>());
        cout << string(MAXL, '*') << endl;
        cout << endl;
    }
    void ReportConsole::body()
    {
        cout << Colors::GREEN() << " # Dataset Sampl. Feat. Cls Nodes Edges States Score Time Hyperparameters" << endl;
        cout << "=== ========================= ====== ===== === ========= ========= ========= =============== =================== ====================" << endl;
        json lastResult;
        double totalScore = 0.0;
        bool odd = true;
        int index = 0;
        for (const auto& r : data["results"]) {
            if (selectedIndex != -1 && index != selectedIndex) {
                index++;
                continue;
            }
            auto color = odd ? Colors::CYAN() : Colors::BLUE();
            cout << color;
            cout << setw(3) << index++ << " ";
            cout << setw(25) << left << r["dataset"].get<string>() << " ";
            cout << setw(6) << right << r["samples"].get<int>() << " ";
            cout << setw(5) << right << r["features"].get<int>() << " ";
            cout << setw(3) << right << r["classes"].get<int>() << " ";
            cout << setw(9) << setprecision(2) << fixed << r["nodes"].get<float>() << " ";
            cout << setw(9) << setprecision(2) << fixed << r["leaves"].get<float>() << " ";
            cout << setw(9) << setprecision(2) << fixed << r["depth"].get<float>() << " ";
            cout << setw(8) << right << setprecision(6) << fixed << r["score"].get<double>() << "±" << setw(6) << setprecision(4) << fixed << r["score_std"].get<double>();
            const string status = compareResult(r["dataset"].get<string>(), r["score"].get<double>());
            cout << status;
            cout << setw(12) << right << setprecision(6) << fixed << r["time"].get<double>() << "±" << setw(6) << setprecision(4) << fixed << r["time_std"].get<double>() << " ";
            try {
                cout << r["hyperparameters"].get<string>();
            }
            catch (const exception& err) {
                cout << r["hyperparameters"];
            }
            cout << endl;
            lastResult = r;
            totalScore += r["score"].get<double>();
            odd = !odd;
        }
        if (data["results"].size() == 1 || selectedIndex != -1) {
            cout << string(MAXL, '*') << endl;
            cout << headerLine(fVector("Train scores: ", lastResult["scores_train"], 14, 12));
            cout << headerLine(fVector("Test scores: ", lastResult["scores_test"], 14, 12));
            cout << headerLine(fVector("Train times: ", lastResult["times_train"], 10, 3));
            cout << headerLine(fVector("Test times: ", lastResult["times_test"], 10, 3));
            cout << string(MAXL, '*') << endl;
        } else {
            footer(totalScore);
        }
    }
    void ReportConsole::showSummary()
    {
        for (const auto& item : summary) {
            stringstream oss;
            oss << setw(3) << left << item.first;
            oss << setw(3) << right << item.second << " ";
            oss << left << meaning.at(item.first);
            cout << headerLine(oss.str(), 2);
        }
    }

    void ReportConsole::footer(double totalScore)
    {
        cout << Colors::MAGENTA() << string(MAXL, '*') << endl;
        showSummary();
        auto score = data["score_name"].get<string>();
        if (score == BestScore::scoreName()) {
            stringstream oss;
            oss << score << " compared to " << BestScore::title() << " .: " << totalScore / BestScore::score();
            cout << headerLine(oss.str());
        }
        if (!getExistBestFile() && compare) {
            cout << headerLine("*** Best Results File not found. Couldn't compare any result!");
        }
        cout << string(MAXL, '*') << endl << Colors::RESET();
    }
}
24
src/Platform/ReportConsole.h
Normal file
@@ -0,0 +1,24 @@
#ifndef REPORTCONSOLE_H
#define REPORTCONSOLE_H
#include <string>
#include <iostream>
#include "ReportBase.h"
#include "Colors.h"

namespace platform {
    using namespace std;
    const int MAXL = 133;
    class ReportConsole : public ReportBase {
    public:
        explicit ReportConsole(json data_, bool compare = false, int index = -1) : ReportBase(data_, compare), selectedIndex(index) {};
        virtual ~ReportConsole() = default;
    private:
        int selectedIndex;
        string headerLine(const string& text, int utf);
        void header() override;
        void body() override;
        void footer(double totalScore);
        void showSummary() override;
    };
};
#endif
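A sketch of how the console report is driven from a saved results file (editor's addition; Result::load appears later in this compare, and the file name here is hypothetical):

#include "Paths.h"
#include "Result.h"
#include "ReportConsole.h"
int main()
{
    // Load one results_*.json and print it; compare = true marks best/ZeroR status.
    auto data = platform::Result(platform::Paths::results(), "results_sample.json").load();
    platform::ReportConsole reporter(data, true);
    reporter.show();
}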
333
src/Platform/ReportExcel.cc
Normal file
@@ -0,0 +1,333 @@
#include <sstream>
#include <locale>
#include "ReportExcel.h"
#include "BestScore.h"

namespace platform {
    struct separated : numpunct<char> {
        char do_decimal_point() const { return ','; }
        char do_thousands_sep() const { return '.'; }
        string do_grouping() const { return "\03"; }
    };

    ReportExcel::ReportExcel(json data_, bool compare, lxw_workbook* workbook) : ReportBase(data_, compare), row(0), workbook(workbook)
    {
        normalSize = 14; // font size for report body
        colorTitle = 0xB1A0C7;
        colorOdd = 0xDCE6F1;
        colorEven = 0xFDE9D9;
        createFile();
    }

    lxw_workbook* ReportExcel::getWorkbook()
    {
        return workbook;
    }

    lxw_format* ReportExcel::efectiveStyle(const string& style)
    {
        lxw_format* efectiveStyle;
        if (style == "") {
            efectiveStyle = NULL;
        } else {
            string suffix = row % 2 ? "_odd" : "_even";
            efectiveStyle = styles.at(style + suffix);
        }
        return efectiveStyle;
    }

    void ReportExcel::writeString(int row, int col, const string& text, const string& style)
    {
        worksheet_write_string(worksheet, row, col, text.c_str(), efectiveStyle(style));
    }
    void ReportExcel::writeInt(int row, int col, const int number, const string& style)
    {
        worksheet_write_number(worksheet, row, col, number, efectiveStyle(style));
    }
    void ReportExcel::writeDouble(int row, int col, const double number, const string& style)
    {
        worksheet_write_number(worksheet, row, col, number, efectiveStyle(style));
    }

    void ReportExcel::formatColumns()
    {
        worksheet_freeze_panes(worksheet, 6, 1);
        vector<int> columns_sizes = { 22, 10, 9, 7, 12, 12, 12, 12, 12, 3, 15, 12, 23 };
        for (int i = 0; i < columns_sizes.size(); ++i) {
            worksheet_set_column(worksheet, i, i, columns_sizes.at(i), NULL);
        }
    }

    void ReportExcel::addColor(lxw_format* style, bool odd)
    {
        uint32_t efectiveColor = odd ? colorEven : colorOdd;
        format_set_bg_color(style, lxw_color_t(efectiveColor));
    }
    void ReportExcel::createStyle(const string& name, lxw_format* style, bool odd)
    {
        addColor(style, odd);
        if (name == "textCentered") {
            format_set_align(style, LXW_ALIGN_CENTER);
            format_set_font_size(style, normalSize);
            format_set_border(style, LXW_BORDER_THIN);
        } else if (name == "text") {
            format_set_font_size(style, normalSize);
            format_set_border(style, LXW_BORDER_THIN);
        } else if (name == "bodyHeader") {
            format_set_bold(style);
            format_set_font_size(style, normalSize);
            format_set_align(style, LXW_ALIGN_CENTER);
            format_set_align(style, LXW_ALIGN_VERTICAL_CENTER);
            format_set_border(style, LXW_BORDER_THIN);
            format_set_bg_color(style, lxw_color_t(colorTitle));
        } else if (name == "result") {
            format_set_font_size(style, normalSize);
            format_set_border(style, LXW_BORDER_THIN);
            format_set_num_format(style, "0.0000000");
        } else if (name == "time") {
            format_set_font_size(style, normalSize);
            format_set_border(style, LXW_BORDER_THIN);
            format_set_num_format(style, "#,##0.000000");
        } else if (name == "ints") {
            format_set_font_size(style, normalSize);
            format_set_num_format(style, "###,##0");
            format_set_border(style, LXW_BORDER_THIN);
        } else if (name == "floats") {
            format_set_border(style, LXW_BORDER_THIN);
            format_set_font_size(style, normalSize);
            format_set_num_format(style, "#,##0.00");
        }
    }

    void ReportExcel::createFormats()
    {
        auto styleNames = { "text", "textCentered", "bodyHeader", "result", "time", "ints", "floats" };
        lxw_format* style;
        for (string name : styleNames) {
            // one format per style and parity (the duplicate workbook_add_format call was redundant)
            style = workbook_add_format(workbook);
            createStyle(name, style, true);
            styles[name + "_odd"] = style;
            style = workbook_add_format(workbook);
            createStyle(name, style, false);
            styles[name + "_even"] = style;
        }

        // Header 1st line
        lxw_format* headerFirst = workbook_add_format(workbook);
        format_set_bold(headerFirst);
        format_set_font_size(headerFirst, 18);
        format_set_align(headerFirst, LXW_ALIGN_CENTER);
        format_set_align(headerFirst, LXW_ALIGN_VERTICAL_CENTER);
        format_set_border(headerFirst, LXW_BORDER_THIN);
        format_set_bg_color(headerFirst, lxw_color_t(colorTitle));

        // Header rest
        lxw_format* headerRest = workbook_add_format(workbook);
        format_set_bold(headerRest);
        format_set_align(headerRest, LXW_ALIGN_CENTER);
        format_set_font_size(headerRest, 16);
        format_set_align(headerRest, LXW_ALIGN_VERTICAL_CENTER);
        format_set_border(headerRest, LXW_BORDER_THIN);
        format_set_bg_color(headerRest, lxw_color_t(colorOdd));

        // Header small
        lxw_format* headerSmall = workbook_add_format(workbook);
        format_set_bold(headerSmall);
        format_set_align(headerSmall, LXW_ALIGN_LEFT);
        format_set_font_size(headerSmall, 12);
        format_set_border(headerSmall, LXW_BORDER_THIN);
        format_set_align(headerSmall, LXW_ALIGN_VERTICAL_CENTER);
        format_set_bg_color(headerSmall, lxw_color_t(colorOdd));

        // Summary style
        lxw_format* summaryStyle = workbook_add_format(workbook);
        format_set_bold(summaryStyle);
        format_set_font_size(summaryStyle, 16);
        format_set_border(summaryStyle, LXW_BORDER_THIN);
        format_set_align(summaryStyle, LXW_ALIGN_VERTICAL_CENTER);

        styles["headerFirst"] = headerFirst;
        styles["headerRest"] = headerRest;
        styles["headerSmall"] = headerSmall;
        styles["summaryStyle"] = summaryStyle;
    }

    void ReportExcel::setProperties()
    {
        char line[data["title"].get<string>().size() + 1];
        strcpy(line, data["title"].get<string>().c_str());
        lxw_doc_properties properties = {
            .title = line,
            .subject = (char*)"Machine learning results",
            .author = (char*)"Ricardo Montañana Gómez",
            .manager = (char*)"Dr. J. A. Gámez, Dr. J. M. Puerta",
            .company = (char*)"UCLM",
            .comments = (char*)"Created with libxlsxwriter and c++",
        };
        workbook_set_properties(workbook, &properties);
    }

    void ReportExcel::createFile()
    {
        if (workbook == NULL) {
            workbook = workbook_new((Paths::excel() + fileName).c_str());
        }
        const string name = data["model"].get<string>();
        string suffix = "";
        string efectiveName;
        int num = 1;
        // Create a sheet with the name of the model
        while (true) {
            efectiveName = name + suffix;
            if (workbook_get_worksheet_by_name(workbook, efectiveName.c_str())) {
                suffix = to_string(++num);
            } else {
                worksheet = workbook_add_worksheet(workbook, efectiveName.c_str());
                break;
            }
            if (num > 100) {
                throw invalid_argument("Couldn't create sheet " + efectiveName);
            }
        }
        cout << "Adding sheet " << efectiveName << " to " << Paths::excel() + fileName << endl;
        setProperties();
        createFormats();
        formatColumns();
    }

    void ReportExcel::closeFile()
    {
        workbook_close(workbook);
    }

    void ReportExcel::header()
    {
        locale mylocale(cout.getloc(), new separated);
        locale::global(mylocale);
        cout.imbue(mylocale);
        stringstream oss;
        string message = data["model"].get<string>() + " ver. " + data["version"].get<string>() + " " +
            data["language"].get<string>() + " ver. " + data["language_version"].get<string>() +
            " with " + to_string(data["folds"].get<int>()) + " Folds cross validation and " + to_string(data["seeds"].size()) +
            " random seeds. " + data["date"].get<string>() + " " + data["time"].get<string>();
        worksheet_merge_range(worksheet, 0, 0, 0, 12, message.c_str(), styles["headerFirst"]);
        worksheet_merge_range(worksheet, 1, 0, 1, 12, data["title"].get<string>().c_str(), styles["headerRest"]);
        worksheet_merge_range(worksheet, 2, 0, 3, 0, ("Score is " + data["score_name"].get<string>()).c_str(), styles["headerRest"]);
        worksheet_merge_range(worksheet, 2, 1, 3, 3, "Execution time", styles["headerRest"]);
        oss << setprecision(2) << fixed << data["duration"].get<float>() << " s";
        worksheet_merge_range(worksheet, 2, 4, 2, 5, oss.str().c_str(), styles["headerRest"]);
        oss.str("");
        oss.clear();
        oss << setprecision(2) << fixed << data["duration"].get<float>() / 3600 << " h";
        worksheet_merge_range(worksheet, 3, 4, 3, 5, oss.str().c_str(), styles["headerRest"]);
        worksheet_merge_range(worksheet, 2, 6, 3, 7, "Platform", styles["headerRest"]);
        worksheet_merge_range(worksheet, 2, 8, 3, 9, data["platform"].get<string>().c_str(), styles["headerRest"]);
        worksheet_merge_range(worksheet, 2, 10, 2, 12, ("Random seeds: " + fromVector("seeds")).c_str(), styles["headerSmall"]);
        oss.str("");
        oss.clear();
        oss << "Stratified: " << (data["stratified"].get<bool>() ? "True" : "False");
        worksheet_merge_range(worksheet, 3, 10, 3, 11, oss.str().c_str(), styles["headerSmall"]);
        oss.str("");
        oss.clear();
        oss << "Discretized: " << (data["discretized"].get<bool>() ? "True" : "False");
        worksheet_write_string(worksheet, 3, 12, oss.str().c_str(), styles["headerSmall"]);
    }

    void ReportExcel::body()
    {
        auto head = vector<string>(
            { "Dataset", "Samples", "Features", "Classes", "Nodes", "Edges", "States", "Score", "Score Std.", "St.", "Time",
              "Time Std.", "Hyperparameters" });
        int col = 0;
        for (const auto& item : head) {
            writeString(5, col++, item, "bodyHeader");
        }
        row = 6;
        col = 0;
        int hypSize = 22;
        json lastResult;
        double totalScore = 0.0;
        string hyperparameters;
        for (const auto& r : data["results"]) {
            writeString(row, col, r["dataset"].get<string>(), "text");
            writeInt(row, col + 1, r["samples"].get<int>(), "ints");
            writeInt(row, col + 2, r["features"].get<int>(), "ints");
            writeInt(row, col + 3, r["classes"].get<int>(), "ints");
            writeDouble(row, col + 4, r["nodes"].get<float>(), "floats");
            writeDouble(row, col + 5, r["leaves"].get<float>(), "floats");
            writeDouble(row, col + 6, r["depth"].get<double>(), "floats");
            writeDouble(row, col + 7, r["score"].get<double>(), "result");
            writeDouble(row, col + 8, r["score_std"].get<double>(), "result");
            const string status = compareResult(r["dataset"].get<string>(), r["score"].get<double>());
            writeString(row, col + 9, status, "textCentered");
            writeDouble(row, col + 10, r["time"].get<double>(), "time");
            writeDouble(row, col + 11, r["time_std"].get<double>(), "time");
            try {
                hyperparameters = r["hyperparameters"].get<string>();
            }
            catch (const exception& err) {
                stringstream oss;
                oss << r["hyperparameters"];
                hyperparameters = oss.str();
            }
            if (hyperparameters.size() > hypSize) {
                hypSize = hyperparameters.size();
            }
            writeString(row, col + 12, hyperparameters, "text");
            lastResult = r;
            totalScore += r["score"].get<double>();
            row++;
        }
        // Set the right column width of hyperparameters with the maximum length
        worksheet_set_column(worksheet, 12, 12, hypSize + 5, NULL);
        // Show totals if only one dataset is present in the result
        if (data["results"].size() == 1) {
            for (const string& group : { "scores_train", "scores_test", "times_train", "times_test" }) {
                row++;
                col = 1;
                writeString(row, col, group, "text");
                for (double item : lastResult[group]) {
                    string style = group.find("scores") != string::npos ? "result" : "time";
                    writeDouble(row, ++col, item, style);
                }
            }
            // Set width of columns to show those totals completely
            worksheet_set_column(worksheet, 1, 1, 12, NULL);
            for (int i = 2; i < 7; ++i) {
                // doesn't work with from col to col, so...
                worksheet_set_column(worksheet, i, i, 15, NULL);
            }
        } else {
            footer(totalScore, row);
        }
    }

    void ReportExcel::showSummary()
    {
        for (const auto& item : summary) {
            worksheet_write_string(worksheet, row + 2, 1, item.first.c_str(), styles["summaryStyle"]);
            worksheet_write_number(worksheet, row + 2, 2, item.second, styles["summaryStyle"]);
            worksheet_merge_range(worksheet, row + 2, 3, row + 2, 5, meaning.at(item.first).c_str(), styles["summaryStyle"]);
            row += 1;
        }
    }

    void ReportExcel::footer(double totalScore, int row)
    {
        showSummary();
        row += 4 + summary.size();
        auto score = data["score_name"].get<string>();
        if (score == BestScore::scoreName()) {
            worksheet_merge_range(worksheet, row, 1, row, 5, (score + " compared to " + BestScore::title() + " .:").c_str(), efectiveStyle("text"));
            writeDouble(row, 6, totalScore / BestScore::score(), "result");
        }
        if (!getExistBestFile() && compare) {
            worksheet_write_string(worksheet, row + 1, 0, "*** Best Results File not found. Couldn't compare any result!", styles["summaryStyle"]);
        }
    }
}
42
src/Platform/ReportExcel.h
Normal file
@@ -0,0 +1,42 @@
#ifndef REPORTEXCEL_H
#define REPORTEXCEL_H
#include <map>
#include "xlsxwriter.h"
#include "ReportBase.h"
#include "Colors.h"
namespace platform {
    using namespace std;
    const int MAXLL = 128;

    class ReportExcel : public ReportBase {
    public:
        explicit ReportExcel(json data_, bool compare, lxw_workbook* workbook);
        lxw_workbook* getWorkbook();
    private:
        void writeString(int row, int col, const string& text, const string& style = "");
        void writeInt(int row, int col, const int number, const string& style = "");
        void writeDouble(int row, int col, const double number, const string& style = "");
        void formatColumns();
        void createFormats();
        void setProperties();
        void createFile();
        void closeFile();
        lxw_workbook* workbook;
        lxw_worksheet* worksheet;
        map<string, lxw_format*> styles;
        int row;
        int normalSize; // font size for report body
        uint32_t colorTitle;
        uint32_t colorOdd;
        uint32_t colorEven;
        const string fileName = "some_results.xlsx";
        void header() override;
        void body() override;
        void showSummary() override;
        void footer(double totalScore, int row);
        void createStyle(const string& name, lxw_format* style, bool odd);
        void addColor(lxw_format* style, bool odd);
        lxw_format* efectiveStyle(const string& name);
    };
};
#endif // !REPORTEXCEL_H
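The Excel counterpart is driven the same way as the console report, except the caller owns the workbook so several reports can land as sheets in one file. A hedged sketch (editor's addition; the results file name is hypothetical):

#include "Paths.h"
#include "Result.h"
#include "ReportExcel.h"
int main()
{
    auto data = platform::Result(platform::Paths::results(), "results_sample.json").load();
    platform::ReportExcel reporter(data, false, NULL); // NULL => ReportExcel creates the workbook
    reporter.show();                                   // adds a sheet named after the model
    workbook_close(reporter.getWorkbook());            // caller closes, as Results::manage() does
}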
51
src/Platform/Result.cc
Normal file
@@ -0,0 +1,51 @@
#include <filesystem>
#include <fstream>
#include <sstream>
#include "Result.h"
#include "Colors.h"
#include "BestScore.h"
namespace platform {
    Result::Result(const string& path, const string& filename)
        : path(path)
        , filename(filename)
    {
        auto data = load();
        date = data["date"];
        score = 0;
        for (const auto& result : data["results"]) {
            score += result["score"].get<double>();
        }
        scoreName = data["score_name"];
        if (scoreName == BestScore::scoreName()) {
            score /= BestScore::score();
        }
        title = data["title"];
        duration = data["duration"];
        model = data["model"];
        complete = data["results"].size() > 1;
    }

    json Result::load() const
    {
        ifstream resultData(path + "/" + filename);
        if (resultData.is_open()) {
            json data = json::parse(resultData);
            return data;
        }
        throw invalid_argument("Unable to open result file. [" + path + "/" + filename + "]");
    }

    string Result::to_string() const
    {
        stringstream oss;
        oss << date << " ";
        oss << setw(12) << left << model << " ";
        oss << setw(11) << left << scoreName << " ";
        oss << right << setw(11) << setprecision(7) << fixed << score << " ";
        auto completeString = isComplete() ? "C" : "P";
        oss << setw(1) << " " << completeString << " ";
        oss << setw(9) << setprecision(3) << fixed << duration << " ";
        oss << setw(50) << left << title << " ";
        return oss.str();
    }
}
37
src/Platform/Result.h
Normal file
@@ -0,0 +1,37 @@
#ifndef RESULT_H
#define RESULT_H
#include <map>
#include <vector>
#include <string>
#include <nlohmann/json.hpp>
namespace platform {
    using namespace std;
    using json = nlohmann::json;

    class Result {
    public:
        Result(const string& path, const string& filename);
        json load() const;
        string to_string() const;
        string getFilename() const { return filename; };
        string getDate() const { return date; };
        double getScore() const { return score; };
        string getTitle() const { return title; };
        double getDuration() const { return duration; };
        string getModel() const { return model; };
        string getScoreName() const { return scoreName; };
        bool isComplete() const { return complete; };
    private:
        string path;
        string filename;
        string date;
        double score;
        string title;
        double duration;
        string model;
        string scoreName;
        bool complete;
    };
};

#endif
268
src/Platform/Results.cc
Normal file
@@ -0,0 +1,268 @@
#include <filesystem>
#include "platformUtils.h"
#include "Results.h"
#include "ReportConsole.h"
#include "ReportExcel.h"
#include "BestScore.h"
#include "Colors.h"
namespace platform {
    void Results::load()
    {
        using std::filesystem::directory_iterator;
        for (const auto& file : directory_iterator(path)) {
            auto filename = file.path().filename().string();
            if (filename.find(".json") != string::npos && filename.find("results_") == 0) {
                auto result = Result(path, filename);
                bool addResult = true;
                if (model != "any" && result.getModel() != model || scoreName != "any" && scoreName != result.getScoreName() || complete && !result.isComplete() || partial && result.isComplete())
                    addResult = false;
                if (addResult)
                    files.push_back(result);
            }
        }
        if (max == 0) {
            max = files.size();
        }
    }
    void Results::show() const
    {
        cout << Colors::GREEN() << "Results found: " << files.size() << endl;
        cout << "-------------------" << endl;
        if (complete) {
            cout << Colors::MAGENTA() << "Only listing complete results" << endl;
        }
        if (partial) {
            cout << Colors::MAGENTA() << "Only listing partial results" << endl;
        }
        auto i = 0;
        cout << Colors::GREEN() << " # Date Model Score Name Score C/P Duration Title" << endl;
        cout << "=== ========== ============ =========== =========== === ========= =============================================================" << endl;
        bool odd = true;
        for (const auto& result : files) {
            auto color = odd ? Colors::BLUE() : Colors::CYAN();
            cout << color << setw(3) << fixed << right << i++ << " ";
            cout << result.to_string() << endl;
            if (i == max && max != 0) {
                break;
            }
            odd = !odd;
        }
    }
    int Results::getIndex(const string& intent) const
    {
        string color;
        if (intent == "delete") {
            color = Colors::RED();
        } else {
            color = Colors::YELLOW();
        }
        cout << color << "Choose result to " << intent << " (cancel=-1): ";
        string line;
        getline(cin, line);
        int index = stoi(line);
        if (index >= -1 && index < static_cast<int>(files.size())) {
            return index;
        }
        cout << "Invalid index" << endl;
        return -1;
    }
    void Results::report(const int index, const bool excelReport)
    {
        cout << Colors::YELLOW() << "Reporting " << files.at(index).getFilename() << endl;
        auto data = files.at(index).load();
        if (excelReport) {
            ReportExcel reporter(data, compare, workbook);
            reporter.show();
            openExcel = true;
            workbook = reporter.getWorkbook();
        } else {
            ReportConsole reporter(data, compare);
            reporter.show();
        }
    }
    void Results::showIndex(const int index, const int idx) const
    {
        auto data = files.at(index).load();
        if (idx < 0 or idx >= static_cast<int>(data["results"].size())) {
            cout << "Invalid index" << endl;
            return;
        }
        cout << Colors::YELLOW() << "Showing " << files.at(index).getFilename() << endl;
        ReportConsole reporter(data, compare, idx);
        reporter.show();
    }
    void Results::menu()
    {
        char option;
        int index;
        bool finished = false;
        string color, context;
        string filename, line, options = "qldhsre";
        while (!finished) {
            if (indexList) {
                color = Colors::GREEN();
                context = " (quit='q', list='l', delete='d', hide='h', sort='s', report='r', excel='e'): ";
                options = "qldhsre";
            } else {
                color = Colors::MAGENTA();
                context = " (quit='q', list='l'): ";
                options = "ql";
            }
            cout << Colors::RESET() << color;
            cout << "Choose option " << context;
            getline(cin, line);
            if (line.size() == 0)
                continue;
            if (options.find(line[0]) != string::npos) {
                if (line.size() > 1) {
                    cout << "Invalid option" << endl;
                    continue;
                }
                option = line[0];
            } else {
                if (all_of(line.begin(), line.end(), ::isdigit)) {
                    int idx = stoi(line);
                    if (indexList) {
                        // The value is about the files list
                        index = idx;
                        if (index >= 0 && index < max) {
                            report(index, false);
                            indexList = false;
                            continue;
                        }
                    } else {
                        // The value is about the result shown on screen
                        showIndex(index, idx);
                        continue;
                    }
                }
                cout << "Invalid option" << endl;
                continue;
            }
            switch (option) {
                case 'q':
                    finished = true;
                    break;
                case 'l':
                    show();
                    indexList = true;
                    break;
                case 'd':
                    index = getIndex("delete");
                    if (index == -1)
                        break;
                    filename = files[index].getFilename();
                    cout << "Deleting " << filename << endl;
                    remove((path + "/" + filename).c_str());
                    files.erase(files.begin() + index);
                    cout << "File: " + filename + " deleted!" << endl;
                    show();
                    indexList = true;
                    break;
                case 'h':
                    index = getIndex("hide");
                    if (index == -1)
                        break;
                    filename = files[index].getFilename();
                    cout << "Hiding " << filename << endl;
                    rename((path + "/" + filename).c_str(), (path + "/." + filename).c_str());
                    files.erase(files.begin() + index);
                    show();
                    menu();
                    indexList = true;
                    break;
                case 's':
                    sortList();
                    indexList = true;
                    show();
                    break;
                case 'r':
                    index = getIndex("report");
                    if (index == -1)
                        break;
                    indexList = false;
                    report(index, false);
                    break;
                case 'e':
                    index = getIndex("excel");
                    if (index == -1)
                        break;
                    indexList = true;
                    report(index, true);
                    break;
                default:
                    cout << "Invalid option" << endl;
            }
        }
    }
    void Results::sortList()
    {
        cout << Colors::YELLOW() << "Choose sorting field (date='d', score='s', duration='u', model='m'): ";
        string line;
        char option;
        getline(cin, line);
        if (line.size() == 0)
            return;
        if (line.size() > 1) {
            cout << "Invalid option" << endl;
            return;
        }
        option = line[0];
        switch (option) {
            case 'd':
                sortDate();
                break;
            case 's':
                sortScore();
                break;
            case 'u':
                sortDuration();
                break;
            case 'm':
                sortModel();
                break;
            default:
                cout << "Invalid option" << endl;
        }
    }
    void Results::sortDate()
    {
        sort(files.begin(), files.end(), [](const Result& a, const Result& b) {
            return a.getDate() > b.getDate();
            });
    }
    void Results::sortModel()
    {
        sort(files.begin(), files.end(), [](const Result& a, const Result& b) {
            return a.getModel() > b.getModel();
            });
    }
    void Results::sortDuration()
    {
        sort(files.begin(), files.end(), [](const Result& a, const Result& b) {
            return a.getDuration() > b.getDuration();
            });
    }
    void Results::sortScore()
    {
        sort(files.begin(), files.end(), [](const Result& a, const Result& b) {
            return a.getScore() > b.getScore();
            });
    }
    void Results::manage()
    {
        if (files.size() == 0) {
            cout << "No results found!" << endl;
            exit(0);
        }
        sortDate();
        show();
        menu();
        if (openExcel) {
            workbook_close(workbook);
        }
        cout << Colors::RESET() << "Done!" << endl;
    }
}
47
src/Platform/Results.h
Normal file
@@ -0,0 +1,47 @@
#ifndef RESULTS_H
#define RESULTS_H
#include "xlsxwriter.h"
#include <map>
#include <vector>
#include <string>
#include <nlohmann/json.hpp>
#include "Result.h"
namespace platform {
    using namespace std;
    using json = nlohmann::json;

    class Results {
    public:
        Results(const string& path, const int max, const string& model, const string& score, bool complete, bool partial, bool compare) :
            path(path), max(max), model(model), scoreName(score), complete(complete), partial(partial), compare(compare)
        {
            load();
        };
        void manage();
    private:
        string path;
        int max;
        string model;
        string scoreName;
        bool complete;
        bool partial;
        bool indexList = true;
        bool openExcel = false;
        bool compare;
        lxw_workbook* workbook = NULL;
        vector<Result> files;
        void load(); // Loads the list of results
        void show() const;
        void report(const int index, const bool excelReport);
        void showIndex(const int index, const int idx) const;
        int getIndex(const string& intent) const;
        void menu();
        void sortList();
        void sortDate();
        void sortScore();
        void sortModel();
        void sortDuration();
    };
};

#endif
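Putting the pieces together, a manage-style entry point reduces to one constructor call plus manage(); the filter values below are illustrative defaults, not mandated by the changeset:

#include "Paths.h"
#include "Results.h"
int main()
{
    // max = 0 => list everything; "any" disables the model/score filters.
    auto results = platform::Results(platform::Paths::results(), 0, "any", "any",
        false /*complete*/, false /*partial*/, false /*compare*/);
    results.manage();
    return 0;
}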
63
src/Platform/best.cc
Normal file
@@ -0,0 +1,63 @@
#include <iostream>
#include <argparse/argparse.hpp>
#include "Paths.h"
#include "BestResults.h"
#include "Colors.h"

using namespace std;

argparse::ArgumentParser manageArguments(int argc, char** argv)
{
    argparse::ArgumentParser program("best");
    program.add_argument("-m", "--model").default_value("").help("Filter results of the selected model (any for all models)");
    program.add_argument("-s", "--score").default_value("").help("Filter results of the score name supplied");
    program.add_argument("--build").help("build best score results file").default_value(false).implicit_value(true);
    program.add_argument("--report").help("report of best score results file").default_value(false).implicit_value(true);
    try {
        program.parse_args(argc, argv);
        auto model = program.get<string>("model");
        auto score = program.get<string>("score");
        auto build = program.get<bool>("build");
        auto report = program.get<bool>("report");
        if (model == "" || score == "") {
            throw runtime_error("Model and score name must be supplied");
        }
    }
    catch (const exception& err) {
        cerr << err.what() << endl;
        cerr << program;
        exit(1);
    }
    return program;
}

int main(int argc, char** argv)
{
    auto program = manageArguments(argc, argv);
    auto model = program.get<string>("model");
    auto score = program.get<string>("score");
    auto build = program.get<bool>("build");
    auto report = program.get<bool>("report");
    if (!report && !build) {
        cerr << "Either build, report or both, have to be selected to do anything!" << endl;
        cerr << program;
        exit(1);
    }
    auto results = platform::BestResults(platform::Paths::results(), score, model);
    if (build) {
        if (model == "any") {
            results.buildAll();
        } else {
            string fileName = results.build();
            cout << Colors::GREEN() << fileName << " created!" << Colors::RESET() << endl;
        }
    }
    if (report) {
        if (model == "any") {
            results.reportAll();
        } else {
            results.reportSingle();
        }
    }
    return 0;
}
57
src/Platform/list.cc
Normal file
57
src/Platform/list.cc
Normal file
@@ -0,0 +1,57 @@
#include <iostream>
#include <iomanip>  // setw, setprecision (assumed; may be pulled in transitively by <iostream>)
#include <sstream>  // stringstream (same assumption)
#include <locale>
#include "Paths.h"
#include "Colors.h"
#include "Datasets.h"

using namespace std;
const int BALANCE_LENGTH = 75;

struct separated : numpunct<char> {
    char do_decimal_point() const { return ','; }
    char do_thousands_sep() const { return '.'; }
    string do_grouping() const { return "\03"; }
};

void outputBalance(const string& balance)
{
    auto temp = string(balance);
    while (temp.size() > BALANCE_LENGTH - 1) {
        auto part = temp.substr(0, BALANCE_LENGTH);
        cout << part << endl;
        cout << setw(48) << " ";
        temp = temp.substr(BALANCE_LENGTH);
    }
    cout << temp << endl;
}

int main(int argc, char** argv)
{
    auto data = platform::Datasets(platform::Paths().datasets(), false);
    locale mylocale(cout.getloc(), new separated);
    locale::global(mylocale);
    cout.imbue(mylocale);
    cout << Colors::GREEN() << "Dataset                        Sampl. Feat. Cls. Balance" << endl;
    string balanceBars = string(BALANCE_LENGTH, '=');
    cout << "============================== ====== ===== === " << balanceBars << endl;
    bool odd = true;
    for (const auto& dataset : data.getNames()) {
        auto color = odd ? Colors::CYAN() : Colors::BLUE();
        cout << color << setw(30) << left << dataset << " ";
        data.loadDataset(dataset);
        auto nSamples = data.getNSamples(dataset);
        cout << setw(6) << right << nSamples << " ";
        cout << setw(5) << right << data.getFeatures(dataset).size() << " ";
        cout << setw(3) << right << data.getNClasses(dataset) << " ";
        stringstream oss;
        string sep = "";
        for (auto number : data.getClassesCounts(dataset)) {
            oss << sep << setprecision(2) << fixed << (float)number / nSamples * 100.0 << "% (" << number << ")";
            sep = " / ";
        }
        outputBalance(oss.str());
        odd = !odd;
    }
    cout << Colors::RESET() << endl;
    return 0;
}
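The separated facet is what renders the dotted thousands separators in the sample counts. A self-contained sketch of the same facet in isolation:

#include <iostream>
#include <locale>
#include <string>

// Same idea as the facet in list.cc: ',' as decimal point, '.' as
// thousands separator, digits grouped in threes ("\03" is octal for 3).
struct separated : std::numpunct<char> {
    char do_decimal_point() const override { return ','; }
    char do_thousands_sep() const override { return '.'; }
    std::string do_grouping() const override { return "\03"; }
};

int main()
{
    std::cout.imbue(std::locale(std::cout.getloc(), new separated));
    std::cout << 1234567 << std::endl; // prints 1.234.567
}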
src/Platform/main.cc
@@ -1,25 +1,27 @@
 #include <iostream>
 #include <argparse/argparse.hpp>
+#include <nlohmann/json.hpp>
 #include "platformUtils.h"
 #include "Experiment.h"
 #include "Datasets.h"
 #include "DotEnv.h"
 #include "Models.h"
 #include "modelRegister.h"
+#include "Paths.h"
+

 using namespace std;
-const string PATH_RESULTS = "results";
-const string PATH_DATASETS = "datasets";
+using json = nlohmann::json;

 argparse::ArgumentParser manageArguments(int argc, char** argv)
 {
     auto env = platform::DotEnv();
-    argparse::ArgumentParser program("BayesNetSample");
+    argparse::ArgumentParser program("main");
     program.add_argument("-d", "--dataset").default_value("").help("Dataset file name");
+    program.add_argument("--hyperparameters").default_value("{}").help("Hyperparameters passed to the model in Experiment");
     program.add_argument("-p", "--path")
         .help("folder where the data files are located, default")
-        .default_value(string{ PATH_DATASETS }
-    );
+        .default_value(string{ platform::Paths::datasets() });
     program.add_argument("-m", "--model")
         .help("Model to use " + platform::Models::instance()->toString())
         .action([](const std::string& value) {
@@ -32,6 +34,7 @@ argparse::ArgumentParser manageArguments(int argc, char** argv)
     );
     program.add_argument("--title").default_value("").help("Experiment title");
     program.add_argument("--discretize").help("Discretize input dataset").default_value((bool)stoi(env.get("discretize"))).implicit_value(true);
+    program.add_argument("--save").help("Save result (always save if no dataset is supplied)").default_value(false).implicit_value(true);
     program.add_argument("--stratified").help("If Stratified KFold is to be done").default_value((bool)stoi(env.get("stratified"))).implicit_value(true);
     program.add_argument("-f", "--folds").help("Number of folds").default_value(stoi(env.get("n_folds"))).scan<'i', int>().action([](const string& value) {
         try {
@@ -60,6 +63,8 @@ argparse::ArgumentParser manageArguments(int argc, char** argv)
         auto seeds = program.get<vector<int>>("seeds");
         auto complete_file_name = path + file_name + ".arff";
         auto title = program.get<string>("title");
+        auto hyperparameters = program.get<string>("hyperparameters");
+        auto saveResults = program.get<bool>("save");
         if (title == "" && file_name == "") {
             throw runtime_error("title is mandatory if dataset is not provided");
         }
@@ -75,7 +80,6 @@ argparse::ArgumentParser manageArguments(int argc, char** argv)
 int main(int argc, char** argv)
 {
     auto program = manageArguments(argc, argv);
-    bool saveResults = false;
     auto file_name = program.get<string>("dataset");
     auto path = program.get<string>("path");
     auto model_name = program.get<string>("model");
@@ -83,9 +87,11 @@ int main(int argc, char** argv)
     auto stratified = program.get<bool>("stratified");
     auto n_folds = program.get<int>("folds");
     auto seeds = program.get<vector<int>>("seeds");
+    auto hyperparameters = program.get<string>("hyperparameters");
     vector<string> filesToTest;
     auto datasets = platform::Datasets(path, true, platform::ARFF);
     auto title = program.get<string>("title");
+    auto saveResults = program.get<bool>("save");
     if (file_name != "") {
         if (!datasets.isDataset(file_name)) {
             cerr << "Dataset " << file_name << " not found" << endl;
@@ -96,7 +102,7 @@ int main(int argc, char** argv)
         }
         filesToTest.push_back(file_name);
     } else {
-        filesToTest = platform::Datasets(path, true, platform::ARFF).getNames();
+        filesToTest = datasets.getNames();
         saveResults = true;
     }
     /*
@@ -104,9 +110,10 @@ int main(int argc, char** argv)
     */
     auto env = platform::DotEnv();
     auto experiment = platform::Experiment();
-    experiment.setTitle(title).setLanguage("cpp").setLanguageVersion("1.0.0");
+    experiment.setTitle(title).setLanguage("cpp").setLanguageVersion("14.0.3");
     experiment.setDiscretized(discretize_dataset).setModel(model_name).setPlatform(env.get("platform"));
     experiment.setStratified(stratified).setNFolds(n_folds).setScoreName("accuracy");
+    experiment.setHyperparameters(json::parse(hyperparameters));
     for (auto seed : seeds) {
         experiment.addRandomSeed(seed);
     }
@@ -114,10 +121,10 @@ int main(int argc, char** argv)
     timer.start();
     experiment.go(filesToTest, path);
     experiment.setDuration(timer.getDuration());
-    if (saveResults)
-        experiment.save(PATH_RESULTS);
-    else
-        experiment.report();
+    if (saveResults) {
+        experiment.save(platform::Paths::results());
+    }
+    experiment.report();
     cout << "Done!" << endl;
     return 0;
 }
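The net effect of this change set: hyperparameters now travel as a JSON string parsed straight into the experiment, and saving is an explicit --save flag (forced on when no single dataset is given) rather than a hard-coded local. An invocation exercising the new options might look like main -d iris -m AODE --discretize --save --hyperparameters '{"some_option": true}', where some_option is a placeholder for whatever keys the chosen model actually accepts.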
src/Platform/manage.cc (new file, 52 lines)
@@ -0,0 +1,52 @@
#include <iostream>
#include <argparse/argparse.hpp>
#include "platformUtils.h"
#include "Paths.h"
#include "Results.h"

using namespace std;

argparse::ArgumentParser manageArguments(int argc, char** argv)
{
    argparse::ArgumentParser program("manage");
    program.add_argument("-n", "--number").default_value(0).help("Number of results to show (0 = all)").scan<'i', int>();
    program.add_argument("-m", "--model").default_value("any").help("Filter results of the selected model");
    program.add_argument("-s", "--score").default_value("any").help("Filter results of the score name supplied");
    program.add_argument("--complete").help("Show only results with all datasets").default_value(false).implicit_value(true);
    program.add_argument("--partial").help("Show only partial results").default_value(false).implicit_value(true);
    program.add_argument("--compare").help("Compare with best results").default_value(false).implicit_value(true);
    try {
        program.parse_args(argc, argv);
        auto number = program.get<int>("number");
        if (number < 0) {
            throw runtime_error("Number of results must be greater than or equal to 0");
        }
        auto model = program.get<string>("model");
        auto score = program.get<string>("score");
        auto complete = program.get<bool>("complete");
        auto partial = program.get<bool>("partial");
        auto compare = program.get<bool>("compare");
    }
    catch (const exception& err) {
        cerr << err.what() << endl;
        cerr << program;
        exit(1);
    }
    return program;
}

int main(int argc, char** argv)
{
    auto program = manageArguments(argc, argv);
    auto number = program.get<int>("number");
    auto model = program.get<string>("model");
    auto score = program.get<string>("score");
    auto complete = program.get<bool>("complete");
    auto partial = program.get<bool>("partial");
    auto compare = program.get<bool>("compare");
    if (complete)
        partial = false;
    auto results = platform::Results(platform::Paths::results(), number, model, score, complete, partial, compare);
    results.manage();
    return 0;
}
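manage is the interactive counterpart to best: it builds a filtered platform::Results list and hands control to its menu loop. Given the defaults above, a bare manage shows everything; something like manage -n 10 -m AODE -s accuracy --complete would narrow the listing to complete AODE accuracy results, ten at most.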
src/Platform/modelRegister.h
@@ -16,4 +16,6 @@ static platform::Registrar registrarA("AODE",
     [](void) -> bayesnet::BaseClassifier* { return new bayesnet::AODE();});
 static platform::Registrar registrarALD("AODELd",
     [](void) -> bayesnet::BaseClassifier* { return new bayesnet::AODELd();});
+static platform::Registrar registrarBA("BoostAODE",
+    [](void) -> bayesnet::BaseClassifier* { return new bayesnet::BoostAODE();});
 #endif
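The Registrar objects above rely on static initialization to populate a name-to-factory map before main() runs, which is how Models::instance() can turn the -m string into a classifier. A minimal self-contained sketch of the idiom, with invented stand-in types rather than the platform's actual classes:

#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

// Stand-ins for the real hierarchy; only what the sketch needs.
struct BaseClassifier { virtual ~BaseClassifier() = default; virtual std::string name() const = 0; };
struct AODE : BaseClassifier { std::string name() const override { return "AODE"; } };

// Meyers singleton so the map exists before any Registrar runs.
std::map<std::string, std::function<BaseClassifier*()>>& registry()
{
    static std::map<std::string, std::function<BaseClassifier*()>> r;
    return r;
}

struct Registrar {
    Registrar(const std::string& name, std::function<BaseClassifier*()> maker) { registry()[name] = maker; }
};

// Each static definition registers one model name at program startup.
static Registrar registrarA("AODE", []() -> BaseClassifier* { return new AODE(); });

int main()
{
    std::unique_ptr<BaseClassifier> clf(registry().at("AODE")()); // lookup by the -m string
    std::cout << clf->name() << std::endl;                        // AODE
}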
src/Platform/platformUtils.cc
@@ -1,4 +1,5 @@
 #include "platformUtils.h"
+#include "Paths.h"

 using namespace torch;

@@ -68,11 +69,12 @@ tuple<Tensor, Tensor, vector<string>, string, map<string, vector<int>>> loadDataset
         Xd = torch::zeros({ static_cast<int>(Xr[0].size()), static_cast<int>(Xr.size()) }, torch::kInt32);
         for (int i = 0; i < features.size(); ++i) {
             states[features[i]] = vector<int>(*max_element(Xr[i].begin(), Xr[i].end()) + 1);
-            iota(begin(states[features[i]]), end(states[features[i]]), 0);
+            auto& item = states.at(features[i]);
+            iota(begin(item), end(item), 0);
             Xd.index_put_({ "...", i }, torch::tensor(Xr[i], torch::kInt32));
         }
         states[className] = vector<int>(*max_element(y.begin(), y.end()) + 1);
-        iota(begin(states[className]), end(states[className]), 0);
+        iota(begin(states.at(className)), end(states.at(className)), 0);
     } else {
         Xd = torch::zeros({ static_cast<int>(X[0].size()), static_cast<int>(X.size()) }, torch::kFloat32);
         for (int i = 0; i < features.size(); ++i) {
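One subtlety in this hunk: the map entry has to be taken by reference, otherwise iota fills a temporary copy and the stored vector stays zero-initialized. A tiny self-contained sketch of the pattern:

#include <iostream>
#include <map>
#include <numeric>
#include <string>
#include <vector>

int main()
{
    std::map<std::string, std::vector<int>> states;
    states["feature0"] = std::vector<int>(4); // four zero-initialized states
    auto& item = states.at("feature0");       // reference: a plain copy would be discarded
    std::iota(item.begin(), item.end(), 0);   // fills the stored vector in place
    for (int v : states["feature0"]) std::cout << v << " "; // 0 1 2 3
    std::cout << std::endl;
}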
@@ -85,7 +87,7 @@ tuple<Tensor, Tensor, vector<string>, string, map<string, vector<int>>> loadDataset
 tuple<vector<vector<int>>, vector<int>, vector<string>, string, map<string, vector<int>>> loadFile(const string& name)
 {
     auto handler = ArffFiles();
-    handler.load(PATH + static_cast<string>(name) + ".arff");
+    handler.load(platform::Paths::datasets() + static_cast<string>(name) + ".arff");
     // Get Dataset X, y
     vector<mdlp::samples_t>& X = handler.getX();
     mdlp::labels_t& y = handler.getY();

src/Platform/platformUtils.h
@@ -8,7 +8,6 @@
 #include "ArffFiles.h"
 #include "CPPFImdlp.h"
 using namespace std;
-const string PATH = "../../data/";

 bool file_exists(const std::string& name);
 vector<string> split(const string& text, char delimiter);

@@ -4,6 +4,7 @@ if(ENABLE_TESTING)
     include_directories(${BayesNet_SOURCE_DIR}/src/Platform)
     include_directories(${BayesNet_SOURCE_DIR}/lib/Files)
     include_directories(${BayesNet_SOURCE_DIR}/lib/mdlp)
+    include_directories(${BayesNet_SOURCE_DIR}/lib/json/include)
     set(TEST_SOURCES BayesModels.cc BayesNetwork.cc ${BayesNet_SOURCE_DIR}/src/Platform/platformUtils.cc ${BayesNet_SOURCES})
     add_executable(${TEST_MAIN} ${TEST_SOURCES})
     target_link_libraries(${TEST_MAIN} PUBLIC "${TORCH_LIBRARIES}" ArffFiles mdlp Catch2::Catch2WithMain)