Compare commits

..

3 Commits

Author SHA1 Message Date
d8764db716 Fix linter warnings 2023-07-29 01:16:19 +02:00
8049df436c Add Models class 2023-07-28 12:11:52 +02:00
b420ad2bc2 Add dotenv and possible multiple seeds 2023-07-28 00:53:16 +02:00
156 changed files with 1850 additions and 9450 deletions

View File

@@ -13,4 +13,5 @@ HeaderFilterRegex: 'src/*'
AnalyzeTemporaryDtors: false AnalyzeTemporaryDtors: false
WarningsAsErrors: '' WarningsAsErrors: ''
FormatStyle: file FormatStyle: file
FormatStyleOptions: ''
... ...

View File

@@ -1,31 +0,0 @@
compilation_database_dir: build
output_directory: puml
diagrams:
BayesNet:
type: class
glob:
- src/BayesNet/*.cc
- src/Platform/*.cc
using_namespace: bayesnet
include:
namespaces:
- bayesnet
- platform
plantuml:
after:
- "note left of {{ alias(\"MyProjectMain\") }}: Main class of myproject library."
sequence:
type: sequence
glob:
- src/Platform/main.cc
combine_free_functions_into_file_participants: true
using_namespace:
- std
- bayesnet
- platform
include:
paths:
- src/BayesNet
- src/Platform
start_from:
- function: main(int,const char **)

5
.gitignore vendored
View File

@@ -31,10 +31,7 @@
*.exe *.exe
*.out *.out
*.app *.app
build/** build/
build_*/**
*.dSYM/** *.dSYM/**
cmake-build*/** cmake-build*/**
.idea .idea
puml/**
.vscode/settings.json

13
.gitmodules vendored
View File

@@ -1,25 +1,12 @@
[submodule "lib/mdlp"] [submodule "lib/mdlp"]
path = lib/mdlp path = lib/mdlp
url = https://github.com/rmontanana/mdlp url = https://github.com/rmontanana/mdlp
main = main
update = merge
[submodule "lib/catch2"] [submodule "lib/catch2"]
path = lib/catch2 path = lib/catch2
main = v2.x
update = merge
url = https://github.com/catchorg/Catch2.git url = https://github.com/catchorg/Catch2.git
[submodule "lib/argparse"] [submodule "lib/argparse"]
path = lib/argparse path = lib/argparse
url = https://github.com/p-ranav/argparse url = https://github.com/p-ranav/argparse
master = master
update = merge
[submodule "lib/json"] [submodule "lib/json"]
path = lib/json path = lib/json
url = https://github.com/nlohmann/json.git url = https://github.com/nlohmann/json.git
master = master
update = merge
[submodule "lib/libxlsxwriter"]
path = lib/libxlsxwriter
url = https://github.com/jmcnamara/libxlsxwriter.git
main = main
update = merge

View File

@@ -1,18 +0,0 @@
{
"configurations": [
{
"name": "Mac",
"includePath": [
"${workspaceFolder}/**"
],
"defines": [],
"macFrameworkPath": [
"/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks"
],
"cStandard": "c17",
"cppStandard": "c++17",
"compileCommands": "${workspaceFolder}/cmake-build-release/compile_commands.json"
}
],
"version": 4
}

109
.vscode/launch.json vendored
View File

@@ -5,119 +5,44 @@
"type": "lldb", "type": "lldb",
"request": "launch", "request": "launch",
"name": "sample", "name": "sample",
"program": "${workspaceFolder}/build_debug/sample/BayesNetSample", "program": "${workspaceFolder}/build/sample/BayesNetSample",
"args": [ "args": [
"-d", "-d",
"iris", "iris",
"-m", "-m",
"TANLd", "TAN",
"-s",
"271",
"-p", "-p",
"/Users/rmontanana/Code/discretizbench/datasets/", "../../data/",
"--tensors"
], ],
//"cwd": "${workspaceFolder}/build/sample/", "cwd": "${workspaceFolder}/build/sample/",
}, },
{ {
"type": "lldb", "type": "lldb",
"request": "launch", "request": "launch",
"name": "experimentPy", "name": "experiment",
"program": "${workspaceFolder}/build_debug/src/Platform/b_main", "program": "${workspaceFolder}/build/src/Platform/main",
"args": [
"-m",
"STree",
"--stratified",
"-d",
"iris",
//"--discretize"
// "--hyperparameters",
// "{\"repeatSparent\": true, \"maxModels\": 12}"
],
"cwd": "${workspaceFolder}/../discretizbench",
},
{
"type": "lldb",
"request": "launch",
"name": "gridsearch",
"program": "${workspaceFolder}/build_debug/src/Platform/b_grid",
"args": [
"-m",
"KDB",
"--discretize",
"--continue",
"glass",
"--only",
"--compute"
],
"cwd": "${workspaceFolder}/../discretizbench",
},
{
"type": "lldb",
"request": "launch",
"name": "experimentBayes",
"program": "${workspaceFolder}/build_debug/src/Platform/b_main",
"args": [ "args": [
"-m", "-m",
"TAN", "TAN",
"--stratified", "-p",
"/Users/rmontanana/Code/discretizbench/datasets",
"--discretize", "--discretize",
"--stratified",
"--title",
"Debug test",
"--seeds",
"1",
"-d", "-d",
"iris", "ionosphere"
"--hyperparameters",
"{\"repeatSparent\": true, \"maxModels\": 12}"
], ],
"cwd": "/home/rmontanana/Code/discretizbench", "cwd": "${workspaceFolder}/build/src/Platform",
},
{
"type": "lldb",
"request": "launch",
"name": "best",
"program": "${workspaceFolder}/build_debug/src/Platform/b_best",
"args": [
"-m",
"BoostAODE",
"-s",
"accuracy",
"--build",
],
"cwd": "${workspaceFolder}/../discretizbench",
},
{
"type": "lldb",
"request": "launch",
"name": "manage",
"program": "${workspaceFolder}/build_debug/src/Platform/b_manage",
"args": [
"-n",
"20"
],
"cwd": "${workspaceFolder}/../discretizbench",
},
{
"type": "lldb",
"request": "launch",
"name": "list",
"program": "${workspaceFolder}/build_debug/src/Platform/b_list",
"args": [],
//"cwd": "/Users/rmontanana/Code/discretizbench",
"cwd": "${workspaceFolder}/../discretizbench",
},
{
"type": "lldb",
"request": "launch",
"name": "test",
"program": "${workspaceFolder}/build_debug/tests/unit_tests",
"args": [
"-c=\"Metrics Test\"",
// "-s",
],
"cwd": "${workspaceFolder}/build/tests",
}, },
{ {
"name": "Build & debug active file", "name": "Build & debug active file",
"type": "cppdbg", "type": "cppdbg",
"request": "launch", "request": "launch",
"program": "${workspaceFolder}/build_debug/bayesnet", "program": "${workspaceFolder}/build/bayesnet",
"args": [], "args": [],
"stopAtEntry": false, "stopAtEntry": false,
"cwd": "${workspaceFolder}", "cwd": "${workspaceFolder}",

109
.vscode/settings.json vendored Normal file
View File

@@ -0,0 +1,109 @@
{
"files.associations": {
"*.rmd": "markdown",
"*.py": "python",
"vector": "cpp",
"__bit_reference": "cpp",
"__bits": "cpp",
"__config": "cpp",
"__debug": "cpp",
"__errc": "cpp",
"__hash_table": "cpp",
"__locale": "cpp",
"__mutex_base": "cpp",
"__node_handle": "cpp",
"__nullptr": "cpp",
"__split_buffer": "cpp",
"__string": "cpp",
"__threading_support": "cpp",
"__tuple": "cpp",
"array": "cpp",
"atomic": "cpp",
"bitset": "cpp",
"cctype": "cpp",
"chrono": "cpp",
"clocale": "cpp",
"cmath": "cpp",
"compare": "cpp",
"complex": "cpp",
"concepts": "cpp",
"cstdarg": "cpp",
"cstddef": "cpp",
"cstdint": "cpp",
"cstdio": "cpp",
"cstdlib": "cpp",
"cstring": "cpp",
"ctime": "cpp",
"cwchar": "cpp",
"cwctype": "cpp",
"exception": "cpp",
"initializer_list": "cpp",
"ios": "cpp",
"iosfwd": "cpp",
"istream": "cpp",
"limits": "cpp",
"locale": "cpp",
"memory": "cpp",
"mutex": "cpp",
"new": "cpp",
"optional": "cpp",
"ostream": "cpp",
"ratio": "cpp",
"sstream": "cpp",
"stdexcept": "cpp",
"streambuf": "cpp",
"string": "cpp",
"string_view": "cpp",
"system_error": "cpp",
"tuple": "cpp",
"type_traits": "cpp",
"typeinfo": "cpp",
"unordered_map": "cpp",
"variant": "cpp",
"algorithm": "cpp",
"iostream": "cpp",
"iomanip": "cpp",
"numeric": "cpp",
"set": "cpp",
"__tree": "cpp",
"deque": "cpp",
"list": "cpp",
"map": "cpp",
"unordered_set": "cpp",
"any": "cpp",
"condition_variable": "cpp",
"forward_list": "cpp",
"fstream": "cpp",
"stack": "cpp",
"thread": "cpp",
"__memory": "cpp",
"filesystem": "cpp",
"*.toml": "toml",
"utility": "cpp",
"__verbose_abort": "cpp",
"bit": "cpp",
"random": "cpp",
"*.tcc": "cpp",
"functional": "cpp",
"iterator": "cpp",
"memory_resource": "cpp",
"format": "cpp",
"valarray": "cpp",
"regex": "cpp",
"span": "cpp",
"cfenv": "cpp",
"cinttypes": "cpp",
"csetjmp": "cpp",
"future": "cpp",
"queue": "cpp",
"typeindex": "cpp",
"shared_mutex": "cpp",
"*.ipp": "cpp",
"cassert": "cpp",
"charconv": "cpp",
"source_location": "cpp",
"ranges": "cpp"
},
"cmake.configureOnOpen": false,
"C_Cpp.default.configurationProvider": "ms-vscode.cmake-tools"
}

23
.vscode/tasks.json vendored
View File

@@ -32,29 +32,6 @@
], ],
"group": "build", "group": "build",
"detail": "Task generated by Debugger." "detail": "Task generated by Debugger."
},
{
"type": "cppbuild",
"label": "C/C++: g++ build active file",
"command": "/usr/bin/g++",
"args": [
"-fdiagnostics-color=always",
"-g",
"${file}",
"-o",
"${fileDirname}/${fileBasenameNoExtension}"
],
"options": {
"cwd": "${fileDirname}"
},
"problemMatcher": [
"$gcc"
],
"group": {
"kind": "build",
"isDefault": true
},
"detail": "Task generated by Debugger."
} }
] ]
} }

View File

@@ -1,20 +1,16 @@
cmake_minimum_required(VERSION 3.20) cmake_minimum_required(VERSION 3.20)
project(BayesNet project(BayesNet
VERSION 0.2.0 VERSION 0.1.0
DESCRIPTION "Bayesian Network and basic classifiers Library." DESCRIPTION "Bayesian Network and basic classifiers Library."
HOMEPAGE_URL "https://github.com/rmontanana/bayesnet" HOMEPAGE_URL "https://github.com/rmontanana/bayesnet"
LANGUAGES CXX LANGUAGES CXX
) )
if (CODE_COVERAGE AND NOT ENABLE_TESTING)
MESSAGE(FATAL_ERROR "Code coverage requires testing enabled")
endif (CODE_COVERAGE AND NOT ENABLE_TESTING)
find_package(Torch REQUIRED) find_package(Torch REQUIRED)
if (POLICY CMP0135) if (POLICY CMP0135)
cmake_policy(SET CMP0135 NEW) cmake_policy(SET CMP0135 NEW)
endif () endif ()
# Global CMake variables # Global CMake variables
@@ -24,82 +20,57 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF) set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON) set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread")
# Options # Options
# ------- # -------
option(ENABLE_CLANG_TIDY "Enable to add clang tidy." OFF) option(ENABLE_CLANG_TIDY "Enable to add clang tidy." OFF)
option(ENABLE_TESTING "Unit testing build" OFF) option(ENABLE_TESTING "Unit testing build" ON)
option(CODE_COVERAGE "Collect coverage from test library" OFF) option(CODE_COVERAGE "Collect coverage from test library" ON)
option(MPI_ENABLED "Enable MPI options" ON)
if (MPI_ENABLED) set(CMAKE_BUILD_TYPE "Debug")
find_package(MPI REQUIRED)
message("MPI_CXX_LIBRARIES=${MPI_CXX_LIBRARIES}")
message("MPI_CXX_INCLUDE_DIRS=${MPI_CXX_INCLUDE_DIRS}")
endif (MPI_ENABLED)
# Boost Library
set(Boost_USE_STATIC_LIBS OFF)
set(Boost_USE_MULTITHREADED ON)
set(Boost_USE_STATIC_RUNTIME OFF)
find_package(Boost 1.66.0 REQUIRED COMPONENTS python3 numpy3)
if(Boost_FOUND)
message("Boost_INCLUDE_DIRS=${Boost_INCLUDE_DIRS}")
include_directories(${Boost_INCLUDE_DIRS})
endif()
# Python
find_package(Python3 3.11...3.11.9 COMPONENTS Interpreter Development REQUIRED)
message("Python3_LIBRARIES=${Python3_LIBRARIES}")
# CMakes modules # CMakes modules
# -------------- # --------------
set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules ${CMAKE_MODULE_PATH}) set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules ${CMAKE_MODULE_PATH})
include(AddGitSubmodule) include(AddGitSubmodule)
include(StaticAnalyzers) # clang-tidy
if (CODE_COVERAGE) include(CodeCoverage)
enable_testing()
include(CodeCoverage)
MESSAGE("Code coverage enabled")
set(CMAKE_CXX_FLAGS " ${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage -O0 -g")
SET(GCC_COVERAGE_LINK_FLAGS " ${GCC_COVERAGE_LINK_FLAGS} -lgcov --coverage")
endif (CODE_COVERAGE)
if (ENABLE_CLANG_TIDY)
include(StaticAnalyzers) # clang-tidy
endif (ENABLE_CLANG_TIDY)
# External libraries - dependencies of BayesNet # External libraries - dependencies of BayesNet
# --------------------------------------------- # ---------------------------------------------
# include(FetchContent) # include(FetchContent)
add_git_submodule("lib/mdlp") add_git_submodule("lib/mdlp")
add_git_submodule("lib/catch2")
add_git_submodule("lib/argparse") add_git_submodule("lib/argparse")
add_git_submodule("lib/json") add_git_submodule("lib/json")
find_library(XLSXWRITER_LIB NAMES libxlsxwriter.dylib libxlsxwriter.so PATHS ${BayesNet_SOURCE_DIR}/lib/libxlsxwriter/lib)
message("XLSXWRITER_LIB=${XLSXWRITER_LIB}")
# Subdirectories # Subdirectories
# -------------- # --------------
add_subdirectory(config) add_subdirectory(config)
add_subdirectory(lib/Files) add_subdirectory(lib/Files)
add_subdirectory(src/BayesNet) add_subdirectory(src/BayesNet)
add_subdirectory(src/Platform) add_subdirectory(src/Platform)
add_subdirectory(src/PyClassifiers)
add_subdirectory(sample) add_subdirectory(sample)
file(GLOB BayesNet_HEADERS CONFIGURE_DEPENDS ${BayesNet_SOURCE_DIR}/src/BayesNet/*.h ${BayesNet_SOURCE_DIR}/BayesNet/*.h) file(GLOB BayesNet_HEADERS CONFIGURE_DEPENDS ${BayesNet_SOURCE_DIR}/src/BayesNet/*.h ${BayesNet_SOURCE_DIR}/BayesNet/*.hpp)
file(GLOB BayesNet_SOURCES CONFIGURE_DEPENDS ${BayesNet_SOURCE_DIR}/src/BayesNet/*.cc ${BayesNet_SOURCE_DIR}/src/BayesNet/*.cpp) file(GLOB BayesNet_SOURCES CONFIGURE_DEPENDS ${BayesNet_SOURCE_DIR}/src/BayesNet/*.cc ${BayesNet_SOURCE_DIR}/src/BayesNet/*.cpp)
file(GLOB Platform_SOURCES CONFIGURE_DEPENDS ${BayesNet_SOURCE_DIR}/src/Platform/*.cc ${BayesNet_SOURCE_DIR}/src/Platform/*.cpp) file(GLOB Platform_SOURCES CONFIGURE_DEPENDS ${BayesNet_SOURCE_DIR}/src/Platform/*.cc ${BayesNet_SOURCE_DIR}/src/Platform/*.cpp)
# Testing # Testing
# ------- # -------
if (ENABLE_TESTING) if (ENABLE_TESTING)
MESSAGE("Testing enabled") MESSAGE("Testing enabled")
add_git_submodule("lib/catch2") enable_testing()
if (CODE_COVERAGE)
#include(CodeCoverage)
MESSAGE("Code coverage enabled")
set(CMAKE_C_FLAGS " ${CMAKE_C_FLAGS} -fprofile-arcs -ftest-coverage")
set(CMAKE_CXX_FLAGS " ${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage")
SET(GCC_COVERAGE_LINK_FLAGS " ${GCC_COVERAGE_LINK_FLAGS} -lgcov --coverage")
endif (CODE_COVERAGE)
#find_package(Catch2 3 REQUIRED)
include(CTest) include(CTest)
#include(Catch)
add_subdirectory(tests) add_subdirectory(tests)
endif (ENABLE_TESTING) endif (ENABLE_TESTING)

120
Makefile
View File

@@ -1,26 +1,6 @@
SHELL := /bin/bash SHELL := /bin/bash
.DEFAULT_GOAL := help .DEFAULT_GOAL := help
.PHONY: coverage setup help build test clean debug release .PHONY: coverage setup help build test
f_release = build_release
f_debug = build_debug
app_targets = b_best b_list b_main b_manage b_grid
test_targets = unit_tests_bayesnet unit_tests_platform
n_procs = -j 16
define ClearTests
@for t in $(test_targets); do \
if [ -f $(f_debug)/tests/$$t ]; then \
echo ">>> Cleaning $$t..." ; \
rm -f $(f_debug)/tests/$$t ; \
fi ; \
done
@nfiles="$(find . -name "*.gcda" -print0)" ; \
if test "${nfiles}" != "" ; then \
find . -name "*.gcda" -print0 | xargs -0 rm 2>/dev/null ;\
fi ;
endef
setup: ## Install dependencies for tests and coverage setup: ## Install dependencies for tests and coverage
@if [ "$(shell uname)" = "Darwin" ]; then \ @if [ "$(shell uname)" = "Darwin" ]; then \
@@ -31,87 +11,35 @@ setup: ## Install dependencies for tests and coverage
pip install gcovr; \ pip install gcovr; \
fi fi
dest ?= ${HOME}/bin
install: ## Copy binary files to bin folder
@echo "Destination folder: $(dest)"
make buildr
@echo "*******************************************"
@echo ">>> Copying files to $(dest)"
@echo "*******************************************"
@for item in $(app_targets); do \
echo ">>> Copying $$item" ; \
cp $(f_release)/src/Platform/$$item $(dest) ; \
done
dependency: ## Create a dependency graph diagram of the project (build/dependency.png) dependency: ## Create a dependency graph diagram of the project (build/dependency.png)
@echo ">>> Creating dependency graph diagram of the project..."; cd build && cmake .. --graphviz=dependency.dot && dot -Tpng dependency.dot -o dependency.png
$(MAKE) debug
cd $(f_debug) && cmake .. --graphviz=dependency.dot && dot -Tpng dependency.dot -o dependency.png
buildd: ## Build the debug targets build: ## Build the project
cmake --build $(f_debug) -t $(app_targets) BayesNetSample $(n_procs) @echo ">>> Building BayesNet ...";
@if [ -d ./build ]; then rm -rf ./build; fi
buildr: ## Build the release targets @mkdir build;
cmake --build $(f_release) -t $(app_targets) BayesNetSample $(n_procs) cmake -S . -B build; \
cd build; \
clean: ## Clean the tests info make; \
@echo ">>> Cleaning Debug BayesNet tests...";
$(call ClearTests)
@echo ">>> Done"; @echo ">>> Done";
clang-uml: ## Create uml class and sequence diagrams test: ## Run tests
clang-uml -p --add-compile-flag -I /usr/lib/gcc/x86_64-redhat-linux/8/include/ @echo "* Running tests...";
find . -name "*.gcda" -print0 | xargs -0 rm
debug: ## Build a debug version of the project @cd build; \
@echo ">>> Building Debug BayesNet..."; cmake --build . --target unit_tests ;
@if [ -d ./$(f_debug) ]; then rm -rf ./$(f_debug); fi @cd build/tests; \
@mkdir $(f_debug); ./unit_tests;
@cmake -S . -B $(f_debug) -D CMAKE_BUILD_TYPE=Debug -D ENABLE_TESTING=ON -D CODE_COVERAGE=ON
@echo ">>> Done";
release: ## Build a Release version of the project
@echo ">>> Building Release BayesNet...";
@if [ -d ./$(f_release) ]; then rm -rf ./$(f_release); fi
@mkdir $(f_release);
@cmake -S . -B $(f_release) -D CMAKE_BUILD_TYPE=Release
@echo ">>> Done";
opt = ""
test: ## Run tests (opt="-s") to verbose output the tests, (opt="-c='Test Maximum Spanning Tree'") to run only that section
@echo ">>> Running BayesNet & Platform tests...";
@$(MAKE) clean
@cmake --build $(f_debug) -t $(test_targets) $(n_procs)
@for t in $(test_targets); do \
if [ -f $(f_debug)/tests/$$t ]; then \
cd $(f_debug)/tests ; \
./$$t $(opt) ; \
fi ; \
done
@echo ">>> Done";
opt = ""
testp: ## Run platform tests (opt="-s") to verbose output the tests, (opt="-c='Stratified Fold Test'") to run only that section
@echo ">>> Running Platform tests...";
@$(MAKE) clean
@cmake --build $(f_debug) --target unit_tests_platform $(n_procs)
@if [ -f $(f_debug)/tests/unit_tests_platform ]; then cd $(f_debug)/tests ; ./unit_tests_platform $(opt) ; fi ;
@echo ">>> Done";
opt = ""
testb: ## Run BayesNet tests (opt="-s") to verbose output the tests, (opt="-c='Test Maximum Spanning Tree'") to run only that section
@echo ">>> Running BayesNet tests...";
@$(MAKE) clean
@cmake --build $(f_debug) --target unit_tests_bayesnet $(n_procs)
@if [ -f $(f_debug)/tests/unit_tests_bayesnet ]; then cd $(f_debug)/tests ; ./unit_tests_bayesnet $(opt) ; fi ;
@echo ">>> Done";
coverage: ## Run tests and generate coverage report (build/index.html) coverage: ## Run tests and generate coverage report (build/index.html)
@echo ">>> Building tests with coverage..."; @echo "*Building tests...";
@$(MAKE) test find . -name "*.gcda" -print0 | xargs -0 rm
@cd $(f_debug) ; \ @cd build; \
gcovr --config ../gcovr.cfg tests ; cmake --build . --target unit_tests ;
@echo ">>> Done"; @cd build/tests; \
./unit_tests;
gcovr ;
help: ## Show help message help: ## Show help message
@IFS=$$'\n' ; \ @IFS=$$'\n' ; \

View File

@@ -1,79 +1,5 @@
# BayesNet # BayesNet
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
Bayesian Network Classifier with libtorch from scratch Bayesian Network Classifier with libtorch from scratch
## 0. Setup
Before compiling BayesNet.
### MPI
In Linux just install openmpi & openmpi-devel packages. Only cmake can't find openmpi install (like in Oracle Linux) set the following variable:
```bash
export MPI_HOME="/usr/lib64/openmpi"
```
In Mac OS X, install mpich with brew and if cmake doesn't find it, edit mpicxx wrapper to remove the ",-commons,use_dylibs" from final_ldflags
```bash
vi /opt/homebrew/bin/mpicx
```
### boost library
[Getting Started](<https://www.boost.org/doc/libs/1_83_0/more/getting_started/index.html>)
The best option is install the packages that the Linux distribution have in its repository. If this is the case:
```bash
sudo dnf install boost-devel
```
If this is not possible and the compressed packaged is installed, the following environment variable has to be set pointing to the folder where it was unzipped to:
```bash
export BOOST_ROOT=/path/to/library/
```
In some cases, it is needed to build the library, to do so:
```bash
cd /path/to/library
mkdir own
./bootstrap.sh --prefix=/path/to/library/own
./b2 install
export BOOST_ROOT=/path/to/library/own/
```
Don't forget to add the export BOOST_ROOT statement to .bashrc or wherever it is meant to be.
### libxlswriter
```bash
cd lib/libxlsxwriter
make
make install DESTDIR=/home/rmontanana/Code PREFIX=
```
Environment variable has to be set:
```bash
export LD_LIBRARY_PATH=/usr/local/lib
```
### Release
```bash
make release
```
### Debug & Tests
```bash
make debug
```
## 1. Introduction ## 1. Introduction

View File

@@ -0,0 +1 @@
null

Binary file not shown.

View File

@@ -1,4 +1,5 @@
filter = src/ filter = src/
exclude-directories = build/lib/ exclude = external/
exclude = tests/
print-summary = yes print-summary = yes
sort-percentage = yes sort-percentage = yes

View File

@@ -1,162 +0,0 @@
{
"balance-scale": {
"C": 10000.0,
"gamma": 0.1,
"kernel": "rbf",
"max_iter": 10000
},
"balloons": {
"C": 7,
"gamma": 0.1,
"kernel": "rbf",
"max_iter": 10000
},
"breast-cancer-wisc-diag": {
"C": 0.2,
"max_iter": 10000
},
"breast-cancer-wisc-prog": {
"C": 0.2,
"max_iter": 10000
},
"breast-cancer-wisc": {},
"breast-cancer": {},
"cardiotocography-10clases": {},
"cardiotocography-3clases": {},
"conn-bench-sonar-mines-rocks": {},
"cylinder-bands": {},
"dermatology": {
"C": 55,
"max_iter": 10000
},
"echocardiogram": {
"C": 7,
"gamma": 0.1,
"kernel": "poly",
"max_features": "auto",
"max_iter": 10000
},
"fertility": {
"C": 0.05,
"max_features": "auto",
"max_iter": 10000
},
"haberman-survival": {},
"heart-hungarian": {
"C": 0.05,
"max_iter": 10000
},
"hepatitis": {
"C": 7,
"gamma": 0.1,
"kernel": "rbf",
"max_iter": 10000
},
"ilpd-indian-liver": {},
"ionosphere": {
"C": 7,
"gamma": 0.1,
"kernel": "rbf",
"max_iter": 10000
},
"iris": {},
"led-display": {},
"libras": {
"C": 0.08,
"max_iter": 10000
},
"low-res-spect": {
"C": 0.05,
"max_iter": 10000
},
"lymphography": {
"C": 0.05,
"max_iter": 10000
},
"mammographic": {},
"molec-biol-promoter": {
"C": 0.05,
"gamma": 0.1,
"kernel": "poly",
"max_iter": 10000
},
"musk-1": {
"C": 0.05,
"gamma": 0.1,
"kernel": "poly",
"max_iter": 10000
},
"oocytes_merluccius_nucleus_4d": {
"C": 8.25,
"gamma": 0.1,
"kernel": "poly"
},
"oocytes_merluccius_states_2f": {},
"oocytes_trisopterus_nucleus_2f": {},
"oocytes_trisopterus_states_5b": {
"C": 0.11,
"max_iter": 10000
},
"parkinsons": {},
"pima": {},
"pittsburg-bridges-MATERIAL": {
"C": 7,
"gamma": 0.1,
"kernel": "rbf",
"max_iter": 10000
},
"pittsburg-bridges-REL-L": {},
"pittsburg-bridges-SPAN": {
"C": 0.05,
"max_iter": 10000
},
"pittsburg-bridges-T-OR-D": {},
"planning": {
"C": 7,
"gamma": 10.0,
"kernel": "rbf",
"max_iter": 10000
},
"post-operative": {
"C": 55,
"degree": 5,
"gamma": 0.1,
"kernel": "poly",
"max_iter": 10000
},
"seeds": {
"C": 10000.0,
"max_iter": 10000
},
"statlog-australian-credit": {
"C": 0.05,
"max_features": "auto",
"max_iter": 10000
},
"statlog-german-credit": {},
"statlog-heart": {},
"statlog-image": {
"C": 7,
"max_iter": 10000
},
"statlog-vehicle": {},
"synthetic-control": {
"C": 0.55,
"max_iter": 10000
},
"tic-tac-toe": {
"C": 0.2,
"gamma": 0.1,
"kernel": "poly",
"max_iter": 10000
},
"vertebral-column-2clases": {},
"wine": {
"C": 0.55,
"max_iter": 10000
},
"zoo": {
"C": 0.1,
"max_iter": 10000
}
}

View File

@@ -4,9 +4,11 @@
#include <map> #include <map>
#include <iostream> #include <iostream>
using namespace std;
ArffFiles::ArffFiles() = default; ArffFiles::ArffFiles() = default;
std::vector<std::string> ArffFiles::getLines() const vector<string> ArffFiles::getLines() const
{ {
return lines; return lines;
} }
@@ -16,48 +18,48 @@ unsigned long int ArffFiles::getSize() const
return lines.size(); return lines.size();
} }
std::vector<std::pair<std::string, std::string>> ArffFiles::getAttributes() const vector<pair<string, string>> ArffFiles::getAttributes() const
{ {
return attributes; return attributes;
} }
std::string ArffFiles::getClassName() const string ArffFiles::getClassName() const
{ {
return className; return className;
} }
std::string ArffFiles::getClassType() const string ArffFiles::getClassType() const
{ {
return classType; return classType;
} }
std::vector<std::vector<float>>& ArffFiles::getX() vector<vector<float>>& ArffFiles::getX()
{ {
return X; return X;
} }
std::vector<int>& ArffFiles::getY() vector<int>& ArffFiles::getY()
{ {
return y; return y;
} }
void ArffFiles::loadCommon(std::string fileName) void ArffFiles::loadCommon(string fileName)
{ {
std::ifstream file(fileName); ifstream file(fileName);
if (!file.is_open()) { if (!file.is_open()) {
throw std::invalid_argument("Unable to open file"); throw invalid_argument("Unable to open file");
} }
std::string line; string line;
std::string keyword; string keyword;
std::string attribute; string attribute;
std::string type; string type;
std::string type_w; string type_w;
while (getline(file, line)) { while (getline(file, line)) {
if (line.empty() || line[0] == '%' || line == "\r" || line == " ") { if (line.empty() || line[0] == '%' || line == "\r" || line == " ") {
continue; continue;
} }
if (line.find("@attribute") != std::string::npos || line.find("@ATTRIBUTE") != std::string::npos) { if (line.find("@attribute") != string::npos || line.find("@ATTRIBUTE") != string::npos) {
std::stringstream ss(line); stringstream ss(line);
ss >> keyword >> attribute; ss >> keyword >> attribute;
type = ""; type = "";
while (ss >> type_w) while (ss >> type_w)
@@ -72,35 +74,35 @@ void ArffFiles::loadCommon(std::string fileName)
} }
file.close(); file.close();
if (attributes.empty()) if (attributes.empty())
throw std::invalid_argument("No attributes found"); throw invalid_argument("No attributes found");
} }
void ArffFiles::load(const std::string& fileName, bool classLast) void ArffFiles::load(const string& fileName, bool classLast)
{ {
int labelIndex; int labelIndex;
loadCommon(fileName); loadCommon(fileName);
if (classLast) { if (classLast) {
className = std::get<0>(attributes.back()); className = get<0>(attributes.back());
classType = std::get<1>(attributes.back()); classType = get<1>(attributes.back());
attributes.pop_back(); attributes.pop_back();
labelIndex = static_cast<int>(attributes.size()); labelIndex = static_cast<int>(attributes.size());
} else { } else {
className = std::get<0>(attributes.front()); className = get<0>(attributes.front());
classType = std::get<1>(attributes.front()); classType = get<1>(attributes.front());
attributes.erase(attributes.begin()); attributes.erase(attributes.begin());
labelIndex = 0; labelIndex = 0;
} }
generateDataset(labelIndex); generateDataset(labelIndex);
} }
void ArffFiles::load(const std::string& fileName, const std::string& name) void ArffFiles::load(const string& fileName, const string& name)
{ {
int labelIndex; int labelIndex;
loadCommon(fileName); loadCommon(fileName);
bool found = false; bool found = false;
for (int i = 0; i < attributes.size(); ++i) { for (int i = 0; i < attributes.size(); ++i) {
if (attributes[i].first == name) { if (attributes[i].first == name) {
className = std::get<0>(attributes[i]); className = get<0>(attributes[i]);
classType = std::get<1>(attributes[i]); classType = get<1>(attributes[i]);
attributes.erase(attributes.begin() + i); attributes.erase(attributes.begin() + i);
labelIndex = i; labelIndex = i;
found = true; found = true;
@@ -108,19 +110,19 @@ void ArffFiles::load(const std::string& fileName, const std::string& name)
} }
} }
if (!found) { if (!found) {
throw std::invalid_argument("Class name not found"); throw invalid_argument("Class name not found");
} }
generateDataset(labelIndex); generateDataset(labelIndex);
} }
void ArffFiles::generateDataset(int labelIndex) void ArffFiles::generateDataset(int labelIndex)
{ {
X = std::vector<std::vector<float>>(attributes.size(), std::vector<float>(lines.size())); X = vector<vector<float>>(attributes.size(), vector<float>(lines.size()));
auto yy = std::vector<std::string>(lines.size(), ""); auto yy = vector<string>(lines.size(), "");
auto removeLines = std::vector<int>(); // Lines with missing values auto removeLines = vector<int>(); // Lines with missing values
for (size_t i = 0; i < lines.size(); i++) { for (size_t i = 0; i < lines.size(); i++) {
std::stringstream ss(lines[i]); stringstream ss(lines[i]);
std::string value; string value;
int pos = 0; int pos = 0;
int xIndex = 0; int xIndex = 0;
while (getline(ss, value, ',')) { while (getline(ss, value, ',')) {
@@ -144,21 +146,21 @@ void ArffFiles::generateDataset(int labelIndex)
y = factorize(yy); y = factorize(yy);
} }
std::string ArffFiles::trim(const std::string& source) string ArffFiles::trim(const string& source)
{ {
std::string s(source); string s(source);
s.erase(0, s.find_first_not_of(" '\n\r\t")); s.erase(0, s.find_first_not_of(" '\n\r\t"));
s.erase(s.find_last_not_of(" '\n\r\t") + 1); s.erase(s.find_last_not_of(" '\n\r\t") + 1);
return s; return s;
} }
std::vector<int> ArffFiles::factorize(const std::vector<std::string>& labels_t) vector<int> ArffFiles::factorize(const vector<string>& labels_t)
{ {
std::vector<int> yy; vector<int> yy;
yy.reserve(labels_t.size()); yy.reserve(labels_t.size());
std::map<std::string, int> labelMap; map<string, int> labelMap;
int i = 0; int i = 0;
for (const std::string& label : labels_t) { for (const string& label : labels_t) {
if (labelMap.find(label) == labelMap.end()) { if (labelMap.find(label) == labelMap.end()) {
labelMap[label] = i++; labelMap[label] = i++;
} }

View File

@@ -4,29 +4,31 @@
#include <string> #include <string>
#include <vector> #include <vector>
using namespace std;
class ArffFiles { class ArffFiles {
private: private:
std::vector<std::string> lines; vector<string> lines;
std::vector<std::pair<std::string, std::string>> attributes; vector<pair<string, string>> attributes;
std::string className; string className;
std::string classType; string classType;
std::vector<std::vector<float>> X; vector<vector<float>> X;
std::vector<int> y; vector<int> y;
void generateDataset(int); void generateDataset(int);
void loadCommon(std::string); void loadCommon(string);
public: public:
ArffFiles(); ArffFiles();
void load(const std::string&, bool = true); void load(const string&, bool = true);
void load(const std::string&, const std::string&); void load(const string&, const string&);
std::vector<std::string> getLines() const; vector<string> getLines() const;
unsigned long int getSize() const; unsigned long int getSize() const;
std::string getClassName() const; string getClassName() const;
std::string getClassType() const; string getClassType() const;
static std::string trim(const std::string&); static string trim(const string&);
std::vector<std::vector<float>>& getX(); vector<vector<float>>& getX();
std::vector<int>& getY(); vector<int>& getY();
std::vector<std::pair<std::string, std::string>> getAttributes() const; vector<pair<string, string>> getAttributes() const;
static std::vector<int> factorize(const std::vector<std::string>& labels_t); static vector<int> factorize(const vector<string>& labels_t);
}; };
#endif #endif

View File

@@ -1 +1,2 @@
add_library(ArffFiles ArffFiles.cc) add_library(ArffFiles ArffFiles.cc)
#target_link_libraries(BayesNet "${TORCH_LIBRARIES}")

View File

@@ -1,10 +1,7 @@
include_directories(${BayesNet_SOURCE_DIR}/src/Platform) include_directories(${BayesNet_SOURCE_DIR}/src/Platform)
include_directories(${BayesNet_SOURCE_DIR}/src/BayesNet) include_directories(${BayesNet_SOURCE_DIR}/src/BayesNet)
include_directories(${BayesNet_SOURCE_DIR}/src/PyClassifiers)
include_directories(${Python3_INCLUDE_DIRS})
include_directories(${BayesNet_SOURCE_DIR}/lib/Files) include_directories(${BayesNet_SOURCE_DIR}/lib/Files)
include_directories(${BayesNet_SOURCE_DIR}/lib/mdlp) include_directories(${BayesNet_SOURCE_DIR}/lib/mdlp)
include_directories(${BayesNet_SOURCE_DIR}/lib/argparse/include) include_directories(${BayesNet_SOURCE_DIR}/lib/argparse/include)
include_directories(${BayesNet_SOURCE_DIR}/lib/json/include)
add_executable(BayesNetSample sample.cc ${BayesNet_SOURCE_DIR}/src/Platform/Folding.cc ${BayesNet_SOURCE_DIR}/src/Platform/Models.cc) add_executable(BayesNetSample sample.cc ${BayesNet_SOURCE_DIR}/src/Platform/Folding.cc ${BayesNet_SOURCE_DIR}/src/Platform/Models.cc)
target_link_libraries(BayesNetSample BayesNet ArffFiles mdlp "${TORCH_LIBRARIES}" PyWrap) target_link_libraries(BayesNetSample BayesNet ArffFiles mdlp "${TORCH_LIBRARIES}")

View File

@@ -1,23 +1,24 @@
#include <iostream> #include <iostream>
#include <torch/torch.h> #include <torch/torch.h>
#include <string> #include <string>
#include <thread>
#include <map> #include <map>
#include <argparse/argparse.hpp> #include <argparse/argparse.hpp>
#include <nlohmann/json.hpp>
#include "ArffFiles.h" #include "ArffFiles.h"
#include "BayesMetrics.h" #include "BayesMetrics.h"
#include "CPPFImdlp.h" #include "CPPFImdlp.h"
#include "Folding.h" #include "Folding.h"
#include "Models.h" #include "Models.h"
#include "modelRegister.h"
#include <fstream>
const std::string PATH = "../../data/";
pair<std::vector<mdlp::labels_t>, map<std::string, int>> discretize(std::vector<mdlp::samples_t>& X, mdlp::labels_t& y, std::vector<std::string> features) using namespace std;
const string PATH = "../../data/";
pair<vector<mdlp::labels_t>, map<string, int>> discretize(vector<mdlp::samples_t>& X, mdlp::labels_t& y, vector<string> features)
{ {
std::vector<mdlp::labels_t>Xd; vector<mdlp::labels_t>Xd;
map<std::string, int> maxes; map<string, int> maxes;
auto fimdlp = mdlp::CPPFImdlp(); auto fimdlp = mdlp::CPPFImdlp();
for (int i = 0; i < X.size(); i++) { for (int i = 0; i < X.size(); i++) {
@@ -38,12 +39,12 @@ bool file_exists(const std::string& name)
return false; return false;
} }
} }
pair<std::vector<std::vector<int>>, std::vector<int>> extract_indices(std::vector<int> indices, std::vector<std::vector<int>> X, std::vector<int> y) pair<vector<vector<int>>, vector<int>> extract_indices(vector<int> indices, vector<vector<int>> X, vector<int> y)
{ {
std::vector<std::vector<int>> Xr; // nxm vector<vector<int>> Xr;
std::vector<int> yr; vector<int> yr;
for (int col = 0; col < X.size(); ++col) { for (int col = 0; col < X.size(); ++col) {
Xr.push_back(std::vector<int>()); Xr.push_back(vector<int>());
} }
for (auto index : indices) { for (auto index : indices) {
for (int col = 0; col < X.size(); ++col) { for (int col = 0; col < X.size(); ++col) {
@@ -56,7 +57,7 @@ pair<std::vector<std::vector<int>>, std::vector<int>> extract_indices(std::vecto
int main(int argc, char** argv) int main(int argc, char** argv)
{ {
map<std::string, bool> datasets = { map<string, bool> datasets = {
{"diabetes", true}, {"diabetes", true},
{"ecoli", true}, {"ecoli", true},
{"glass", true}, {"glass", true},
@@ -66,9 +67,10 @@ int main(int argc, char** argv)
{"liver-disorders", true}, {"liver-disorders", true},
{"mfeat-factors", true}, {"mfeat-factors", true},
}; };
auto valid_datasets = std::vector<std::string>(); auto valid_datasets = vector<string>();
transform(datasets.begin(), datasets.end(), back_inserter(valid_datasets), for (auto dataset : datasets) {
[](const pair<std::string, bool>& pair) { return pair.first; }); valid_datasets.push_back(dataset.first);
}
argparse::ArgumentParser program("BayesNetSample"); argparse::ArgumentParser program("BayesNetSample");
program.add_argument("-d", "--dataset") program.add_argument("-d", "--dataset")
.help("Dataset file name") .help("Dataset file name")
@@ -81,23 +83,22 @@ int main(int argc, char** argv)
); );
program.add_argument("-p", "--path") program.add_argument("-p", "--path")
.help(" folder where the data files are located, default") .help(" folder where the data files are located, default")
.default_value(std::string{ PATH } .default_value(string{ PATH }
); );
program.add_argument("-m", "--model") program.add_argument("-m", "--model")
.help("Model to use " + platform::Models::instance()->tostring()) .help("Model to use " + platform::Models::toString())
.action([](const std::string& value) { .action([](const std::string& value) {
static const std::vector<std::string> choices = platform::Models::instance()->getNames(); static const vector<string> choices = platform::Models::getNames();
if (find(choices.begin(), choices.end(), value) != choices.end()) { if (find(choices.begin(), choices.end(), value) != choices.end()) {
return value; return value;
} }
throw runtime_error("Model must be one of " + platform::Models::instance()->tostring()); throw runtime_error("Model must be one of " + platform::Models::toString());
} }
); );
program.add_argument("--discretize").help("Discretize input dataset").default_value(false).implicit_value(true); program.add_argument("--discretize").help("Discretize input dataset").default_value(false).implicit_value(true);
program.add_argument("--dumpcpt").help("Dump CPT Tables").default_value(false).implicit_value(true);
program.add_argument("--stratified").help("If Stratified KFold is to be done").default_value(false).implicit_value(true); program.add_argument("--stratified").help("If Stratified KFold is to be done").default_value(false).implicit_value(true);
program.add_argument("--tensors").help("Use tensors to store samples").default_value(false).implicit_value(true); program.add_argument("--tensors").help("Use tensors to store samples").default_value(false).implicit_value(true);
program.add_argument("-f", "--folds").help("Number of folds").default_value(5).scan<'i', int>().action([](const std::string& value) { program.add_argument("-f", "--folds").help("Number of folds").default_value(5).scan<'i', int>().action([](const string& value) {
try { try {
auto k = stoi(value); auto k = stoi(value);
if (k < 2) { if (k < 2) {
@@ -112,27 +113,26 @@ int main(int argc, char** argv)
throw runtime_error("Number of folds must be an integer"); throw runtime_error("Number of folds must be an integer");
}}); }});
program.add_argument("-s", "--seed").help("Random seed").default_value(-1).scan<'i', int>(); program.add_argument("-s", "--seed").help("Random seed").default_value(-1).scan<'i', int>();
bool class_last, stratified, tensors, dump_cpt; bool class_last, stratified, tensors;
std::string model_name, file_name, path, complete_file_name; string model_name, file_name, path, complete_file_name;
int nFolds, seed; int nFolds, seed;
try { try {
program.parse_args(argc, argv); program.parse_args(argc, argv);
file_name = program.get<std::string>("dataset"); file_name = program.get<string>("dataset");
path = program.get<std::string>("path"); path = program.get<string>("path");
model_name = program.get<std::string>("model"); model_name = program.get<string>("model");
complete_file_name = path + file_name + ".arff"; complete_file_name = path + file_name + ".arff";
stratified = program.get<bool>("stratified"); stratified = program.get<bool>("stratified");
tensors = program.get<bool>("tensors"); tensors = program.get<bool>("tensors");
nFolds = program.get<int>("folds"); nFolds = program.get<int>("folds");
seed = program.get<int>("seed"); seed = program.get<int>("seed");
dump_cpt = program.get<bool>("dumpcpt");
class_last = datasets[file_name]; class_last = datasets[file_name];
if (!file_exists(complete_file_name)) { if (!file_exists(complete_file_name)) {
throw runtime_error("Data File " + path + file_name + ".arff" + " does not exist"); throw runtime_error("Data File " + path + file_name + ".arff" + " does not exist");
} }
} }
catch (const exception& err) { catch (const exception& err) {
cerr << err.what() << std::endl; cerr << err.what() << endl;
cerr << program; cerr << program;
exit(1); exit(1);
} }
@@ -143,64 +143,55 @@ int main(int argc, char** argv)
auto handler = ArffFiles(); auto handler = ArffFiles();
handler.load(complete_file_name, class_last); handler.load(complete_file_name, class_last);
// Get Dataset X, y // Get Dataset X, y
std::vector<mdlp::samples_t>& X = handler.getX(); vector<mdlp::samples_t>& X = handler.getX();
mdlp::labels_t& y = handler.getY(); mdlp::labels_t& y = handler.getY();
// Get className & Features // Get className & Features
auto className = handler.getClassName(); auto className = handler.getClassName();
std::vector<std::string> features; vector<string> features;
auto attributes = handler.getAttributes(); for (auto feature : handler.getAttributes()) {
transform(attributes.begin(), attributes.end(), back_inserter(features), features.push_back(feature.first);
[](const pair<std::string, std::string>& item) { return item.first; }); }
// Discretize Dataset // Discretize Dataset
auto [Xd, maxes] = discretize(X, y, features); auto [Xd, maxes] = discretize(X, y, features);
maxes[className] = *max_element(y.begin(), y.end()) + 1; maxes[className] = *max_element(y.begin(), y.end()) + 1;
map<std::string, std::vector<int>> states; map<string, vector<int>> states;
for (auto feature : features) { for (auto feature : features) {
states[feature] = std::vector<int>(maxes[feature]); states[feature] = vector<int>(maxes[feature]);
} }
states[className] = std::vector<int>(maxes[className]); states[className] = vector<int>(maxes[className]);
auto clf = platform::Models::instance()->create(model_name);
bayesnet::BaseClassifier* clf = platform::Models::get(model_name);
clf->fit(Xd, y, features, className, states); clf->fit(Xd, y, features, className, states);
if (dump_cpt) {
std::cout << "--- CPT Tables ---" << std::endl;
clf->dump_cpt();
}
auto lines = clf->show();
for (auto line : lines) {
std::cout << line << std::endl;
}
std::cout << "--- Topological Order ---" << std::endl;
auto order = clf->topological_order();
for (auto name : order) {
std::cout << name << ", ";
}
std::cout << "end." << std::endl;
auto score = clf->score(Xd, y); auto score = clf->score(Xd, y);
std::cout << "Score: " << score << std::endl; auto lines = clf->show();
auto graph = clf->graph(); auto graph = clf->graph();
for (auto line : lines) {
cout << line << endl;
}
cout << "Score: " << score << endl;
auto dot_file = model_name + "_" + file_name; auto dot_file = model_name + "_" + file_name;
ofstream file(dot_file + ".dot"); ofstream file(dot_file + ".dot");
file << graph; file << graph;
file.close(); file.close();
std::cout << "Graph saved in " << model_name << "_" << file_name << ".dot" << std::endl; cout << "Graph saved in " << model_name << "_" << file_name << ".dot" << endl;
std::cout << "dot -Tpng -o " + dot_file + ".png " + dot_file + ".dot " << std::endl; cout << "dot -Tpng -o " + dot_file + ".png " + dot_file + ".dot " << endl;
std::string stratified_string = stratified ? " Stratified" : ""; string stratified_string = stratified ? " Stratified" : "";
std::cout << nFolds << " Folds" << stratified_string << " Cross validation" << std::endl; cout << nFolds << " Folds" << stratified_string << " Cross validation" << endl;
std::cout << "==========================================" << std::endl; cout << "==========================================" << endl;
torch::Tensor Xt = torch::zeros({ static_cast<int>(Xd.size()), static_cast<int>(Xd[0].size()) }, torch::kInt32); torch::Tensor Xt = torch::zeros({ static_cast<int>(Xd.size()), static_cast<int>(Xd[0].size()) }, torch::kInt32);
torch::Tensor yt = torch::tensor(y, torch::kInt32); torch::Tensor yt = torch::tensor(y, torch::kInt32);
for (int i = 0; i < features.size(); ++i) { for (int i = 0; i < features.size(); ++i) {
Xt.index_put_({ i, "..." }, torch::tensor(Xd[i], torch::kInt32)); Xt.index_put_({ i, "..." }, torch::tensor(Xd[i], torch::kInt32));
} }
float total_score = 0, total_score_train = 0, score_train, score_test; float total_score = 0, total_score_train = 0, score_train, score_test;
platform::Fold* fold; Fold* fold;
if (stratified) if (stratified)
fold = new platform::StratifiedKFold(nFolds, y, seed); fold = new StratifiedKFold(nFolds, y, seed);
else else
fold = new platform::KFold(nFolds, y.size(), seed); fold = new KFold(nFolds, y.size(), seed);
for (auto i = 0; i < nFolds; ++i) { for (auto i = 0; i < nFolds; ++i) {
auto [train, test] = fold->getFold(i); auto [train, test] = fold->getFold(i);
std::cout << "Fold: " << i + 1 << std::endl; cout << "Fold: " << i + 1 << endl;
if (tensors) { if (tensors) {
auto ttrain = torch::tensor(train, torch::kInt64); auto ttrain = torch::tensor(train, torch::kInt64);
auto ttest = torch::tensor(test, torch::kInt64); auto ttest = torch::tensor(test, torch::kInt64);
@@ -209,7 +200,6 @@ int main(int argc, char** argv)
torch::Tensor Xtestt = torch::index_select(Xt, 1, ttest); torch::Tensor Xtestt = torch::index_select(Xt, 1, ttest);
torch::Tensor ytestt = yt.index({ ttest }); torch::Tensor ytestt = yt.index({ ttest });
clf->fit(Xtraint, ytraint, features, className, states); clf->fit(Xtraint, ytraint, features, className, states);
auto temp = clf->predict(Xtraint);
score_train = clf->score(Xtraint, ytraint); score_train = clf->score(Xtraint, ytraint);
score_test = clf->score(Xtestt, ytestt); score_test = clf->score(Xtestt, ytestt);
} else { } else {
@@ -219,17 +209,14 @@ int main(int argc, char** argv)
score_train = clf->score(Xtrain, ytrain); score_train = clf->score(Xtrain, ytrain);
score_test = clf->score(Xtest, ytest); score_test = clf->score(Xtest, ytest);
} }
if (dump_cpt) {
std::cout << "--- CPT Tables ---" << std::endl;
clf->dump_cpt();
}
total_score_train += score_train; total_score_train += score_train;
total_score += score_test; total_score += score_test;
std::cout << "Score Train: " << score_train << std::endl; cout << "Score Train: " << score_train << endl;
std::cout << "Score Test : " << score_test << std::endl; cout << "Score Test : " << score_test << endl;
std::cout << "-------------------------------------------------------------------------------" << std::endl; cout << "-------------------------------------------------------------------------------" << endl;
} }
std::cout << "**********************************************************************************" << std::endl; cout << "**********************************************************************************" << endl;
std::cout << "Average Score Train: " << total_score_train / nFolds << std::endl; cout << "Average Score Train: " << total_score_train / nFolds << endl;
std::cout << "Average Score Test : " << total_score / nFolds << std::endl;return 0; cout << "Average Score Test : " << total_score / nFolds << endl;
return 0;
} }

View File

@@ -2,16 +2,14 @@
namespace bayesnet { namespace bayesnet {
AODE::AODE() : Ensemble() {} AODE::AODE() : Ensemble() {}
void AODE::buildModel(const torch::Tensor& weights) void AODE::train()
{ {
models.clear(); models.clear();
for (int i = 0; i < features.size(); ++i) { for (int i = 0; i < features.size(); ++i) {
models.push_back(std::make_unique<SPODE>(i)); models.push_back(std::make_unique<SPODE>(i));
} }
n_models = models.size();
significanceModels = std::vector<double>(n_models, 1.0);
} }
std::vector<std::string> AODE::graph(const std::string& title) const vector<string> AODE::graph(string title)
{ {
return Ensemble::graph(title); return Ensemble::graph(title);
} }

View File

@@ -5,11 +5,10 @@
namespace bayesnet { namespace bayesnet {
class AODE : public Ensemble { class AODE : public Ensemble {
protected: protected:
void buildModel(const torch::Tensor& weights) override; void train() override;
public: public:
AODE(); AODE();
virtual ~AODE() {}; vector<string> graph(string title = "AODE") override;
std::vector<std::string> graph(const std::string& title = "AODE") const override;
}; };
} }
#endif #endif

View File

@@ -1,39 +0,0 @@
#include "AODELd.h"
namespace bayesnet {
AODELd::AODELd() : Ensemble(), Proposal(dataset, features, className) {}
AODELd& AODELd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_)
{
checkInput(X_, y_);
features = features_;
className = className_;
Xf = X_;
y = y_;
// Fills std::vectors Xv & yv with the data from tensors X_ (discretized) & y
states = fit_local_discretization(y);
// We have discretized the input data
// 1st we need to fit the model to build the normal TAN structure, TAN::fit initializes the base Bayesian network
Ensemble::fit(dataset, features, className, states);
return *this;
}
void AODELd::buildModel(const torch::Tensor& weights)
{
models.clear();
for (int i = 0; i < features.size(); ++i) {
models.push_back(std::make_unique<SPODELd>(i));
}
n_models = models.size();
significanceModels = std::vector<double>(n_models, 1.0);
}
void AODELd::trainModel(const torch::Tensor& weights)
{
for (const auto& model : models) {
model->fit(Xf, y, features, className, states);
}
}
std::vector<std::string> AODELd::graph(const std::string& name) const
{
return Ensemble::graph(name);
}
}

View File

@@ -1,20 +0,0 @@
#ifndef AODELD_H
#define AODELD_H
#include "Ensemble.h"
#include "Proposal.h"
#include "SPODELd.h"
namespace bayesnet {
class AODELd : public Ensemble, public Proposal {
protected:
void trainModel(const torch::Tensor& weights) override;
void buildModel(const torch::Tensor& weights) override;
public:
AODELd();
AODELd& fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_) override;
virtual ~AODELd() = default;
std::vector<std::string> graph(const std::string& name = "AODELd") const override;
static inline std::string version() { return "0.0.1"; };
};
}
#endif // !AODELD_H

View File

@@ -1,37 +1,23 @@
#ifndef BASE_H #ifndef BASE_H
#define BASE_H #define BASE_H
#include <torch/torch.h> #include <torch/torch.h>
#include <nlohmann/json.hpp>
#include <vector> #include <vector>
namespace bayesnet { namespace bayesnet {
enum status_t { NORMAL, WARNING, ERROR }; using namespace std;
class BaseClassifier { class BaseClassifier {
public: public:
// X is nxm std::vector, y is nx1 std::vector virtual BaseClassifier& fit(vector<vector<int>>& X, vector<int>& y, vector<string>& features, string className, map<string, vector<int>>& states) = 0;
virtual BaseClassifier& fit(std::vector<std::vector<int>>& X, std::vector<int>& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states) = 0; virtual BaseClassifier& fit(torch::Tensor& X, torch::Tensor& y, vector<string>& features, string className, map<string, vector<int>>& states) = 0;
// X is nxm tensor, y is nx1 tensor vector<int> virtual predict(vector<vector<int>>& X) = 0;
virtual BaseClassifier& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states) = 0; float virtual score(vector<vector<int>>& X, vector<int>& y) = 0;
virtual BaseClassifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states) = 0;
virtual BaseClassifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights) = 0;
virtual ~BaseClassifier() = default;
torch::Tensor virtual predict(torch::Tensor& X) = 0;
std::vector<int> virtual predict(std::vector<std::vector<int >>& X) = 0;
status_t virtual getStatus() const = 0;
float virtual score(std::vector<std::vector<int>>& X, std::vector<int>& y) = 0;
float virtual score(torch::Tensor& X, torch::Tensor& y) = 0; float virtual score(torch::Tensor& X, torch::Tensor& y) = 0;
int virtual getNumberOfNodes()const = 0; int virtual getNumberOfNodes() = 0;
int virtual getNumberOfEdges()const = 0; int virtual getNumberOfEdges() = 0;
int virtual getNumberOfStates() const = 0; int virtual getNumberOfStates() = 0;
std::vector<std::string> virtual show() const = 0; vector<string> virtual show() = 0;
std::vector<std::string> virtual graph(const std::string& title = "") const = 0; vector<string> virtual graph(string title = "") = 0;
virtual std::string getVersion() = 0; virtual ~BaseClassifier() = default;
std::vector<std::string> virtual topological_order() = 0; const string inline getVersion() const { return "0.1.0"; };
void virtual dump_cpt()const = 0;
virtual void setHyperparameters(const nlohmann::json& hyperparameters) = 0;
std::vector<std::string>& getValidHyperparameters() { return validHyperparameters; }
protected:
virtual void trainModel(const torch::Tensor& weights) = 0;
std::vector<std::string> validHyperparameters;
}; };
} }
#endif #endif

View File

@@ -1,89 +1,57 @@
#include "BayesMetrics.h" #include "BayesMetrics.h"
#include "Mst.h" #include "Mst.h"
namespace bayesnet { namespace bayesnet {
//samples is n+1xm tensor used to fit the model Metrics::Metrics(torch::Tensor& samples, vector<string>& features, string& className, int classNumStates)
Metrics::Metrics(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int classNumStates)
: samples(samples) : samples(samples)
, features(features) , features(features)
, className(className) , className(className)
, classNumStates(classNumStates) , classNumStates(classNumStates)
{ {
} }
//samples is nxm std::vector used to fit the model Metrics::Metrics(const vector<vector<int>>& vsamples, const vector<int>& labels, const vector<string>& features, const string& className, const int classNumStates)
Metrics::Metrics(const std::vector<std::vector<int>>& vsamples, const std::vector<int>& labels, const std::vector<std::string>& features, const std::string& className, const int classNumStates)
: features(features) : features(features)
, className(className) , className(className)
, classNumStates(classNumStates) , classNumStates(classNumStates)
, samples(torch::zeros({ static_cast<int>(vsamples[0].size()), static_cast<int>(vsamples.size() + 1) }, torch::kInt32)) , samples(torch::zeros({ static_cast<int>(vsamples[0].size()), static_cast<int>(vsamples.size() + 1) }, torch::kInt32))
{ {
for (int i = 0; i < vsamples.size(); ++i) { for (int i = 0; i < vsamples.size(); ++i) {
samples.index_put_({ i, "..." }, torch::tensor(vsamples[i], torch::kInt32)); samples.index_put_({ "...", i }, torch::tensor(vsamples[i], torch::kInt32));
} }
samples.index_put_({ -1, "..." }, torch::tensor(labels, torch::kInt32)); samples.index_put_({ "...", -1 }, torch::tensor(labels, torch::kInt32));
} }
std::vector<int> Metrics::SelectKBestWeighted(const torch::Tensor& weights, bool ascending, unsigned k) vector<pair<string, string>> Metrics::doCombinations(const vector<string>& source)
{ {
// Return the K Best features vector<pair<string, string>> result;
auto n = samples.size(0) - 1; for (int i = 0; i < source.size(); ++i) {
if (k == 0) { string temp = source[i];
k = n; for (int j = i + 1; j < source.size(); ++j) {
} result.push_back({ temp, source[j] });
// compute scores
scoresKBest.clear();
featuresKBest.clear();
auto label = samples.index({ -1, "..." });
for (int i = 0; i < n; ++i) {
scoresKBest.push_back(mutualInformation(label, samples.index({ i, "..." }), weights));
featuresKBest.push_back(i);
}
// sort & reduce scores and features
if (ascending) {
sort(featuresKBest.begin(), featuresKBest.end(), [&](int i, int j)
{ return scoresKBest[i] < scoresKBest[j]; });
sort(scoresKBest.begin(), scoresKBest.end(), std::less<double>());
if (k < n) {
for (int i = 0; i < n - k; ++i) {
featuresKBest.erase(featuresKBest.begin());
scoresKBest.erase(scoresKBest.begin());
}
} }
} else {
sort(featuresKBest.begin(), featuresKBest.end(), [&](int i, int j)
{ return scoresKBest[i] > scoresKBest[j]; });
sort(scoresKBest.begin(), scoresKBest.end(), std::greater<double>());
featuresKBest.resize(k);
scoresKBest.resize(k);
} }
return featuresKBest; return result;
} }
std::vector<double> Metrics::getScoresKBest() const torch::Tensor Metrics::conditionalEdge()
{ {
return scoresKBest; auto result = vector<double>();
} auto source = vector<string>(features);
torch::Tensor Metrics::conditionalEdge(const torch::Tensor& weights)
{
auto result = std::vector<double>();
auto source = std::vector<std::string>(features);
source.push_back(className); source.push_back(className);
auto combinations = doCombinations(source); auto combinations = doCombinations(source);
// Compute class prior // Compute class prior
auto margin = torch::zeros({ classNumStates }, torch::kFloat); auto margin = torch::zeros({ classNumStates });
for (int value = 0; value < classNumStates; ++value) { for (int value = 0; value < classNumStates; ++value) {
auto mask = samples.index({ -1, "..." }) == value; auto mask = samples.index({ "...", -1 }) == value;
margin[value] = mask.sum().item<double>() / samples.size(1); margin[value] = mask.sum().item<float>() / samples.sizes()[0];
} }
for (auto [first, second] : combinations) { for (auto [first, second] : combinations) {
int index_first = find(features.begin(), features.end(), first) - features.begin(); int index_first = find(features.begin(), features.end(), first) - features.begin();
int index_second = find(features.begin(), features.end(), second) - features.begin(); int index_second = find(features.begin(), features.end(), second) - features.begin();
double accumulated = 0; double accumulated = 0;
for (int value = 0; value < classNumStates; ++value) { for (int value = 0; value < classNumStates; ++value) {
auto mask = samples.index({ -1, "..." }) == value; auto mask = samples.index({ "...", -1 }) == value;
auto first_dataset = samples.index({ index_first, mask }); auto first_dataset = samples.index({ mask, index_first });
auto second_dataset = samples.index({ index_second, mask }); auto second_dataset = samples.index({ mask, index_second });
auto weights_dataset = weights.index({ mask }); auto mi = mutualInformation(first_dataset, second_dataset);
auto mi = mutualInformation(first_dataset, second_dataset, weights_dataset); auto pb = margin[value].item<float>();
auto pb = margin[value].item<double>();
accumulated += pb * mi; accumulated += pb * mi;
} }
result.push_back(accumulated); result.push_back(accumulated);
@@ -99,33 +67,32 @@ namespace bayesnet {
} }
return matrix; return matrix;
} }
// To use in Python // To Interface with Python
std::vector<float> Metrics::conditionalEdgeWeights(std::vector<float>& weights_) vector<float> Metrics::conditionalEdgeWeights()
{ {
const torch::Tensor weights = torch::tensor(weights_); auto matrix = conditionalEdge();
auto matrix = conditionalEdge(weights);
std::vector<float> v(matrix.data_ptr<float>(), matrix.data_ptr<float>() + matrix.numel()); std::vector<float> v(matrix.data_ptr<float>(), matrix.data_ptr<float>() + matrix.numel());
return v; return v;
} }
double Metrics::entropy(const torch::Tensor& feature, const torch::Tensor& weights) double Metrics::entropy(torch::Tensor& feature)
{ {
torch::Tensor counts = feature.bincount(weights); torch::Tensor counts = feature.bincount();
double totalWeight = counts.sum().item<double>(); int totalWeight = counts.sum().item<int>();
torch::Tensor probs = counts.to(torch::kFloat) / totalWeight; torch::Tensor probs = counts.to(torch::kFloat) / totalWeight;
torch::Tensor logProbs = torch::log(probs); torch::Tensor logProbs = torch::log(probs);
torch::Tensor entropy = -probs * logProbs; torch::Tensor entropy = -probs * logProbs;
return entropy.nansum().item<double>(); return entropy.nansum().item<double>();
} }
// H(Y|X) = sum_{x in X} p(x) H(Y|X=x) // H(Y|X) = sum_{x in X} p(x) H(Y|X=x)
double Metrics::conditionalEntropy(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& weights) double Metrics::conditionalEntropy(torch::Tensor& firstFeature, torch::Tensor& secondFeature)
{ {
int numSamples = firstFeature.sizes()[0]; int numSamples = firstFeature.sizes()[0];
torch::Tensor featureCounts = secondFeature.bincount(weights); torch::Tensor featureCounts = secondFeature.bincount();
std::unordered_map<int, std::unordered_map<int, double>> jointCounts; unordered_map<int, unordered_map<int, double>> jointCounts;
double totalWeight = 0; double totalWeight = 0;
for (auto i = 0; i < numSamples; i++) { for (auto i = 0; i < numSamples; i++) {
jointCounts[secondFeature[i].item<int>()][firstFeature[i].item<int>()] += weights[i].item<double>(); jointCounts[secondFeature[i].item<int>()][firstFeature[i].item<int>()] += 1;
totalWeight += weights[i].item<float>(); totalWeight += 1;
} }
if (totalWeight == 0) if (totalWeight == 0)
return 0; return 0;
@@ -146,16 +113,16 @@ namespace bayesnet {
return entropyValue; return entropyValue;
} }
// I(X;Y) = H(Y) - H(Y|X) // I(X;Y) = H(Y) - H(Y|X)
double Metrics::mutualInformation(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& weights) double Metrics::mutualInformation(torch::Tensor& firstFeature, torch::Tensor& secondFeature)
{ {
return entropy(firstFeature, weights) - conditionalEntropy(firstFeature, secondFeature, weights); return entropy(firstFeature) - conditionalEntropy(firstFeature, secondFeature);
} }
/* /*
Compute the maximum spanning tree considering the weights as distances Compute the maximum spanning tree considering the weights as distances
and the indices of the weights as nodes of this square matrix using and the indices of the weights as nodes of this square matrix using
Kruskal algorithm Kruskal algorithm
*/ */
std::vector<std::pair<int, int>> Metrics::maximumSpanningTree(const std::vector<std::string>& features, const torch::Tensor& weights, const int root) vector<pair<int, int>> Metrics::maximumSpanningTree(vector<string> features, Tensor& weights, int root)
{ {
auto mst = MST(features, weights, root); auto mst = MST(features, weights, root);
return mst.maximumSpanningTree(); return mst.maximumSpanningTree();

View File

@@ -4,46 +4,25 @@
#include <vector> #include <vector>
#include <string> #include <string>
namespace bayesnet { namespace bayesnet {
using namespace std;
using namespace torch;
class Metrics { class Metrics {
private: private:
int classNumStates = 0; Tensor samples;
std::vector<double> scoresKBest; vector<string> features;
std::vector<int> featuresKBest; // sorted indices of the features string className;
double conditionalEntropy(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& weights); int classNumStates;
protected:
torch::Tensor samples; // n+1xm torch::Tensor used to fit the model where samples[-1] is the y std::vector
std::string className;
double entropy(const torch::Tensor& feature, const torch::Tensor& weights);
std::vector<std::string> features;
template <class T>
std::vector<std::pair<T, T>> doCombinations(const std::vector<T>& source)
{
std::vector<std::pair<T, T>> result;
for (int i = 0; i < source.size(); ++i) {
T temp = source[i];
for (int j = i + 1; j < source.size(); ++j) {
result.push_back({ temp, source[j] });
}
}
return result;
}
template <class T>
T pop_first(std::vector<T>& v)
{
T temp = v[0];
v.erase(v.begin());
return temp;
}
public: public:
Metrics() = default; Metrics() = default;
Metrics(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int classNumStates); Metrics(Tensor&, vector<string>&, string&, int);
Metrics(const std::vector<std::vector<int>>& vsamples, const std::vector<int>& labels, const std::vector<std::string>& features, const std::string& className, const int classNumStates); Metrics(const vector<vector<int>>&, const vector<int>&, const vector<string>&, const string&, const int);
std::vector<int> SelectKBestWeighted(const torch::Tensor& weights, bool ascending = false, unsigned k = 0); double entropy(Tensor&);
std::vector<double> getScoresKBest() const; double conditionalEntropy(Tensor&, Tensor&);
double mutualInformation(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& weights); double mutualInformation(Tensor&, Tensor&);
std::vector<float> conditionalEdgeWeights(std::vector<float>& weights); // To use in Python vector<float> conditionalEdgeWeights();
torch::Tensor conditionalEdge(const torch::Tensor& weights); Tensor conditionalEdge();
std::vector<std::pair<int, int>> maximumSpanningTree(const std::vector<std::string>& features, const torch::Tensor& weights, const int root); vector<pair<string, string>> doCombinations(const vector<string>&);
vector<pair<int, int>> maximumSpanningTree(vector<string> features, Tensor& weights, int root);
}; };
} }
#endif #endif

View File

@@ -1,209 +0,0 @@
#include <set>
#include <functional>
#include <limits.h>
#include "BoostAODE.h"
#include "Colors.h"
#include "Folding.h"
#include "Paths.h"
#include "CFS.h"
#include "FCBF.h"
#include "IWSS.h"
namespace bayesnet {
    BoostAODE::BoostAODE() : Ensemble()
    {
        // Declare the hyperparameter names accepted by setHyperparameters();
        // any other key passed in is rejected there.
        validHyperparameters = { "repeatSparent", "maxModels", "ascending", "convergence", "threshold", "select_features", "tolerance" };
    }
    void BoostAODE::buildModel(const torch::Tensor& weights)
    {
        // Prepares the training data for the boosting loop; the member models
        // themselves are created later, in trainModel().
        // Models shall be built in trainModel
        models.clear();
        n_models = 0;
        // Prepare the validation dataset
        auto y_ = dataset.index({ -1, "..." }); // labels live in the last row of dataset
        if (convergence) {
            // Prepare train & validation sets from train data
            // NOTE(review): fold 0 of a 5-fold stratified split with a fixed
            // seed (271) is used as the hold-out set for early stopping —
            // confirm the seed/fold choice is intentional.
            auto fold = platform::StratifiedKFold(5, y_, 271);
            dataset_ = torch::clone(dataset);
            // save input dataset
            auto [train, test] = fold.getFold(0);
            auto train_t = torch::tensor(train);
            auto test_t = torch::tensor(test);
            // Get train and validation sets
            X_train = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), train_t });
            y_train = dataset.index({ -1, train_t });
            X_test = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), test_t });
            y_test = dataset.index({ -1, test_t });
            // From here on the classifier trains on the train split only; the
            // original full dataset is kept in dataset_.
            dataset = X_train;
            m = X_train.size(1);
            auto n_classes = states.at(className).size();
            metrics = Metrics(dataset, features, className, n_classes);
            // Build dataset with train data
            buildDataset(y_train);
        } else {
            // Use all data to train
            X_train = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), "..." });
            y_train = y_;
        }
    }
void BoostAODE::setHyperparameters(const nlohmann::json& hyperparameters_)
{
auto hyperparameters = hyperparameters_;
if (hyperparameters.contains("repeatSparent")) {
repeatSparent = hyperparameters["repeatSparent"];
hyperparameters.erase("repeatSparent");
}
if (hyperparameters.contains("maxModels")) {
maxModels = hyperparameters["maxModels"];
hyperparameters.erase("maxModels");
}
if (hyperparameters.contains("ascending")) {
ascending = hyperparameters["ascending"];
hyperparameters.erase("ascending");
}
if (hyperparameters.contains("convergence")) {
convergence = hyperparameters["convergence"];
hyperparameters.erase("convergence");
}
if (hyperparameters.contains("threshold")) {
threshold = hyperparameters["threshold"];
hyperparameters.erase("threshold");
}
if (hyperparameters.contains("tolerance")) {
tolerance = hyperparameters["tolerance"];
hyperparameters.erase("tolerance");
}
if (hyperparameters.contains("select_features")) {
auto selectedAlgorithm = hyperparameters["select_features"];
std::vector<std::string> algos = { "IWSS", "FCBF", "CFS" };
selectFeatures = true;
algorithm = selectedAlgorithm;
if (std::find(algos.begin(), algos.end(), selectedAlgorithm) == algos.end()) {
throw std::invalid_argument("Invalid selectFeatures value [IWSS, FCBF, CFS]");
}
hyperparameters.erase("select_features");
}
if (!hyperparameters.empty()) {
throw std::invalid_argument("Invalid hyperparameters" + hyperparameters.dump());
}
}
std::unordered_set<int> BoostAODE::initializeModels()
{
std::unordered_set<int> featuresUsed;
torch::Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);
int maxFeatures = 0;
if (algorithm == "CFS") {
featureSelector = new CFS(dataset, features, className, maxFeatures, states.at(className).size(), weights_);
} else if (algorithm == "IWSS") {
if (threshold < 0 || threshold >0.5) {
throw std::invalid_argument("Invalid threshold value for IWSS [0, 0.5]");
}
featureSelector = new IWSS(dataset, features, className, maxFeatures, states.at(className).size(), weights_, threshold);
} else if (algorithm == "FCBF") {
if (threshold < 1e-7 || threshold > 1) {
throw std::invalid_argument("Invalid threshold value [1e-7, 1]");
}
featureSelector = new FCBF(dataset, features, className, maxFeatures, states.at(className).size(), weights_, threshold);
}
featureSelector->fit();
auto cfsFeatures = featureSelector->getFeatures();
for (const int& feature : cfsFeatures) {
// std::cout << "Feature: [" << feature << "] " << feature << " " << features.at(feature) << std::endl;
featuresUsed.insert(feature);
std::unique_ptr<Classifier> model = std::make_unique<SPODE>(feature);
model->fit(dataset, features, className, states, weights_);
models.push_back(std::move(model));
significanceModels.push_back(1.0);
n_models++;
}
delete featureSelector;
return featuresUsed;
}
void BoostAODE::trainModel(const torch::Tensor& weights)
{
std::unordered_set<int> featuresUsed;
if (selectFeatures) {
featuresUsed = initializeModels();
}
if (maxModels == 0)
maxModels = .1 * n > 10 ? .1 * n : n;
torch::Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);
bool exitCondition = false;
// Variables to control the accuracy finish condition
double priorAccuracy = 0.0;
double delta = 1.0;
double threshold = 1e-4;
int count = 0; // number of times the accuracy is lower than the threshold
fitted = true; // to enable predict
// Step 0: Set the finish condition
// if not repeatSparent a finish condition is run out of features
// n_models == maxModels
// epsilon sub t > 0.5 => inverse the weights policy
// validation error is not decreasing
while (!exitCondition) {
// Step 1: Build ranking with mutual information
auto featureSelection = metrics.SelectKBestWeighted(weights_, ascending, n); // Get all the features sorted
std::unique_ptr<Classifier> model;
auto feature = featureSelection[0];
if (!repeatSparent || featuresUsed.size() < featureSelection.size()) {
bool used = true;
for (const auto& feat : featureSelection) {
if (std::find(featuresUsed.begin(), featuresUsed.end(), feat) != featuresUsed.end()) {
continue;
}
used = false;
feature = feat;
break;
}
if (used) {
exitCondition = true;
continue;
}
}
featuresUsed.insert(feature);
model = std::make_unique<SPODE>(feature);
model->fit(dataset, features, className, states, weights_);
auto ypred = model->predict(X_train);
// Step 3.1: Compute the classifier amout of say
auto mask_wrong = ypred != y_train;
auto mask_right = ypred == y_train;
auto masked_weights = weights_ * mask_wrong.to(weights_.dtype());
double epsilon_t = masked_weights.sum().item<double>();
double wt = (1 - epsilon_t) / epsilon_t;
double alpha_t = epsilon_t == 0 ? 1 : 0.5 * log(wt);
// Step 3.2: Update weights for next classifier
// Step 3.2.1: Update weights of wrong samples
weights_ += mask_wrong.to(weights_.dtype()) * exp(alpha_t) * weights_;
// Step 3.2.2: Update weights of right samples
weights_ += mask_right.to(weights_.dtype()) * exp(-alpha_t) * weights_;
// Step 3.3: Normalise the weights
double totalWeights = torch::sum(weights_).item<double>();
weights_ = weights_ / totalWeights;
// Step 3.4: Store classifier and its accuracy to weigh its future vote
models.push_back(std::move(model));
significanceModels.push_back(alpha_t);
n_models++;
if (convergence) {
auto y_val_predict = predict(X_test);
double accuracy = (y_val_predict == y_test).sum().item<double>() / (double)y_test.size(0);
if (priorAccuracy == 0) {
priorAccuracy = accuracy;
} else {
delta = accuracy - priorAccuracy;
}
if (delta < threshold) {
count++;
}
}
exitCondition = n_models >= maxModels && repeatSparent || epsilon_t > 0.5 || count > tolerance;
}
if (featuresUsed.size() != features.size()) {
status = WARNING;
}
}
    std::vector<std::string> BoostAODE::graph(const std::string& title) const
    {
        // Delegate to the ensemble: one graph fragment per member model.
        return Ensemble::graph(title);
    }
}

View File

@@ -1,33 +0,0 @@
#ifndef BOOSTAODE_H
#define BOOSTAODE_H
#include "Ensemble.h"
#include <map>
#include "SPODE.h"
#include "FeatureSelect.h"
namespace bayesnet {
    // Boosted ensemble of SPODE classifiers (AdaBoost-style sample
    // reweighting), optionally seeded by a feature-selection pass.
    class BoostAODE : public Ensemble {
    public:
        BoostAODE();
        virtual ~BoostAODE() = default;
        // One DOT graph fragment per boosted SPODE.
        std::vector<std::string> graph(const std::string& title = "BoostAODE") const override;
        // Accepted keys: repeatSparent, maxModels, ascending, convergence,
        // threshold, select_features, tolerance. Unknown keys throw.
        void setHyperparameters(const nlohmann::json& hyperparameters) override;
    protected:
        void buildModel(const torch::Tensor& weights) override;
        void trainModel(const torch::Tensor& weights) override;
    private:
        torch::Tensor dataset_; // copy of the full dataset saved before the train/validation split
        torch::Tensor X_train, y_train, X_test, y_test; // split produced by buildModel when convergence is on
        // Feature-selection bootstrap; returns the feature indices already consumed.
        std::unordered_set<int> initializeModels();
        // Hyperparameters
        bool repeatSparent = false; // if true, a feature can be selected more than once
        int maxModels = 0; // 0 means "derive from the number of features" in trainModel
        int tolerance = 0; // how many non-improving validation rounds are allowed
        bool ascending = false; //Process KBest features ascending or descending order
        bool convergence = false; //if true, stop when the model does not improve
        bool selectFeatures = false; // if true, use feature selection
        std::string algorithm = ""; // Selected feature selection algorithm
        FeatureSelect* featureSelector = nullptr; // owned only for the duration of initializeModels()
        double threshold = -1; // threshold for IWSS/FCBF feature selection
    };
}
#endif

View File

@@ -1,72 +0,0 @@
#include "CFS.h"
#include <limits>
#include "bayesnetUtils.h"
namespace bayesnet {
    void CFS::fit()
    {
        // Greedy forward selection maximising the CFS merit (Hall's thesis):
        // rank features by symmetrical uncertainty with the label, then
        // repeatedly add the candidate that most improves the subset merit
        // until computeContinueCondition() says stop.
        initialize();
        computeSuLabels();
        auto featureOrder = argsort(suLabels); // sort descending order
        auto continueCondition = true;
        auto feature = featureOrder[0];
        selectedFeatures.push_back(feature);
        selectedScores.push_back(suLabels[feature]);
        // NOTE(review): the push_back above is immediately undone here, so only
        // the score survives and selectedScores ends up one entry longer than
        // selectedFeatures — confirm this offset is intentional.
        selectedFeatures.erase(selectedFeatures.begin());
        while (continueCondition) {
            double merit = std::numeric_limits<double>::lowest();
            int bestFeature = -1;
            for (auto feature : featureOrder) { // NOTE(review): shadows the outer `feature`
                // Tentatively add the candidate, score the subset, then undo.
                selectedFeatures.push_back(feature);
                // Compute merit with selectedFeatures
                auto meritNew = computeMeritCFS();
                if (meritNew > merit) {
                    merit = meritNew;
                    bestFeature = feature;
                }
                selectedFeatures.pop_back();
            }
            if (bestFeature == -1) {
                // meritNew has to be nan due to constant features
                break;
            }
            selectedFeatures.push_back(bestFeature);
            selectedScores.push_back(merit);
            // Remove the chosen feature from the candidate pool.
            featureOrder.erase(remove(featureOrder.begin(), featureOrder.end(), bestFeature), featureOrder.end());
            continueCondition = computeContinueCondition(featureOrder);
        }
        fitted = true;
    }
bool CFS::computeContinueCondition(const std::vector<int>& featureOrder)
{
if (selectedFeatures.size() == maxFeatures || featureOrder.size() == 0) {
return false;
}
if (selectedScores.size() >= 5) {
/*
"To prevent the best first search from exploring the entire
feature subset search space, a stopping criterion is imposed.
The search will terminate if five consecutive fully expanded
subsets show no improvement over the current best subset."
as stated in Mark A.Hall Thesis
*/
double item_ant = std::numeric_limits<double>::lowest();
int num = 0;
std::vector<double> lastFive(selectedScores.end() - 5, selectedScores.end());
for (auto item : lastFive) {
if (item_ant == std::numeric_limits<double>::lowest()) {
item_ant = item;
}
if (item > item_ant) {
break;
} else {
num++;
item_ant = item;
}
}
if (num == 5) {
return false;
}
}
return true;
}
}

View File

@@ -1,20 +0,0 @@
#ifndef CFS_H
#define CFS_H
#include <torch/torch.h>
#include <vector>
#include "FeatureSelect.h"
namespace bayesnet {
    // Correlation-based Feature Selection (Hall's thesis): greedy forward
    // search maximising the CFS merit of the selected subset.
    class CFS : public FeatureSelect {
    public:
        // dataset is a n+1xm tensor of integers where dataset[-1] is the y std::vector
        CFS(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights) :
            FeatureSelect(samples, features, className, maxFeatures, classNumStates, weights)
        {
        }
        virtual ~CFS() {};
        // Runs the greedy forward search; results are exposed through the
        // FeatureSelect base accessors.
        void fit() override;
    private:
        // True while the search should keep adding features: stops when the
        // subset is full, no candidates remain, or the last five scores show
        // no improvement.
        bool computeContinueCondition(const std::vector<int>& featureOrder);
    };
}
#endif

View File

@@ -1,12 +1,2 @@
include_directories(${BayesNet_SOURCE_DIR}/lib/mdlp) add_library(BayesNet bayesnetUtils.cc Network.cc Node.cc BayesMetrics.cc Classifier.cc KDB.cc TAN.cc SPODE.cc Ensemble.cc AODE.cc Mst.cc)
include_directories(${BayesNet_SOURCE_DIR}/lib/Files) target_link_libraries(BayesNet "${TORCH_LIBRARIES}")
include_directories(${BayesNet_SOURCE_DIR}/lib/json/include)
include_directories(${BayesNet_SOURCE_DIR}/src/BayesNet)
include_directories(${BayesNet_SOURCE_DIR}/src/Platform)
include_directories(${BayesNet_SOURCE_DIR}/src/PyClassifiers)
include_directories(${Python3_INCLUDE_DIRS})
add_library(BayesNet bayesnetUtils.cc Network.cc Node.cc BayesMetrics.cc Classifier.cc
KDB.cc TAN.cc SPODE.cc Ensemble.cc AODE.cc TANLd.cc KDBLd.cc SPODELd.cc AODELd.cc BoostAODE.cc
Mst.cc Proposal.cc CFS.cc FCBF.cc IWSS.cc FeatureSelect.cc ${BayesNet_SOURCE_DIR}/src/Platform/Models.cc)
target_link_libraries(BayesNet mdlp "${TORCH_LIBRARIES}")

View File

@@ -2,159 +2,146 @@
#include "bayesnetUtils.h" #include "bayesnetUtils.h"
namespace bayesnet { namespace bayesnet {
Classifier::Classifier(Network model) : model(model), m(0), n(0), metrics(Metrics()), fitted(false) {} using namespace torch;
Classifier& Classifier::build(const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights)
Classifier::Classifier(const Network& model) : model(model), m(0), n(0), metrics(Metrics()), fitted(false) {}
Classifier& Classifier::build(vector<string>& features, string className, map<string, vector<int>>& states)
{ {
dataset = torch::cat({ X, y.view({y.size(0), 1}) }, 1);
this->features = features; this->features = features;
this->className = className; this->className = className;
this->states = states; this->states = states;
m = dataset.size(1);
n = dataset.size(0) - 1;
checkFitParameters(); checkFitParameters();
auto n_classes = states.at(className).size(); auto n_classes = states[className].size();
metrics = Metrics(dataset, features, className, n_classes); metrics = Metrics(dataset, features, className, n_classes);
model.initialize(); train();
buildModel(weights); if (Xv == vector<vector<int>>()) {
trainModel(weights); // fit with tensors
model.fit(X, y, features, className);
} else {
// fit with vectors
model.fit(Xv, yv, features, className);
}
fitted = true; fitted = true;
return *this; return *this;
} }
void Classifier::buildDataset(torch::Tensor& ytmp) Classifier& Classifier::fit(torch::Tensor& X, torch::Tensor& y, vector<string>& features, string className, map<string, vector<int>>& states)
{ {
try { this->X = torch::transpose(X, 0, 1);
auto yresized = torch::transpose(ytmp.view({ ytmp.size(0), 1 }), 0, 1); this->y = y;
dataset = torch::cat({ dataset, yresized }, 0); Xv = vector<vector<int>>();
} yv = vector<int>(y.data_ptr<int>(), y.data_ptr<int>() + y.size(0));
catch (const std::exception& e) { return build(features, className, states);
std::cerr << e.what() << '\n';
std::cout << "X dimensions: " << dataset.sizes() << "\n";
std::cout << "y dimensions: " << ytmp.sizes() << "\n";
exit(1);
}
} }
void Classifier::trainModel(const torch::Tensor& weights)
Classifier& Classifier::fit(vector<vector<int>>& X, vector<int>& y, vector<string>& features, string className, map<string, vector<int>>& states)
{ {
model.fit(dataset, weights, features, className, states); this->X = torch::zeros({ static_cast<int>(X[0].size()), static_cast<int>(X.size()) }, kInt32);
} Xv = X;
// X is nxm where n is the number of features and m the number of samples
Classifier& Classifier::fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states)
{
dataset = X;
buildDataset(y);
const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);
return build(features, className, states, weights);
}
// X is nxm where n is the number of features and m the number of samples
Classifier& Classifier::fit(std::vector<std::vector<int>>& X, std::vector<int>& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states)
{
dataset = torch::zeros({ static_cast<int>(X.size()), static_cast<int>(X[0].size()) }, torch::kInt32);
for (int i = 0; i < X.size(); ++i) { for (int i = 0; i < X.size(); ++i) {
dataset.index_put_({ i, "..." }, torch::tensor(X[i], torch::kInt32)); this->X.index_put_({ "...", i }, torch::tensor(X[i], kInt32));
} }
auto ytmp = torch::tensor(y, torch::kInt32); this->y = torch::tensor(y, kInt32);
buildDataset(ytmp); yv = y;
const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble); return build(features, className, states);
return build(features, className, states, weights);
}
Classifier& Classifier::fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states)
{
this->dataset = dataset;
const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);
return build(features, className, states, weights);
}
Classifier& Classifier::fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights)
{
this->dataset = dataset;
return build(features, className, states, weights);
} }
void Classifier::checkFitParameters() void Classifier::checkFitParameters()
{ {
if (torch::is_floating_point(dataset)) { auto sizes = X.sizes();
throw std::invalid_argument("dataset (X, y) must be of type Integer"); m = sizes[0];
n = sizes[1];
if (m != y.size(0)) {
throw invalid_argument("X and y must have the same number of samples");
} }
if (n != features.size()) { if (n != features.size()) {
throw std::invalid_argument("Classifier: X " + std::to_string(n) + " and features " + std::to_string(features.size()) + " must have the same number of features"); throw invalid_argument("X and features must have the same number of features");
} }
if (states.find(className) == states.end()) { if (states.find(className) == states.end()) {
throw std::invalid_argument("className not found in states"); throw invalid_argument("className not found in states");
} }
for (auto feature : features) { for (auto feature : features) {
if (states.find(feature) == states.end()) { if (states.find(feature) == states.end()) {
throw std::invalid_argument("feature [" + feature + "] not found in states"); throw invalid_argument("feature [" + feature + "] not found in states");
} }
} }
} }
torch::Tensor Classifier::predict(torch::Tensor& X)
Tensor Classifier::predict(Tensor& X)
{ {
if (!fitted) { if (!fitted) {
throw std::logic_error("Classifier has not been fitted"); throw logic_error("Classifier has not been fitted");
} }
return model.predict(X); auto m_ = X.size(0);
auto n_ = X.size(1);
//auto Xt = torch::transpose(X, 0, 1);
vector<vector<int>> Xd(n_, vector<int>(m_, 0));
for (auto i = 0; i < n_; i++) {
auto temp = X.index({ "...", i });
Xd[i] = vector<int>(temp.data_ptr<int>(), temp.data_ptr<int>() + temp.numel());
}
auto yp = model.predict(Xd);
auto ypred = torch::tensor(yp, torch::kInt32);
return ypred;
} }
std::vector<int> Classifier::predict(std::vector<std::vector<int>>& X) vector<int> Classifier::predict(vector<vector<int>>& X)
{ {
if (!fitted) { if (!fitted) {
throw std::logic_error("Classifier has not been fitted"); throw logic_error("Classifier has not been fitted");
} }
auto m_ = X[0].size(); auto m_ = X[0].size();
auto n_ = X.size(); auto n_ = X.size();
std::vector<std::vector<int>> Xd(n_, std::vector<int>(m_, 0)); vector<vector<int>> Xd(n_, vector<int>(m_, 0));
for (auto i = 0; i < n_; i++) { for (auto i = 0; i < n_; i++) {
Xd[i] = std::vector<int>(X[i].begin(), X[i].end()); Xd[i] = vector<int>(X[i].begin(), X[i].end());
} }
auto yp = model.predict(Xd); auto yp = model.predict(Xd);
return yp; return yp;
} }
float Classifier::score(torch::Tensor& X, torch::Tensor& y) float Classifier::score(Tensor& X, Tensor& y)
{ {
if (!fitted) { if (!fitted) {
throw std::logic_error("Classifier has not been fitted"); throw logic_error("Classifier has not been fitted");
} }
torch::Tensor y_pred = predict(X); auto Xt = torch::transpose(X, 0, 1);
Tensor y_pred = predict(Xt);
return (y_pred == y).sum().item<float>() / y.size(0); return (y_pred == y).sum().item<float>() / y.size(0);
} }
float Classifier::score(std::vector<std::vector<int>>& X, std::vector<int>& y) float Classifier::score(vector<vector<int>>& X, vector<int>& y)
{ {
if (!fitted) { if (!fitted) {
throw std::logic_error("Classifier has not been fitted"); throw logic_error("Classifier has not been fitted");
} }
return model.score(X, y); auto m_ = X[0].size();
auto n_ = X.size();
vector<vector<int>> Xd(n_, vector<int>(m_, 0));
for (auto i = 0; i < n_; i++) {
Xd[i] = vector<int>(X[i].begin(), X[i].end());
}
return model.score(Xd, y);
} }
std::vector<std::string> Classifier::show() const vector<string> Classifier::show()
{ {
return model.show(); return model.show();
} }
void Classifier::addNodes() void Classifier::addNodes()
{ {
// Add all nodes to the network // Add all nodes to the network
for (const auto& feature : features) { for (auto feature : features) {
model.addNode(feature); model.addNode(feature, states[feature].size());
} }
model.addNode(className); model.addNode(className, states[className].size());
} }
int Classifier::getNumberOfNodes() const int Classifier::getNumberOfNodes()
{ {
// Features does not include class // Features does not include class
return fitted ? model.getFeatures().size() : 0; return fitted ? model.getFeatures().size() + 1 : 0;
} }
int Classifier::getNumberOfEdges() const int Classifier::getNumberOfEdges()
{ {
return fitted ? model.getNumEdges() : 0; return fitted ? model.getEdges().size() : 0;
} }
int Classifier::getNumberOfStates() const int Classifier::getNumberOfStates()
{ {
return fitted ? model.getStates() : 0; return fitted ? model.getStates() : 0;
} }
std::vector<std::string> Classifier::topological_order()
{
return model.topological_sort();
}
void Classifier::dump_cpt() const
{
model.dump_cpt();
}
void Classifier::setHyperparameters(const nlohmann::json& hyperparameters)
{
//For classifiers that don't have hyperparameters
}
} }

View File

@@ -4,46 +4,42 @@
#include "BaseClassifier.h" #include "BaseClassifier.h"
#include "Network.h" #include "Network.h"
#include "BayesMetrics.h" #include "BayesMetrics.h"
using namespace std;
using namespace torch;
namespace bayesnet { namespace bayesnet {
class Classifier : public BaseClassifier { class Classifier : public BaseClassifier {
private: private:
Classifier& build(const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights);
protected:
bool fitted; bool fitted;
int m, n; // m: number of samples, n: number of features Classifier& build(vector<string>& features, string className, map<string, vector<int>>& states);
protected:
Network model; Network model;
int m, n; // m: number of samples, n: number of features
Tensor X;
vector<vector<int>> Xv;
Tensor y;
vector<int> yv;
Tensor dataset;
Metrics metrics; Metrics metrics;
std::vector<std::string> features; vector<string> features;
std::string className; string className;
std::map<std::string, std::vector<int>> states; map<string, vector<int>> states;
torch::Tensor dataset; // (n+1)xm tensor
status_t status = NORMAL;
void checkFitParameters(); void checkFitParameters();
virtual void buildModel(const torch::Tensor& weights) = 0; virtual void train() = 0;
void trainModel(const torch::Tensor& weights) override;
void buildDataset(torch::Tensor& y);
public: public:
Classifier(Network model); Classifier(const Network& model);
virtual ~Classifier() = default; virtual ~Classifier() = default;
Classifier& fit(std::vector<std::vector<int>>& X, std::vector<int>& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states) override; Classifier& fit(vector<vector<int>>& X, vector<int>& y, vector<string>& features, string className, map<string, vector<int>>& states) override;
Classifier& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states) override; Classifier& fit(torch::Tensor& X, torch::Tensor& y, vector<string>& features, string className, map<string, vector<int>>& states) override;
Classifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states) override;
Classifier& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights) override;
void addNodes(); void addNodes();
int getNumberOfNodes() const override; int getNumberOfNodes() override;
int getNumberOfEdges() const override; int getNumberOfEdges() override;
int getNumberOfStates() const override; int getNumberOfStates() override;
torch::Tensor predict(torch::Tensor& X) override; Tensor predict(Tensor& X);
status_t getStatus() const override { return status; } vector<int> predict(vector<vector<int>>& X) override;
std::string getVersion() override { return "0.2.0"; }; float score(Tensor& X, Tensor& y) override;
std::vector<int> predict(std::vector<std::vector<int>>& X) override; float score(vector<vector<int>>& X, vector<int>& y) override;
float score(torch::Tensor& X, torch::Tensor& y) override; vector<string> show() override;
float score(std::vector<std::vector<int>>& X, std::vector<int>& y) override;
std::vector<std::string> show() const override;
std::vector<std::string> topological_order() override;
void dump_cpt() const override;
void setHyperparameters(const nlohmann::json& hyperparameters) override; //For classifiers that don't have hyperparameters
}; };
} }
#endif #endif

View File

@@ -1,77 +1,92 @@
#include "Ensemble.h" #include "Ensemble.h"
namespace bayesnet { namespace bayesnet {
using namespace torch;
Ensemble::Ensemble() : Classifier(Network()), n_models(0) {} Ensemble::Ensemble() : m(0), n(0), n_models(0), metrics(Metrics()), fitted(false) {}
Ensemble& Ensemble::build(vector<string>& features, string className, map<string, vector<int>>& states)
void Ensemble::trainModel(const torch::Tensor& weights)
{ {
dataset = cat({ X, y.view({y.size(0), 1}) }, 1);
this->features = features;
this->className = className;
this->states = states;
auto n_classes = states[className].size();
metrics = Metrics(dataset, features, className, n_classes);
// Build models
train();
// Train models
n_models = models.size(); n_models = models.size();
for (auto i = 0; i < n_models; ++i) { for (auto i = 0; i < n_models; ++i) {
// fit with std::vectors models[i]->fit(Xv, yv, features, className, states);
models[i]->fit(dataset, features, className, states);
} }
fitted = true;
return *this;
} }
std::vector<int> Ensemble::voting(torch::Tensor& y_pred) Ensemble& Ensemble::fit(torch::Tensor& X, torch::Tensor& y, vector<string>& features, string className, map<string, vector<int>>& states)
{
this->X = X;
this->y = y;
Xv = vector<vector<int>>();
yv = vector<int>(y.data_ptr<int>(), y.data_ptr<int>() + y.size(0));
return build(features, className, states);
}
Ensemble& Ensemble::fit(vector<vector<int>>& X, vector<int>& y, vector<string>& features, string className, map<string, vector<int>>& states)
{
this->X = torch::zeros({ static_cast<int>(X[0].size()), static_cast<int>(X.size()) }, kInt32);
Xv = X;
for (int i = 0; i < X.size(); ++i) {
this->X.index_put_({ "...", i }, torch::tensor(X[i], kInt32));
}
this->y = torch::tensor(y, kInt32);
yv = y;
return build(features, className, states);
}
Tensor Ensemble::predict(Tensor& X)
{
if (!fitted) {
throw logic_error("Ensemble has not been fitted");
}
Tensor y_pred = torch::zeros({ X.size(0), n_models }, kInt32);
for (auto i = 0; i < n_models; ++i) {
y_pred.index_put_({ "...", i }, models[i]->predict(X));
}
return torch::tensor(voting(y_pred));
}
vector<int> Ensemble::voting(Tensor& y_pred)
{ {
auto y_pred_ = y_pred.accessor<int, 2>(); auto y_pred_ = y_pred.accessor<int, 2>();
std::vector<int> y_pred_final; vector<int> y_pred_final;
int numClasses = states.at(className).size();
// y_pred is m x n_models with the prediction of every model for each sample
for (int i = 0; i < y_pred.size(0); ++i) { for (int i = 0; i < y_pred.size(0); ++i) {
// votes store in each index (value of class) the significance added by each model vector<float> votes(states[className].size(), 0);
// i.e. votes[0] contains how much value has the value 0 of class. That value is generated by the models predictions for (int j = 0; j < y_pred.size(1); ++j) {
std::vector<double> votes(numClasses, 0.0); votes[y_pred_[i][j]] += 1;
for (int j = 0; j < n_models; ++j) {
votes[y_pred_[i][j]] += significanceModels.at(j);
} }
// argsort in descending order
auto indices = argsort(votes); auto indices = argsort(votes);
y_pred_final.push_back(indices[0]); y_pred_final.push_back(indices[0]);
} }
return y_pred_final; return y_pred_final;
} }
torch::Tensor Ensemble::predict(torch::Tensor& X) vector<int> Ensemble::predict(vector<vector<int>>& X)
{ {
if (!fitted) { if (!fitted) {
throw std::logic_error("Ensemble has not been fitted"); throw logic_error("Ensemble has not been fitted");
}
torch::Tensor y_pred = torch::zeros({ X.size(1), n_models }, torch::kInt32);
auto threads{ std::vector<std::thread>() };
std::mutex mtx;
for (auto i = 0; i < n_models; ++i) {
threads.push_back(std::thread([&, i]() {
auto ypredict = models[i]->predict(X);
std::lock_guard<std::mutex> lock(mtx);
y_pred.index_put_({ "...", i }, ypredict);
}));
}
for (auto& thread : threads) {
thread.join();
}
return torch::tensor(voting(y_pred));
}
std::vector<int> Ensemble::predict(std::vector<std::vector<int>>& X)
{
if (!fitted) {
throw std::logic_error("Ensemble has not been fitted");
} }
long m_ = X[0].size(); long m_ = X[0].size();
long n_ = X.size(); long n_ = X.size();
std::vector<std::vector<int>> Xd(n_, std::vector<int>(m_, 0)); vector<vector<int>> Xd(n_, vector<int>(m_, 0));
for (auto i = 0; i < n_; i++) { for (auto i = 0; i < n_; i++) {
Xd[i] = std::vector<int>(X[i].begin(), X[i].end()); Xd[i] = vector<int>(X[i].begin(), X[i].end());
} }
torch::Tensor y_pred = torch::zeros({ m_, n_models }, torch::kInt32); Tensor y_pred = torch::zeros({ m_, n_models }, kInt32);
for (auto i = 0; i < n_models; ++i) { for (auto i = 0; i < n_models; ++i) {
y_pred.index_put_({ "...", i }, torch::tensor(models[i]->predict(Xd), torch::kInt32)); y_pred.index_put_({ "...", i }, torch::tensor(models[i]->predict(Xd), kInt32));
} }
return voting(y_pred); return voting(y_pred);
} }
float Ensemble::score(torch::Tensor& X, torch::Tensor& y) float Ensemble::score(Tensor& X, Tensor& y)
{ {
if (!fitted) { if (!fitted) {
throw std::logic_error("Ensemble has not been fitted"); throw logic_error("Ensemble has not been fitted");
} }
auto y_pred = predict(X); auto y_pred = predict(X);
int correct = 0; int correct = 0;
@@ -82,10 +97,10 @@ namespace bayesnet {
} }
return (double)correct / y_pred.size(0); return (double)correct / y_pred.size(0);
} }
float Ensemble::score(std::vector<std::vector<int>>& X, std::vector<int>& y) float Ensemble::score(vector<vector<int>>& X, vector<int>& y)
{ {
if (!fitted) { if (!fitted) {
throw std::logic_error("Ensemble has not been fitted"); throw logic_error("Ensemble has not been fitted");
} }
auto y_pred = predict(X); auto y_pred = predict(X);
int correct = 0; int correct = 0;
@@ -95,26 +110,27 @@ namespace bayesnet {
} }
} }
return (double)correct / y_pred.size(); return (double)correct / y_pred.size();
} }
std::vector<std::string> Ensemble::show() const vector<string> Ensemble::show()
{ {
auto result = std::vector<std::string>(); auto result = vector<string>();
for (auto i = 0; i < n_models; ++i) { for (auto i = 0; i < n_models; ++i) {
auto res = models[i]->show(); auto res = models[i]->show();
result.insert(result.end(), res.begin(), res.end()); result.insert(result.end(), res.begin(), res.end());
} }
return result; return result;
} }
std::vector<std::string> Ensemble::graph(const std::string& title) const vector<string> Ensemble::graph(string title)
{ {
auto result = std::vector<std::string>(); auto result = vector<string>();
for (auto i = 0; i < n_models; ++i) { for (auto i = 0; i < n_models; ++i) {
auto res = models[i]->graph(title + "_" + std::to_string(i)); auto res = models[i]->graph(title + "_" + to_string(i));
result.insert(result.end(), res.begin(), res.end()); result.insert(result.end(), res.begin(), res.end());
} }
return result; return result;
} }
int Ensemble::getNumberOfNodes() const int Ensemble::getNumberOfNodes()
{ {
int nodes = 0; int nodes = 0;
for (auto i = 0; i < n_models; ++i) { for (auto i = 0; i < n_models; ++i) {
@@ -122,7 +138,7 @@ namespace bayesnet {
} }
return nodes; return nodes;
} }
int Ensemble::getNumberOfEdges() const int Ensemble::getNumberOfEdges()
{ {
int edges = 0; int edges = 0;
for (auto i = 0; i < n_models; ++i) { for (auto i = 0; i < n_models; ++i) {
@@ -130,7 +146,7 @@ namespace bayesnet {
} }
return edges; return edges;
} }
int Ensemble::getNumberOfStates() const int Ensemble::getNumberOfStates()
{ {
int nstates = 0; int nstates = 0;
for (auto i = 0; i < n_models; ++i) { for (auto i = 0; i < n_models; ++i) {

View File

@@ -4,36 +4,43 @@
#include "Classifier.h" #include "Classifier.h"
#include "BayesMetrics.h" #include "BayesMetrics.h"
#include "bayesnetUtils.h" #include "bayesnetUtils.h"
using namespace std;
using namespace torch;
namespace bayesnet { namespace bayesnet {
class Ensemble : public Classifier { class Ensemble : public BaseClassifier {
private: private:
Ensemble& build(std::vector<std::string>& features, std::string className, std::map<std::string, std::vector<int>>& states); bool fitted;
long n_models;
Ensemble& build(vector<string>& features, string className, map<string, vector<int>>& states);
protected: protected:
unsigned n_models; vector<unique_ptr<Classifier>> models;
std::vector<std::unique_ptr<Classifier>> models; int m, n; // m: number of samples, n: number of features
std::vector<double> significanceModels; Tensor X;
void trainModel(const torch::Tensor& weights) override; vector<vector<int>> Xv;
std::vector<int> voting(torch::Tensor& y_pred); Tensor y;
vector<int> yv;
Tensor dataset;
Metrics metrics;
vector<string> features;
string className;
map<string, vector<int>> states;
void virtual train() = 0;
vector<int> voting(Tensor& y_pred);
public: public:
Ensemble(); Ensemble();
virtual ~Ensemble() = default; virtual ~Ensemble() = default;
torch::Tensor predict(torch::Tensor& X) override; Ensemble& fit(vector<vector<int>>& X, vector<int>& y, vector<string>& features, string className, map<string, vector<int>>& states) override;
std::vector<int> predict(std::vector<std::vector<int>>& X) override; Ensemble& fit(torch::Tensor& X, torch::Tensor& y, vector<string>& features, string className, map<string, vector<int>>& states) override;
float score(torch::Tensor& X, torch::Tensor& y) override; Tensor predict(Tensor& X);
float score(std::vector<std::vector<int>>& X, std::vector<int>& y) override; vector<int> predict(vector<vector<int>>& X) override;
int getNumberOfNodes() const override; float score(Tensor& X, Tensor& y) override;
int getNumberOfEdges() const override; float score(vector<vector<int>>& X, vector<int>& y) override;
int getNumberOfStates() const override; int getNumberOfNodes() override;
std::vector<std::string> show() const override; int getNumberOfEdges() override;
std::vector<std::string> graph(const std::string& title) const override; int getNumberOfStates() override;
std::vector<std::string> topological_order() override vector<string> show() override;
{ vector<string> graph(string title) override;
return std::vector<std::string>();
}
void dump_cpt() const override
{
}
}; };
} }
#endif #endif

View File

@@ -1,44 +0,0 @@
#include "bayesnetUtils.h"
#include "FCBF.h"
namespace bayesnet {
FCBF::FCBF(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights, const double threshold) :
FeatureSelect(samples, features, className, maxFeatures, classNumStates, weights), threshold(threshold)
{
if (threshold < 1e-7) {
throw std::invalid_argument("Threshold cannot be less than 1e-7");
}
}
void FCBF::fit()
{
initialize();
computeSuLabels();
auto featureOrder = argsort(suLabels); // sort descending order
auto featureOrderCopy = featureOrder;
for (const auto& feature : featureOrder) {
// Don't self compare
featureOrderCopy.erase(featureOrderCopy.begin());
if (suLabels.at(feature) == 0.0) {
// The feature has been removed from the list
continue;
}
if (suLabels.at(feature) < threshold) {
break;
}
// Remove redundant features
for (const auto& featureCopy : featureOrderCopy) {
double value = computeSuFeatures(feature, featureCopy);
if (value >= suLabels.at(featureCopy)) {
// Remove feature from list
suLabels[featureCopy] = 0.0;
}
}
selectedFeatures.push_back(feature);
selectedScores.push_back(suLabels[feature]);
if (selectedFeatures.size() == maxFeatures) {
break;
}
}
fitted = true;
}
}

View File

@@ -1,17 +0,0 @@
#ifndef FCBF_H
#define FCBF_H
#include <torch/torch.h>
#include <vector>
#include "FeatureSelect.h"
namespace bayesnet {
class FCBF : public FeatureSelect {
public:
// dataset is a n+1xm tensor of integers where dataset[-1] is the y std::vector
FCBF(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights, const double threshold);
virtual ~FCBF() {};
void fit() override;
private:
double threshold = -1;
};
}
#endif

View File

@@ -1,79 +0,0 @@
#include "FeatureSelect.h"
#include <limits>
#include "bayesnetUtils.h"
namespace bayesnet {
FeatureSelect::FeatureSelect(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights) :
Metrics(samples, features, className, classNumStates), maxFeatures(maxFeatures == 0 ? samples.size(0) - 1 : maxFeatures), weights(weights)
{
}
void FeatureSelect::initialize()
{
selectedFeatures.clear();
selectedScores.clear();
}
double FeatureSelect::symmetricalUncertainty(int a, int b)
{
/*
Compute symmetrical uncertainty. Normalize* information gain (mutual
information) with the entropies of the features in order to compensate
the bias due to high cardinality features. *Range [0, 1]
(https://www.sciencedirect.com/science/article/pii/S0020025519303603)
*/
auto x = samples.index({ a, "..." });
auto y = samples.index({ b, "..." });
auto mu = mutualInformation(x, y, weights);
auto hx = entropy(x, weights);
auto hy = entropy(y, weights);
return 2.0 * mu / (hx + hy);
}
void FeatureSelect::computeSuLabels()
{
// Compute Simmetrical Uncertainty between features and labels
// https://en.wikipedia.org/wiki/Symmetric_uncertainty
for (int i = 0; i < features.size(); ++i) {
suLabels.push_back(symmetricalUncertainty(i, -1));
}
}
double FeatureSelect::computeSuFeatures(const int firstFeature, const int secondFeature)
{
// Compute Simmetrical Uncertainty between features
// https://en.wikipedia.org/wiki/Symmetric_uncertainty
try {
return suFeatures.at({ firstFeature, secondFeature });
}
catch (const std::out_of_range& e) {
double result = symmetricalUncertainty(firstFeature, secondFeature);
suFeatures[{firstFeature, secondFeature}] = result;
return result;
}
}
double FeatureSelect::computeMeritCFS()
{
double result;
double rcf = 0;
for (auto feature : selectedFeatures) {
rcf += suLabels[feature];
}
double rff = 0;
int n = selectedFeatures.size();
for (const auto& item : doCombinations(selectedFeatures)) {
rff += computeSuFeatures(item.first, item.second);
}
return rcf / sqrt(n + (n * n - n) * rff);
}
std::vector<int> FeatureSelect::getFeatures() const
{
if (!fitted) {
throw std::runtime_error("FeatureSelect not fitted");
}
return selectedFeatures;
}
std::vector<double> FeatureSelect::getScores() const
{
if (!fitted) {
throw std::runtime_error("FeatureSelect not fitted");
}
return selectedScores;
}
}

View File

@@ -1,30 +0,0 @@
#ifndef FEATURE_SELECT_H
#define FEATURE_SELECT_H
#include <torch/torch.h>
#include <vector>
#include "BayesMetrics.h"
namespace bayesnet {
class FeatureSelect : public Metrics {
public:
// dataset is a n+1xm tensor of integers where dataset[-1] is the y std::vector
FeatureSelect(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights);
virtual ~FeatureSelect() {};
virtual void fit() = 0;
std::vector<int> getFeatures() const;
std::vector<double> getScores() const;
protected:
void initialize();
void computeSuLabels();
double computeSuFeatures(const int a, const int b);
double symmetricalUncertainty(int a, int b);
double computeMeritCFS();
const torch::Tensor& weights;
int maxFeatures;
std::vector<int> selectedFeatures;
std::vector<double> selectedScores;
std::vector<double> suLabels;
std::map<std::pair<int, int>, double> suFeatures;
bool fitted = false;
};
}
#endif

View File

@@ -1,47 +0,0 @@
#include "IWSS.h"
#include <limits>
#include "bayesnetUtils.h"
namespace bayesnet {
IWSS::IWSS(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights, const double threshold) :
FeatureSelect(samples, features, className, maxFeatures, classNumStates, weights), threshold(threshold)
{
if (threshold < 0 || threshold > .5) {
throw std::invalid_argument("Threshold has to be in [0, 0.5]");
}
}
void IWSS::fit()
{
initialize();
computeSuLabels();
auto featureOrder = argsort(suLabels); // sort descending order
auto featureOrderCopy = featureOrder;
// Add first and second features to result
// First with its own score
auto first_feature = pop_first(featureOrderCopy);
selectedFeatures.push_back(first_feature);
selectedScores.push_back(suLabels.at(first_feature));
// Second with the score of the candidates
selectedFeatures.push_back(pop_first(featureOrderCopy));
auto merit = computeMeritCFS();
selectedScores.push_back(merit);
for (const auto feature : featureOrderCopy) {
selectedFeatures.push_back(feature);
// Compute merit with selectedFeatures
auto meritNew = computeMeritCFS();
double delta = merit != 0.0 ? abs(merit - meritNew) / merit : 0.0;
if (meritNew > merit || delta < threshold) {
if (meritNew > merit) {
merit = meritNew;
}
selectedScores.push_back(meritNew);
} else {
selectedFeatures.pop_back();
break;
}
if (selectedFeatures.size() == maxFeatures) {
break;
}
}
fitted = true;
}
}

View File

@@ -1,17 +0,0 @@
#ifndef IWSS_H
#define IWSS_H
#include <torch/torch.h>
#include <vector>
#include "FeatureSelect.h"
namespace bayesnet {
class IWSS : public FeatureSelect {
public:
// dataset is a n+1xm tensor of integers where dataset[-1] is the y std::vector
IWSS(const torch::Tensor& samples, const std::vector<std::string>& features, const std::string& className, const int maxFeatures, const int classNumStates, const torch::Tensor& weights, const double threshold);
virtual ~IWSS() {};
void fit() override;
private:
double threshold = -1;
};
}
#endif

View File

@@ -1,21 +1,10 @@
#include "KDB.h" #include "KDB.h"
namespace bayesnet { namespace bayesnet {
KDB::KDB(int k, float theta) : Classifier(Network()), k(k), theta(theta) using namespace torch;
{
validHyperparameters = { "k", "theta" };
} KDB::KDB(int k, float theta) : Classifier(Network()), k(k), theta(theta) {}
void KDB::setHyperparameters(const nlohmann::json& hyperparameters) void KDB::train()
{
if (hyperparameters.contains("k")) {
k = hyperparameters["k"];
}
if (hyperparameters.contains("theta")) {
theta = hyperparameters["theta"];
}
}
void KDB::buildModel(const torch::Tensor& weights)
{ {
/* /*
1. For each feature Xi, compute mutual information, I(X;C), 1. For each feature Xi, compute mutual information, I(X;C),
@@ -38,25 +27,25 @@ namespace bayesnet {
*/ */
// 1. For each feature Xi, compute mutual information, I(X;C), // 1. For each feature Xi, compute mutual information, I(X;C),
// where C is the class. // where C is the class.
addNodes(); vector <float> mi;
const torch::Tensor& y = dataset.index({ -1, "..." });
std::vector<double> mi;
for (auto i = 0; i < features.size(); i++) { for (auto i = 0; i < features.size(); i++) {
torch::Tensor firstFeature = dataset.index({ i, "..." }); Tensor firstFeature = X.index({ "...", i });
mi.push_back(metrics.mutualInformation(firstFeature, y, weights)); mi.push_back(metrics.mutualInformation(firstFeature, y));
} }
// 2. Compute class conditional mutual information I(Xi;XjIC), f or each // 2. Compute class conditional mutual information I(Xi;XjIC), f or each
auto conditionalEdgeWeights = metrics.conditionalEdge(weights); auto conditionalEdgeWeights = metrics.conditionalEdge();
// 3. Let the used variable list, S, be empty. // 3. Let the used variable list, S, be empty.
std::vector<int> S; vector<int> S;
// 4. Let the DAG network being constructed, BN, begin with a single // 4. Let the DAG network being constructed, BN, begin with a single
// class node, C. // class node, C.
model.addNode(className, states[className].size());
// 5. Repeat until S includes all domain features // 5. Repeat until S includes all domain features
// 5.1. Select feature Xmax which is not in S and has the largest value // 5.1. Select feature Xmax which is not in S and has the largest value
// I(Xmax;C). // I(Xmax;C).
auto order = argsort(mi); auto order = argsort(mi);
for (auto idx : order) { for (auto idx : order) {
// 5.2. Add a node to BN representing Xmax. // 5.2. Add a node to BN representing Xmax.
model.addNode(features[idx], states[features[idx]].size());
// 5.3. Add an arc from C to Xmax in BN. // 5.3. Add an arc from C to Xmax in BN.
model.addEdge(className, features[idx]); model.addEdge(className, features[idx]);
// 5.4. Add m = min(lSl,/c) arcs from m distinct features Xj in S with // 5.4. Add m = min(lSl,/c) arcs from m distinct features Xj in S with
@@ -66,9 +55,9 @@ namespace bayesnet {
S.push_back(idx); S.push_back(idx);
} }
} }
void KDB::add_m_edges(int idx, std::vector<int>& S, torch::Tensor& weights) void KDB::add_m_edges(int idx, vector<int>& S, Tensor& weights)
{ {
auto n_edges = std::min(k, static_cast<int>(S.size())); auto n_edges = min(k, static_cast<int>(S.size()));
auto cond_w = clone(weights); auto cond_w = clone(weights);
bool exit_cond = k == 0; bool exit_cond = k == 0;
int num = 0; int num = 0;
@@ -80,7 +69,7 @@ namespace bayesnet {
model.addEdge(features[max_minfo], features[idx]); model.addEdge(features[max_minfo], features[idx]);
num++; num++;
} }
catch (const std::invalid_argument& e) { catch (const invalid_argument& e) {
// Loops are not allowed // Loops are not allowed
} }
} }
@@ -90,12 +79,11 @@ namespace bayesnet {
exit_cond = num == n_edges || candidates.size(0) == 0; exit_cond = num == n_edges || candidates.size(0) == 0;
} }
} }
std::vector<std::string> KDB::graph(const std::string& title) const vector<string> KDB::graph(string title)
{ {
std::string header{ title };
if (title == "KDB") { if (title == "KDB") {
header += " (k=" + std::to_string(k) + ", theta=" + std::to_string(theta) + ")"; title += " (k=" + to_string(k) + ", theta=" + to_string(theta) + ")";
} }
return model.graph(header); return model.graph(title);
} }
} }

View File

@@ -1,21 +1,20 @@
#ifndef KDB_H #ifndef KDB_H
#define KDB_H #define KDB_H
#include <torch/torch.h>
#include "Classifier.h" #include "Classifier.h"
#include "bayesnetUtils.h" #include "bayesnetUtils.h"
namespace bayesnet { namespace bayesnet {
using namespace std;
using namespace torch;
class KDB : public Classifier { class KDB : public Classifier {
private: private:
int k; int k;
float theta; float theta;
void add_m_edges(int idx, std::vector<int>& S, torch::Tensor& weights); void add_m_edges(int idx, vector<int>& S, Tensor& weights);
protected: protected:
void buildModel(const torch::Tensor& weights) override; void train() override;
public: public:
explicit KDB(int k, float theta = 0.03); explicit KDB(int k, float theta = 0.03);
virtual ~KDB() = default; vector<string> graph(string name = "KDB") override;
void setHyperparameters(const nlohmann::json& hyperparameters) override;
std::vector<std::string> graph(const std::string& name = "KDB") const override;
}; };
} }
#endif #endif

View File

@@ -1,29 +0,0 @@
#include "KDBLd.h"
namespace bayesnet {
KDBLd::KDBLd(int k) : KDB(k), Proposal(dataset, features, className) {}
KDBLd& KDBLd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_)
{
checkInput(X_, y_);
features = features_;
className = className_;
Xf = X_;
y = y_;
// Fills std::vectors Xv & yv with the data from tensors X_ (discretized) & y
states = fit_local_discretization(y);
// We have discretized the input data
// 1st we need to fit the model to build the normal KDB structure, KDB::fit initializes the base Bayesian network
KDB::fit(dataset, features, className, states);
states = localDiscretizationProposal(states, model);
return *this;
}
torch::Tensor KDBLd::predict(torch::Tensor& X)
{
auto Xt = prepareX(X);
return KDB::predict(Xt);
}
std::vector<std::string> KDBLd::graph(const std::string& name) const
{
return KDB::graph(name);
}
}

View File

@@ -1,18 +0,0 @@
#ifndef KDBLD_H
#define KDBLD_H
#include "KDB.h"
#include "Proposal.h"
namespace bayesnet {
class KDBLd : public KDB, public Proposal {
private:
public:
explicit KDBLd(int k);
virtual ~KDBLd() = default;
KDBLd& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states) override;
std::vector<std::string> graph(const std::string& name = "KDB") const override;
torch::Tensor predict(torch::Tensor& X) override;
static inline std::string version() { return "0.0.1"; };
};
}
#endif // !KDBLD_H

View File

@@ -1,13 +1,13 @@
#include "Mst.h" #include "Mst.h"
#include <vector> #include <vector>
#include <list>
/* /*
Based on the code from https://www.softwaretestinghelp.com/minimum-spanning-tree-tutorial/ Based on the code from https://www.softwaretestinghelp.com/minimum-spanning-tree-tutorial/
*/ */
namespace bayesnet { namespace bayesnet {
Graph::Graph(int V) : V(V), parent(std::vector<int>(V)) using namespace std;
Graph::Graph(int V) : V(V), parent{ vector<int>(V) }
{ {
for (int i = 0; i < V; i++) for (int i = 0; i < V; i++)
parent[i] = i; parent[i] = i;
@@ -33,46 +33,38 @@ namespace bayesnet {
} }
void Graph::kruskal_algorithm() void Graph::kruskal_algorithm()
{ {
int i;
// sort the edges ordered on decreasing weight // sort the edges ordered on decreasing weight
stable_sort(G.begin(), G.end(), [](const auto& left, const auto& right) {return left.first > right.first;}); sort(G.begin(), G.end(), [](const auto& left, const auto& right) {return left.first > right.first;});
for (int i = 0; i < G.size(); i++) { for (i = 0; i < G.size(); i++) {
int uSt, vEd; int uSt, vEd;
uSt = find_set(G[i].second.first); uSt = find_set(G[i].second.first);
vEd = find_set(G[i].second.second); vEd = find_set(G[i].second.second);
if (uSt != vEd) { if (uSt != vEd) {
T.push_back(G[i]); // add to mst std::vector T.push_back(G[i]); // add to mst vector
union_set(uSt, vEd); union_set(uSt, vEd);
} }
} }
} }
void Graph::display_mst() void Graph::display_mst()
{ {
std::cout << "Edge :" << " Weight" << std::endl; cout << "Edge :" << " Weight" << endl;
for (int i = 0; i < T.size(); i++) { for (int i = 0; i < T.size(); i++) {
std::cout << T[i].second.first << " - " << T[i].second.second << " : " cout << T[i].second.first << " - " << T[i].second.second << " : "
<< T[i].first; << T[i].first;
std::cout << std::endl; cout << endl;
} }
} }
void insertElement(std::list<int>& variables, int variable) vector<pair<int, int>> reorder(vector<pair<float, pair<int, int>>> T, int root_original)
{ {
if (std::find(variables.begin(), variables.end(), variable) == variables.end()) { auto result = vector<pair<int, int>>();
variables.push_front(variable); auto visited = vector<int>();
} auto nextVariables = unordered_set<int>();
} nextVariables.emplace(root_original);
std::vector<std::pair<int, int>> reorder(std::vector<std::pair<float, std::pair<int, int>>> T, int root_original)
{
// Create the edges of a DAG from the MST
// replacing unordered_set with list because unordered_set cannot guarantee the order of the elements inserted
auto result = std::vector<std::pair<int, int>>();
auto visited = std::vector<int>();
auto nextVariables = std::list<int>();
nextVariables.push_front(root_original);
while (nextVariables.size() > 0) { while (nextVariables.size() > 0) {
int root = nextVariables.front(); int root = *nextVariables.begin();
nextVariables.pop_front(); nextVariables.erase(nextVariables.begin());
for (int i = 0; i < T.size(); ++i) { for (int i = 0; i < T.size(); ++i) {
auto [weight, edge] = T[i]; auto [weight, edge] = T[i];
auto [from, to] = edge; auto [from, to] = edge;
@@ -80,10 +72,10 @@ namespace bayesnet {
visited.insert(visited.begin(), i); visited.insert(visited.begin(), i);
if (from == root) { if (from == root) {
result.push_back({ from, to }); result.push_back({ from, to });
insertElement(nextVariables, to); nextVariables.emplace(to);
} else { } else {
result.push_back({ to, from }); result.push_back({ to, from });
insertElement(nextVariables, from); nextVariables.emplace(from);
} }
} }
} }
@@ -103,11 +95,12 @@ namespace bayesnet {
return result; return result;
} }
MST::MST(const std::vector<std::string>& features, const torch::Tensor& weights, const int root) : features(features), weights(weights), root(root) {} MST::MST(vector<string>& features, Tensor& weights, int root) : features(features), weights(weights), root(root) {}
std::vector<std::pair<int, int>> MST::maximumSpanningTree() vector<pair<int, int>> MST::maximumSpanningTree()
{ {
auto num_features = features.size(); auto num_features = features.size();
Graph g(num_features); Graph g(num_features);
// Make a complete graph // Make a complete graph
for (int i = 0; i < num_features - 1; ++i) { for (int i = 0; i < num_features - 1; ++i) {
for (int j = i + 1; j < num_features; ++j) { for (int j = i + 1; j < num_features; ++j) {

View File

@@ -4,22 +4,23 @@
#include <vector> #include <vector>
#include <string> #include <string>
namespace bayesnet { namespace bayesnet {
using namespace std;
using namespace torch;
class MST { class MST {
private: private:
torch::Tensor weights; Tensor weights;
std::vector<std::string> features; vector<string> features;
int root = 0; int root;
public: public:
MST() = default; MST(vector<string>& features, Tensor& weights, int root);
MST(const std::vector<std::string>& features, const torch::Tensor& weights, const int root); vector<pair<int, int>> maximumSpanningTree();
std::vector<std::pair<int, int>> maximumSpanningTree();
}; };
class Graph { class Graph {
private: private:
int V; // number of nodes in graph int V; // number of nodes in graph
std::vector <std::pair<float, std::pair<int, int>>> G; // std::vector for graph vector <pair<float, pair<int, int>>> G; // vector for graph
std::vector <std::pair<float, std::pair<int, int>>> T; // std::vector for mst vector <pair<float, pair<int, int>>> T; // vector for mst
std::vector<int> parent; vector<int> parent;
public: public:
explicit Graph(int V); explicit Graph(int V);
void addEdge(int u, int v, float wt); void addEdge(int u, int v, float wt);
@@ -27,7 +28,7 @@ namespace bayesnet {
void union_set(int u, int v); void union_set(int u, int v);
void kruskal_algorithm(); void kruskal_algorithm();
void display_mst(); void display_mst();
std::vector <std::pair<float, std::pair<int, int>>> get_mst() { return T; } vector <pair<float, pair<int, int>>> get_mst() { return T; }
}; };
} }
#endif #endif

View File

@@ -3,24 +3,15 @@
#include "Network.h" #include "Network.h"
#include "bayesnetUtils.h" #include "bayesnetUtils.h"
namespace bayesnet { namespace bayesnet {
Network::Network() : features(std::vector<std::string>()), className(""), classNumStates(0), fitted(false), laplaceSmoothing(0) {} Network::Network() : laplaceSmoothing(1), features(vector<string>()), className(""), classNumStates(0), maxThreads(0.8), fitted(false) {}
Network::Network(float maxT) : features(std::vector<std::string>()), className(""), classNumStates(0), maxThreads(maxT), fitted(false), laplaceSmoothing(0) {} Network::Network(const float maxT) : laplaceSmoothing(1), features(vector<string>()), className(""), classNumStates(0), maxThreads(maxT), fitted(false) {}
Network::Network(Network& other) : laplaceSmoothing(other.laplaceSmoothing), features(other.features), className(other.className), classNumStates(other.getClassNumStates()), maxThreads(other. Network::Network(const float maxT, const int smoothing) : laplaceSmoothing(smoothing), features(vector<string>()), className(""), classNumStates(0), maxThreads(maxT), fitted(false) {}
getmaxThreads()), fitted(other.fitted) Network::Network(const Network& other) : laplaceSmoothing(other.laplaceSmoothing), features(other.features), className(other.className), classNumStates(other.classNumStates), maxThreads(other.maxThreads), fitted(other.fitted)
{ {
for (const auto& node : other.nodes) { for (const auto& pair : other.nodes) {
nodes[node.first] = std::make_unique<Node>(*node.second); nodes[pair.first] = std::make_unique<Node>(*pair.second);
} }
} }
void Network::initialize()
{
features = std::vector<std::string>();
className = "";
classNumStates = 0;
fitted = false;
nodes.clear();
samples = torch::Tensor();
}
float Network::getmaxThreads() float Network::getmaxThreads()
{ {
return maxThreads; return maxThreads;
@@ -29,28 +20,28 @@ namespace bayesnet {
{ {
return samples; return samples;
} }
void Network::addNode(const std::string& name) void Network::addNode(const string& name, const int numStates)
{ {
if (name == "") {
throw std::invalid_argument("Node name cannot be empty");
}
if (nodes.find(name) != nodes.end()) {
return;
}
if (find(features.begin(), features.end(), name) == features.end()) { if (find(features.begin(), features.end(), name) == features.end()) {
features.push_back(name); features.push_back(name);
} }
nodes[name] = std::make_unique<Node>(name); if (nodes.find(name) != nodes.end()) {
// if node exists update its number of states and remove parents, children and CPT
nodes[name]->clear();
nodes[name]->setNumStates(numStates);
return;
}
nodes[name] = std::make_unique<Node>(name, numStates);
} }
std::vector<std::string> Network::getFeatures() const vector<string> Network::getFeatures()
{ {
return features; return features;
} }
int Network::getClassNumStates() const const int Network::getClassNumStates()
{ {
return classNumStates; return classNumStates;
} }
int Network::getStates() const const int Network::getStates()
{ {
int result = 0; int result = 0;
for (auto& node : nodes) { for (auto& node : nodes) {
@@ -58,11 +49,11 @@ namespace bayesnet {
} }
return result; return result;
} }
std::string Network::getClassName() const const string Network::getClassName()
{ {
return className; return className;
} }
bool Network::isCyclic(const std::string& nodeId, std::unordered_set<std::string>& visited, std::unordered_set<std::string>& recStack) bool Network::isCyclic(const string& nodeId, unordered_set<string>& visited, unordered_set<string>& recStack)
{ {
if (visited.find(nodeId) == visited.end()) // if node hasn't been visited yet if (visited.find(nodeId) == visited.end()) // if node hasn't been visited yet
{ {
@@ -78,158 +69,124 @@ namespace bayesnet {
recStack.erase(nodeId); // remove node from recursion stack before function ends recStack.erase(nodeId); // remove node from recursion stack before function ends
return false; return false;
} }
void Network::addEdge(const std::string& parent, const std::string& child) void Network::addEdge(const string& parent, const string& child)
{ {
if (nodes.find(parent) == nodes.end()) { if (nodes.find(parent) == nodes.end()) {
throw std::invalid_argument("Parent node " + parent + " does not exist"); throw invalid_argument("Parent node " + parent + " does not exist");
} }
if (nodes.find(child) == nodes.end()) { if (nodes.find(child) == nodes.end()) {
throw std::invalid_argument("Child node " + child + " does not exist"); throw invalid_argument("Child node " + child + " does not exist");
} }
// Temporarily add edge to check for cycles // Temporarily add edge to check for cycles
nodes[parent]->addChild(nodes[child].get()); nodes[parent]->addChild(nodes[child].get());
nodes[child]->addParent(nodes[parent].get()); nodes[child]->addParent(nodes[parent].get());
std::unordered_set<std::string> visited; unordered_set<string> visited;
std::unordered_set<std::string> recStack; unordered_set<string> recStack;
if (isCyclic(nodes[child]->getName(), visited, recStack)) // if adding this edge forms a cycle if (isCyclic(nodes[child]->getName(), visited, recStack)) // if adding this edge forms a cycle
{ {
// remove problematic edge // remove problematic edge
nodes[parent]->removeChild(nodes[child].get()); nodes[parent]->removeChild(nodes[child].get());
nodes[child]->removeParent(nodes[parent].get()); nodes[child]->removeParent(nodes[parent].get());
throw std::invalid_argument("Adding this edge forms a cycle in the graph."); throw invalid_argument("Adding this edge forms a cycle in the graph.");
} }
} }
std::map<std::string, std::unique_ptr<Node>>& Network::getNodes() map<string, std::unique_ptr<Node>>& Network::getNodes()
{ {
return nodes; return nodes;
} }
void Network::checkFitData(int n_samples, int n_features, int n_samples_y, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights) void Network::fit(torch::Tensor& X, torch::Tensor& y, const vector<string>& featureNames, const string& className)
{ {
if (weights.size(0) != n_samples) { features = featureNames;
throw std::invalid_argument("Weights (" + std::to_string(weights.size(0)) + ") must have the same number of elements as samples (" + std::to_string(n_samples) + ") in Network::fit");
}
if (n_samples != n_samples_y) {
throw std::invalid_argument("X and y must have the same number of samples in Network::fit (" + std::to_string(n_samples) + " != " + std::to_string(n_samples_y) + ")");
}
if (n_features != featureNames.size()) {
throw std::invalid_argument("X and features must have the same number of features in Network::fit (" + std::to_string(n_features) + " != " + std::to_string(featureNames.size()) + ")");
}
if (n_features != features.size() - 1) {
throw std::invalid_argument("X and local features must have the same number of features in Network::fit (" + std::to_string(n_features) + " != " + std::to_string(features.size() - 1) + ")");
}
if (find(features.begin(), features.end(), className) == features.end()) {
throw std::invalid_argument("className not found in Network::features");
}
for (auto& feature : featureNames) {
if (find(features.begin(), features.end(), feature) == features.end()) {
throw std::invalid_argument("Feature " + feature + " not found in Network::features");
}
if (states.find(feature) == states.end()) {
throw std::invalid_argument("Feature " + feature + " not found in states");
}
}
}
void Network::setStates(const std::map<std::string, std::vector<int>>& states)
{
// Set states to every Node in the network
for_each(features.begin(), features.end(), [this, &states](const std::string& feature) {
nodes.at(feature)->setNumStates(states.at(feature).size());
});
classNumStates = nodes.at(className)->getNumStates();
}
// X comes in nxm, where n is the number of features and m the number of samples
void Network::fit(const torch::Tensor& X, const torch::Tensor& y, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states)
{
checkFitData(X.size(1), X.size(0), y.size(0), featureNames, className, states, weights);
this->className = className; this->className = className;
torch::Tensor ytmp = torch::transpose(y.view({ y.size(0), 1 }), 0, 1); dataset.clear();
samples = torch::cat({ X , ytmp }, 0); // Specific part
classNumStates = torch::max(y).item<int>() + 1;
samples = torch::cat({ X, y.view({ y.size(0), 1 }) }, 1);
for (int i = 0; i < featureNames.size(); ++i) { for (int i = 0; i < featureNames.size(); ++i) {
auto row_feature = X.index({ i, "..." }); auto column = torch::flatten(X.index({ "...", i }));
auto k = vector<int>();
for (auto z = 0; z < X.size(0); ++z) {
k.push_back(column[z].item<int>());
}
dataset[featureNames[i]] = k;
} }
completeFit(states, weights); dataset[className] = vector<int>(y.data_ptr<int>(), y.data_ptr<int>() + y.size(0));
completeFit();
} }
void Network::fit(const torch::Tensor& samples, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states) void Network::fit(const vector<vector<int>>& input_data, const vector<int>& labels, const vector<string>& featureNames, const string& className)
{ {
checkFitData(samples.size(1), samples.size(0) - 1, samples.size(1), featureNames, className, states, weights); features = featureNames;
this->className = className; this->className = className;
this->samples = samples; dataset.clear();
completeFit(states, weights); // Specific part
} classNumStates = *max_element(labels.begin(), labels.end()) + 1;
// input_data comes in nxm, where n is the number of features and m the number of samples // Build dataset & tensor of samples
void Network::fit(const std::vector<std::vector<int>>& input_data, const std::vector<int>& labels, const std::vector<double>& weights_, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states) samples = torch::zeros({ static_cast<int>(input_data[0].size()), static_cast<int>(input_data.size() + 1) }, torch::kInt32);
{
const torch::Tensor weights = torch::tensor(weights_, torch::kFloat64);
checkFitData(input_data[0].size(), input_data.size(), labels.size(), featureNames, className, states, weights);
this->className = className;
// Build tensor of samples (nxm) (n+1 because of the class)
samples = torch::zeros({ static_cast<int>(input_data.size() + 1), static_cast<int>(input_data[0].size()) }, torch::kInt32);
for (int i = 0; i < featureNames.size(); ++i) { for (int i = 0; i < featureNames.size(); ++i) {
samples.index_put_({ i, "..." }, torch::tensor(input_data[i], torch::kInt32)); dataset[featureNames[i]] = input_data[i];
samples.index_put_({ "...", i }, torch::tensor(input_data[i], torch::kInt32));
} }
samples.index_put_({ -1, "..." }, torch::tensor(labels, torch::kInt32)); dataset[className] = labels;
completeFit(states, weights); samples.index_put_({ "...", -1 }, torch::tensor(labels, torch::kInt32));
completeFit();
} }
void Network::completeFit(const std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights) void Network::completeFit()
{ {
setStates(states);
laplaceSmoothing = 1.0 / samples.size(1); // To use in CPT computation int maxThreadsRunning = static_cast<int>(std::thread::hardware_concurrency() * maxThreads);
std::vector<std::thread> threads; if (maxThreadsRunning < 1) {
for (auto& node : nodes) { maxThreadsRunning = 1;
threads.emplace_back([this, &node, &weights]() { }
node.second->computeCPT(samples, features, laplaceSmoothing, weights); vector<thread> threads;
mutex mtx;
condition_variable cv;
int activeThreads = 0;
int nextNodeIndex = 0;
while (nextNodeIndex < nodes.size()) {
unique_lock<mutex> lock(mtx);
cv.wait(lock, [&activeThreads, &maxThreadsRunning]() { return activeThreads < maxThreadsRunning; });
if (nextNodeIndex >= nodes.size()) {
break; // No more work remaining
}
threads.emplace_back([this, &nextNodeIndex, &mtx, &cv, &activeThreads]() {
while (true) {
unique_lock<mutex> lock(mtx);
if (nextNodeIndex >= nodes.size()) {
break; // No more work remaining
}
auto& pair = *std::next(nodes.begin(), nextNodeIndex);
++nextNodeIndex;
lock.unlock();
pair.second->computeCPT(dataset, laplaceSmoothing);
lock.lock();
nodes[pair.first] = std::move(pair.second);
lock.unlock();
}
lock_guard<mutex> lock(mtx);
--activeThreads;
cv.notify_one();
}); });
++activeThreads;
} }
for (auto& thread : threads) { for (auto& thread : threads) {
thread.join(); thread.join();
} }
fitted = true; fitted = true;
} }
torch::Tensor Network::predict_tensor(const torch::Tensor& samples, const bool proba)
vector<int> Network::predict(const vector<vector<int>>& tsamples)
{ {
if (!fitted) { if (!fitted) {
throw std::logic_error("You must call fit() before calling predict()"); throw logic_error("You must call fit() before calling predict()");
} }
torch::Tensor result; vector<int> predictions;
result = torch::zeros({ samples.size(1), classNumStates }, torch::kFloat64); vector<int> sample;
for (int i = 0; i < samples.size(1); ++i) {
const torch::Tensor sample = samples.index({ "...", i });
auto psample = predict_sample(sample);
auto temp = torch::tensor(psample, torch::kFloat64);
// result.index_put_({ i, "..." }, torch::tensor(predict_sample(sample), torch::kFloat64));
result.index_put_({ i, "..." }, temp);
}
if (proba)
return result;
return result.argmax(1);
}
// Return mxn tensor of probabilities
torch::Tensor Network::predict_proba(const torch::Tensor& samples)
{
return predict_tensor(samples, true);
}
// Return mxn tensor of probabilities
torch::Tensor Network::predict(const torch::Tensor& samples)
{
return predict_tensor(samples, false);
}
// Return mx1 std::vector of predictions
// tsamples is nxm std::vector of samples
std::vector<int> Network::predict(const std::vector<std::vector<int>>& tsamples)
{
if (!fitted) {
throw std::logic_error("You must call fit() before calling predict()");
}
std::vector<int> predictions;
std::vector<int> sample;
for (int row = 0; row < tsamples[0].size(); ++row) { for (int row = 0; row < tsamples[0].size(); ++row) {
sample.clear(); sample.clear();
for (int col = 0; col < tsamples.size(); ++col) { for (int col = 0; col < tsamples.size(); ++col) {
sample.push_back(tsamples[col][row]); sample.push_back(tsamples[col][row]);
} }
std::vector<double> classProbabilities = predict_sample(sample); vector<double> classProbabilities = predict_sample(sample);
// Find the class with the maximum posterior probability // Find the class with the maximum posterior probability
auto maxElem = max_element(classProbabilities.begin(), classProbabilities.end()); auto maxElem = max_element(classProbabilities.begin(), classProbabilities.end());
int predictedClass = distance(classProbabilities.begin(), maxElem); int predictedClass = distance(classProbabilities.begin(), maxElem);
@@ -237,14 +194,13 @@ namespace bayesnet {
} }
return predictions; return predictions;
} }
// Return mxn std::vector of probabilities vector<vector<double>> Network::predict_proba(const vector<vector<int>>& tsamples)
std::vector<std::vector<double>> Network::predict_proba(const std::vector<std::vector<int>>& tsamples)
{ {
if (!fitted) { if (!fitted) {
throw std::logic_error("You must call fit() before calling predict_proba()"); throw logic_error("You must call fit() before calling predict_proba()");
} }
std::vector<std::vector<double>> predictions; vector<vector<double>> predictions;
std::vector<int> sample; vector<int> sample;
for (int row = 0; row < tsamples[0].size(); ++row) { for (int row = 0; row < tsamples[0].size(); ++row) {
sample.clear(); sample.clear();
for (int col = 0; col < tsamples.size(); ++col) { for (int col = 0; col < tsamples.size(); ++col) {
@@ -254,9 +210,9 @@ namespace bayesnet {
} }
return predictions; return predictions;
} }
double Network::score(const std::vector<std::vector<int>>& tsamples, const std::vector<int>& labels) double Network::score(const vector<vector<int>>& tsamples, const vector<int>& labels)
{ {
std::vector<int> y_pred = predict(tsamples); vector<int> y_pred = predict(tsamples);
int correct = 0; int correct = 0;
for (int i = 0; i < y_pred.size(); ++i) { for (int i = 0; i < y_pred.size(); ++i) {
if (y_pred[i] == labels[i]) { if (y_pred[i] == labels[i]) {
@@ -265,35 +221,20 @@ namespace bayesnet {
} }
return (double)correct / y_pred.size(); return (double)correct / y_pred.size();
} }
// Return 1xn std::vector of probabilities vector<double> Network::predict_sample(const vector<int>& sample)
std::vector<double> Network::predict_sample(const std::vector<int>& sample)
{ {
// Ensure the sample size is equal to the number of features // Ensure the sample size is equal to the number of features
if (sample.size() != features.size() - 1) { if (sample.size() != features.size()) {
throw std::invalid_argument("Sample size (" + std::to_string(sample.size()) + throw invalid_argument("Sample size (" + to_string(sample.size()) +
") does not match the number of features (" + std::to_string(features.size() - 1) + ")"); ") does not match the number of features (" + to_string(features.size()) + ")");
} }
std::map<std::string, int> evidence; map<string, int> evidence;
for (int i = 0; i < sample.size(); ++i) { for (int i = 0; i < sample.size(); ++i) {
evidence[features[i]] = sample[i]; evidence[features[i]] = sample[i];
} }
return exactInference(evidence); return exactInference(evidence);
} }
// Return 1xn std::vector of probabilities double Network::computeFactor(map<string, int>& completeEvidence)
std::vector<double> Network::predict_sample(const torch::Tensor& sample)
{
// Ensure the sample size is equal to the number of features
if (sample.size(0) != features.size() - 1) {
throw std::invalid_argument("Sample size (" + std::to_string(sample.size(0)) +
") does not match the number of features (" + std::to_string(features.size() - 1) + ")");
}
std::map<std::string, int> evidence;
for (int i = 0; i < sample.size(0); ++i) {
evidence[features[i]] = sample[i].item<int>();
}
return exactInference(evidence);
}
double Network::computeFactor(std::map<std::string, int>& completeEvidence)
{ {
double result = 1.0; double result = 1.0;
for (auto& node : getNodes()) { for (auto& node : getNodes()) {
@@ -301,34 +242,35 @@ namespace bayesnet {
} }
return result; return result;
} }
std::vector<double> Network::exactInference(std::map<std::string, int>& evidence) vector<double> Network::exactInference(map<string, int>& evidence)
{ {
std::vector<double> result(classNumStates, 0.0); vector<double> result(classNumStates, 0.0);
std::vector<std::thread> threads; vector<thread> threads;
std::mutex mtx; mutex mtx;
for (int i = 0; i < classNumStates; ++i) { for (int i = 0; i < classNumStates; ++i) {
threads.emplace_back([this, &result, &evidence, i, &mtx]() { threads.emplace_back([this, &result, &evidence, i, &mtx]() {
auto completeEvidence = std::map<std::string, int>(evidence); auto completeEvidence = map<string, int>(evidence);
completeEvidence[getClassName()] = i; completeEvidence[getClassName()] = i;
double factor = computeFactor(completeEvidence); double factor = computeFactor(completeEvidence);
std::lock_guard<std::mutex> lock(mtx); lock_guard<mutex> lock(mtx);
result[i] = factor; result[i] = factor;
}); });
} }
for (auto& thread : threads) { for (auto& thread : threads) {
thread.join(); thread.join();
} }
// Normalize result // Normalize result
double sum = accumulate(result.begin(), result.end(), 0.0); double sum = accumulate(result.begin(), result.end(), 0.0);
transform(result.begin(), result.end(), result.begin(), [sum](const double& value) { return value / sum; }); transform(result.begin(), result.end(), result.begin(), [sum](double x) { return x / sum; });
return result; return result;
} }
std::vector<std::string> Network::show() const vector<string> Network::show()
{ {
std::vector<std::string> result; vector<string> result;
// Draw the network // Draw the network
for (auto& node : nodes) { for (auto& node : nodes) {
std::string line = node.first + " -> "; string line = node.first + " -> ";
for (auto child : node.second->getChildren()) { for (auto child : node.second->getChildren()) {
line += child->getName() + ", "; line += child->getName() + ", ";
} }
@@ -336,12 +278,12 @@ namespace bayesnet {
} }
return result; return result;
} }
std::vector<std::string> Network::graph(const std::string& title) const vector<string> Network::graph(const string& title)
{ {
auto output = std::vector<std::string>(); auto output = vector<string>();
auto prefix = "digraph BayesNet {\nlabel=<BayesNet "; auto prefix = "digraph BayesNet {\nlabel=<BayesNet ";
auto suffix = ">\nfontsize=30\nfontcolor=blue\nlabelloc=t\nlayout=circo\n"; auto suffix = ">\nfontsize=30\nfontcolor=blue\nlabelloc=t\nlayout=circo\n";
std::string header = prefix + title + suffix; string header = prefix + title + suffix;
output.push_back(header); output.push_back(header);
for (auto& node : nodes) { for (auto& node : nodes) {
auto result = node.second->graph(className); auto result = node.second->graph(className);
@@ -350,9 +292,9 @@ namespace bayesnet {
output.push_back("}\n"); output.push_back("}\n");
return output; return output;
} }
std::vector<std::pair<std::string, std::string>> Network::getEdges() const vector<pair<string, string>> Network::getEdges()
{ {
auto edges = std::vector<std::pair<std::string, std::string>>(); auto edges = vector<pair<string, string>>();
for (const auto& node : nodes) { for (const auto& node : nodes) {
auto head = node.first; auto head = node.first;
for (const auto& child : node.second->getChildren()) { for (const auto& child : node.second->getChildren()) {
@@ -362,52 +304,4 @@ namespace bayesnet {
} }
return edges; return edges;
} }
int Network::getNumEdges() const
{
return getEdges().size();
}
std::vector<std::string> Network::topological_sort()
{
/* Check if al the fathers of every node are before the node */
auto result = features;
result.erase(remove(result.begin(), result.end(), className), result.end());
bool ending{ false };
while (!ending) {
ending = true;
for (auto feature : features) {
auto fathers = nodes[feature]->getParents();
for (const auto& father : fathers) {
auto fatherName = father->getName();
if (fatherName == className) {
continue;
}
// Check if father is placed before the actual feature
auto it = find(result.begin(), result.end(), fatherName);
if (it != result.end()) {
auto it2 = find(result.begin(), result.end(), feature);
if (it2 != result.end()) {
if (distance(it, it2) < 0) {
// if it is not, insert it before the feature
result.erase(remove(result.begin(), result.end(), fatherName), result.end());
result.insert(it2, fatherName);
ending = false;
}
} else {
throw std::logic_error("Error in topological sort because of node " + feature + " is not in result");
}
} else {
throw std::logic_error("Error in topological sort because of node father " + fatherName + " is not in result");
}
}
}
}
return result;
}
void Network::dump_cpt() const
{
for (auto& node : nodes) {
std::cout << "* " << node.first << ": (" << node.second->getNumStates() << ") : " << node.second->getCPT().sizes() << std::endl;
std::cout << node.second->getCPT() << std::endl;
}
}
} }

View File

@@ -7,56 +7,49 @@
namespace bayesnet { namespace bayesnet {
class Network { class Network {
private: private:
std::map<std::string, std::unique_ptr<Node>> nodes; map<string, unique_ptr<Node>> nodes;
map<string, vector<int>> dataset;
bool fitted; bool fitted;
float maxThreads = 0.95; float maxThreads;
int classNumStates; int classNumStates;
std::vector<std::string> features; // Including classname vector<string> features;
std::string className; string className;
double laplaceSmoothing; int laplaceSmoothing;
torch::Tensor samples; // nxm tensor used to fit the model torch::Tensor samples;
bool isCyclic(const std::string&, std::unordered_set<std::string>&, std::unordered_set<std::string>&); bool isCyclic(const std::string&, std::unordered_set<std::string>&, std::unordered_set<std::string>&);
std::vector<double> predict_sample(const std::vector<int>&); vector<double> predict_sample(const vector<int>&);
std::vector<double> predict_sample(const torch::Tensor&); vector<double> exactInference(map<string, int>&);
std::vector<double> exactInference(std::map<std::string, int>&); double computeFactor(map<string, int>&);
double computeFactor(std::map<std::string, int>&); double mutual_info(torch::Tensor&, torch::Tensor&);
void completeFit(const std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights); double entropy(torch::Tensor&);
void checkFitData(int n_features, int n_samples, int n_samples_y, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states, const torch::Tensor& weights); double conditionalEntropy(torch::Tensor&, torch::Tensor&);
void setStates(const std::map<std::string, std::vector<int>>&); double mutualInformation(torch::Tensor&, torch::Tensor&);
void completeFit();
public: public:
Network(); Network();
explicit Network(float); explicit Network(const float, const int);
explicit Network(Network&); explicit Network(const float);
~Network() = default; explicit Network(const Network&);
torch::Tensor& getSamples(); torch::Tensor& getSamples();
float getmaxThreads(); float getmaxThreads();
void addNode(const std::string&); void addNode(const string&, const int);
void addEdge(const std::string&, const std::string&); void addEdge(const string&, const string&);
std::map<std::string, std::unique_ptr<Node>>& getNodes(); map<string, std::unique_ptr<Node>>& getNodes();
std::vector<std::string> getFeatures() const; vector<string> getFeatures();
int getStates() const; const int getStates();
std::vector<std::pair<std::string, std::string>> getEdges() const; vector<pair<string, string>> getEdges();
int getNumEdges() const; const int getClassNumStates();
int getClassNumStates() const; const string getClassName();
std::string getClassName() const; void fit(const vector<vector<int>>&, const vector<int>&, const vector<string>&, const string&);
/* void fit(torch::Tensor&, torch::Tensor&, const vector<string>&, const string&);
Notice: Nodes have to be inserted in the same order as they are in the dataset, i.e., first node is first column and so on. vector<int> predict(const vector<vector<int>>&);
*/ //Computes the conditional edge weight of variable index u and v conditioned on class_node
void fit(const std::vector<std::vector<int>>& input_data, const std::vector<int>& labels, const std::vector<double>& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states); torch::Tensor conditionalEdgeWeight();
void fit(const torch::Tensor& X, const torch::Tensor& y, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states); vector<vector<double>> predict_proba(const vector<vector<int>>&);
void fit(const torch::Tensor& samples, const torch::Tensor& weights, const std::vector<std::string>& featureNames, const std::string& className, const std::map<std::string, std::vector<int>>& states); double score(const vector<vector<int>>&, const vector<int>&);
std::vector<int> predict(const std::vector<std::vector<int>>&); // Return mx1 std::vector of predictions vector<string> show();
torch::Tensor predict(const torch::Tensor&); // Return mx1 tensor of predictions vector<string> graph(const string& title); // Returns a vector of strings representing the graph in graphviz format
torch::Tensor predict_tensor(const torch::Tensor& samples, const bool proba); inline string version() { return "0.1.0"; }
std::vector<std::vector<double>> predict_proba(const std::vector<std::vector<int>>&); // Return mxn std::vector of probabilities
torch::Tensor predict_proba(const torch::Tensor&); // Return mxn tensor of probabilities
double score(const std::vector<std::vector<int>>&, const std::vector<int>&);
std::vector<std::string> topological_sort();
std::vector<std::string> show() const;
std::vector<std::string> graph(const std::string& title) const; // Returns a std::vector of std::strings representing the graph in graphviz format
void initialize();
void dump_cpt() const;
inline std::string version() { return "0.2.0"; }
}; };
} }
#endif #endif

View File

@@ -2,8 +2,8 @@
namespace bayesnet { namespace bayesnet {
Node::Node(const std::string& name) Node::Node(const std::string& name, int numStates)
: name(name), numStates(0), cpTable(torch::Tensor()), parents(std::vector<Node*>()), children(std::vector<Node*>()) : name(name), numStates(numStates), cpTable(torch::Tensor()), parents(vector<Node*>()), children(vector<Node*>())
{ {
} }
void Node::clear() void Node::clear()
@@ -14,7 +14,7 @@ namespace bayesnet {
dimensions.clear(); dimensions.clear();
numStates = 0; numStates = 0;
} }
std::string Node::getName() const string Node::getName() const
{ {
return name; return name;
} }
@@ -34,11 +34,11 @@ namespace bayesnet {
{ {
children.push_back(child); children.push_back(child);
} }
std::vector<Node*>& Node::getParents() vector<Node*>& Node::getParents()
{ {
return parents; return parents;
} }
std::vector<Node*>& Node::getChildren() vector<Node*>& Node::getChildren()
{ {
return children; return children;
} }
@@ -63,73 +63,59 @@ namespace bayesnet {
*/ */
unsigned Node::minFill() unsigned Node::minFill()
{ {
std::unordered_set<std::string> neighbors; unordered_set<string> neighbors;
for (auto child : children) { for (auto child : children) {
neighbors.emplace(child->getName()); neighbors.emplace(child->getName());
} }
for (auto parent : parents) { for (auto parent : parents) {
neighbors.emplace(parent->getName()); neighbors.emplace(parent->getName());
} }
auto source = std::vector<std::string>(neighbors.begin(), neighbors.end()); auto source = vector<string>(neighbors.begin(), neighbors.end());
return combinations(source).size(); return combinations(source).size();
} }
std::vector<std::pair<std::string, std::string>> Node::combinations(const std::vector<std::string>& source) vector<pair<string, string>> Node::combinations(const vector<string>& source)
{ {
std::vector<std::pair<std::string, std::string>> result; vector<pair<string, string>> result;
for (int i = 0; i < source.size(); ++i) { for (int i = 0; i < source.size(); ++i) {
std::string temp = source[i]; string temp = source[i];
for (int j = i + 1; j < source.size(); ++j) { for (int j = i + 1; j < source.size(); ++j) {
result.push_back({ temp, source[j] }); result.push_back({ temp, source[j] });
} }
} }
return result; return result;
} }
void Node::computeCPT(const torch::Tensor& dataset, const std::vector<std::string>& features, const double laplaceSmoothing, const torch::Tensor& weights) void Node::computeCPT(map<string, vector<int>>& dataset, const int laplaceSmoothing)
{ {
dimensions.clear();
// Get dimensions of the CPT // Get dimensions of the CPT
dimensions.push_back(numStates); dimensions.push_back(numStates);
transform(parents.begin(), parents.end(), back_inserter(dimensions), [](const auto& parent) { return parent->getNumStates(); }); transform(parents.begin(), parents.end(), back_inserter(dimensions), [](Node* parent) { return parent->getNumStates(); });
// Create a tensor of zeros with the dimensions of the CPT // Create a tensor of zeros with the dimensions of the CPT
cpTable = torch::zeros(dimensions, torch::kFloat) + laplaceSmoothing; cpTable = torch::zeros(dimensions, torch::kFloat) + laplaceSmoothing;
// Fill table with counts // Fill table with counts
auto pos = find(features.begin(), features.end(), name); for (int n_sample = 0; n_sample < dataset[name].size(); ++n_sample) {
if (pos == features.end()) { torch::List<c10::optional<torch::Tensor>> coordinates;
throw std::logic_error("Feature " + name + " not found in dataset"); coordinates.push_back(torch::tensor(dataset[name][n_sample]));
} transform(parents.begin(), parents.end(), back_inserter(coordinates), [&dataset, &n_sample](Node* parent) { return torch::tensor(dataset[parent->getName()][n_sample]); });
int name_index = pos - features.begin();
for (int n_sample = 0; n_sample < dataset.size(1); ++n_sample) {
c10::List<c10::optional<at::Tensor>> coordinates;
coordinates.push_back(dataset.index({ name_index, n_sample }));
for (auto parent : parents) {
pos = find(features.begin(), features.end(), parent->getName());
if (pos == features.end()) {
throw std::logic_error("Feature parent " + parent->getName() + " not found in dataset");
}
int parent_index = pos - features.begin();
coordinates.push_back(dataset.index({ parent_index, n_sample }));
}
// Increment the count of the corresponding coordinate // Increment the count of the corresponding coordinate
cpTable.index_put_({ coordinates }, cpTable.index({ coordinates }) + weights.index({ n_sample }).item<double>()); cpTable.index_put_({ coordinates }, cpTable.index({ coordinates }) + 1);
} }
// Normalize the counts // Normalize the counts
cpTable = cpTable / cpTable.sum(0); cpTable = cpTable / cpTable.sum(0);
} }
float Node::getFactorValue(std::map<std::string, int>& evidence) float Node::getFactorValue(map<string, int>& evidence)
{ {
c10::List<c10::optional<at::Tensor>> coordinates; torch::List<c10::optional<torch::Tensor>> coordinates;
// following predetermined order of indices in the cpTable (see Node.h) // following predetermined order of indices in the cpTable (see Node.h)
coordinates.push_back(at::tensor(evidence[name])); coordinates.push_back(torch::tensor(evidence[name]));
transform(parents.begin(), parents.end(), std::back_inserter(coordinates), [&evidence](const auto& parent) { return at::tensor(evidence[parent->getName()]); }); transform(parents.begin(), parents.end(), back_inserter(coordinates), [&evidence](Node* parent) { return torch::tensor(evidence[parent->getName()]); });
return cpTable.index({ coordinates }).item<float>(); return cpTable.index({ coordinates }).item<float>();
} }
std::vector<std::string> Node::graph(const std::string& className) vector<string> Node::graph(const string& className)
{ {
auto output = std::vector<std::string>(); auto output = vector<string>();
auto suffix = name == className ? ", fontcolor=red, fillcolor=lightblue, style=filled " : ""; auto suffix = name == className ? ", fontcolor=red, fillcolor=lightblue, style=filled " : "";
output.push_back(name + " [shape=circle" + suffix + "] \n"); output.push_back(name + " [shape=circle" + suffix + "] \n");
transform(children.begin(), children.end(), back_inserter(output), [this](const auto& child) { return name + " -> " + child->getName(); }); transform(children.begin(), children.end(), back_inserter(output), [this](Node* child) { return name + " -> " + child->getName() + "\n"; });
return output; return output;
} }
} }

View File

@@ -5,32 +5,33 @@
#include <vector> #include <vector>
#include <string> #include <string>
namespace bayesnet { namespace bayesnet {
using namespace std;
class Node { class Node {
private: private:
std::string name; string name;
std::vector<Node*> parents; vector<Node*> parents;
std::vector<Node*> children; vector<Node*> children;
int numStates; // number of states of the variable int numStates; // number of states of the variable
torch::Tensor cpTable; // Order of indices is 0-> node variable, 1-> 1st parent, 2-> 2nd parent, ... torch::Tensor cpTable; // Order of indices is 0-> node variable, 1-> 1st parent, 2-> 2nd parent, ...
std::vector<int64_t> dimensions; // dimensions of the cpTable vector<int64_t> dimensions; // dimensions of the cpTable
std::vector<std::pair<std::string, std::string>> combinations(const std::vector<std::string>&);
public: public:
explicit Node(const std::string&); vector<pair<string, string>> combinations(const vector<string>&);
Node(const std::string&, int);
void clear(); void clear();
void addParent(Node*); void addParent(Node*);
void addChild(Node*); void addChild(Node*);
void removeParent(Node*); void removeParent(Node*);
void removeChild(Node*); void removeChild(Node*);
std::string getName() const; string getName() const;
std::vector<Node*>& getParents(); vector<Node*>& getParents();
std::vector<Node*>& getChildren(); vector<Node*>& getChildren();
torch::Tensor& getCPT(); torch::Tensor& getCPT();
void computeCPT(const torch::Tensor& dataset, const std::vector<std::string>& features, const double laplaceSmoothing, const torch::Tensor& weights); void computeCPT(map<string, vector<int>>&, const int);
int getNumStates() const; int getNumStates() const;
void setNumStates(int); void setNumStates(int);
unsigned minFill(); unsigned minFill();
std::vector<std::string> graph(const std::string& clasName); // Returns a std::vector of std::strings representing the graph in graphviz format vector<string> graph(const string& clasName); // Returns a vector of strings representing the graph in graphviz format
float getFactorValue(std::map<std::string, int>&); float getFactorValue(map<string, int>&);
}; };
} }
#endif #endif

View File

@@ -1,110 +0,0 @@
#include "Proposal.h"
#include "ArffFiles.h"
namespace bayesnet {
Proposal::Proposal(torch::Tensor& dataset_, std::vector<std::string>& features_, std::string& className_) : pDataset(dataset_), pFeatures(features_), pClassName(className_) {}
Proposal::~Proposal()
{
for (auto& [key, value] : discretizers) {
delete value;
}
}
void Proposal::checkInput(const torch::Tensor& X, const torch::Tensor& y)
{
if (!torch::is_floating_point(X)) {
throw std::invalid_argument("X must be a floating point tensor");
}
if (torch::is_floating_point(y)) {
throw std::invalid_argument("y must be an integer tensor");
}
}
map<std::string, std::vector<int>> Proposal::localDiscretizationProposal(const map<std::string, std::vector<int>>& oldStates, Network& model)
{
// order of local discretization is important. no good 0, 1, 2...
// although we rediscretize features after the local discretization of every feature
auto order = model.topological_sort();
auto& nodes = model.getNodes();
map<std::string, std::vector<int>> states = oldStates;
std::vector<int> indicesToReDiscretize;
bool upgrade = false; // Flag to check if we need to upgrade the model
for (auto feature : order) {
auto nodeParents = nodes[feature]->getParents();
if (nodeParents.size() < 2) continue; // Only has class as parent
upgrade = true;
int index = find(pFeatures.begin(), pFeatures.end(), feature) - pFeatures.begin();
indicesToReDiscretize.push_back(index); // We need to re-discretize this feature
std::vector<std::string> parents;
transform(nodeParents.begin(), nodeParents.end(), back_inserter(parents), [](const auto& p) { return p->getName(); });
// Remove class as parent as it will be added later
parents.erase(remove(parents.begin(), parents.end(), pClassName), parents.end());
// Get the indices of the parents
std::vector<int> indices;
indices.push_back(-1); // Add class index
transform(parents.begin(), parents.end(), back_inserter(indices), [&](const auto& p) {return find(pFeatures.begin(), pFeatures.end(), p) - pFeatures.begin(); });
// Now we fit the discretizer of the feature, conditioned on its parents and the class i.e. discretizer.fit(X[index], X[indices] + y)
std::vector<std::string> yJoinParents(Xf.size(1));
for (auto idx : indices) {
for (int i = 0; i < Xf.size(1); ++i) {
yJoinParents[i] += to_string(pDataset.index({ idx, i }).item<int>());
}
}
auto arff = ArffFiles();
auto yxv = arff.factorize(yJoinParents);
auto xvf_ptr = Xf.index({ index }).data_ptr<float>();
auto xvf = std::vector<mdlp::precision_t>(xvf_ptr, xvf_ptr + Xf.size(1));
discretizers[feature]->fit(xvf, yxv);
}
if (upgrade) {
// Discretize again X (only the affected indices) with the new fitted discretizers
for (auto index : indicesToReDiscretize) {
auto Xt_ptr = Xf.index({ index }).data_ptr<float>();
auto Xt = std::vector<float>(Xt_ptr, Xt_ptr + Xf.size(1));
pDataset.index_put_({ index, "..." }, torch::tensor(discretizers[pFeatures[index]]->transform(Xt)));
auto xStates = std::vector<int>(discretizers[pFeatures[index]]->getCutPoints().size() + 1);
iota(xStates.begin(), xStates.end(), 0);
//Update new states of the feature/node
states[pFeatures[index]] = xStates;
}
const torch::Tensor weights = torch::full({ pDataset.size(1) }, 1.0 / pDataset.size(1), torch::kDouble);
model.fit(pDataset, weights, pFeatures, pClassName, states);
}
return states;
}
map<std::string, std::vector<int>> Proposal::fit_local_discretization(const torch::Tensor& y)
{
// Discretize the continuous input data and build pDataset (Classifier::dataset)
int m = Xf.size(1);
int n = Xf.size(0);
map<std::string, std::vector<int>> states;
pDataset = torch::zeros({ n + 1, m }, torch::kInt32);
auto yv = std::vector<int>(y.data_ptr<int>(), y.data_ptr<int>() + y.size(0));
// discretize input data by feature(row)
for (auto i = 0; i < pFeatures.size(); ++i) {
auto* discretizer = new mdlp::CPPFImdlp();
auto Xt_ptr = Xf.index({ i }).data_ptr<float>();
auto Xt = std::vector<float>(Xt_ptr, Xt_ptr + Xf.size(1));
discretizer->fit(Xt, yv);
pDataset.index_put_({ i, "..." }, torch::tensor(discretizer->transform(Xt)));
auto xStates = std::vector<int>(discretizer->getCutPoints().size() + 1);
iota(xStates.begin(), xStates.end(), 0);
states[pFeatures[i]] = xStates;
discretizers[pFeatures[i]] = discretizer;
}
int n_classes = torch::max(y).item<int>() + 1;
auto yStates = std::vector<int>(n_classes);
iota(yStates.begin(), yStates.end(), 0);
states[pClassName] = yStates;
pDataset.index_put_({ n, "..." }, y);
return states;
}
torch::Tensor Proposal::prepareX(torch::Tensor& X)
{
auto Xtd = torch::zeros_like(X, torch::kInt32);
for (int i = 0; i < X.size(0); ++i) {
auto Xt = std::vector<float>(X[i].data_ptr<float>(), X[i].data_ptr<float>() + X.size(1));
auto Xd = discretizers[pFeatures[i]]->transform(Xt);
Xtd.index_put_({ i }, torch::tensor(Xd, torch::kInt32));
}
return Xtd;
}
}

View File

@@ -1,30 +0,0 @@
#ifndef PROPOSAL_H
#define PROPOSAL_H
#include <string>
#include <map>
#include <vector>
#include <torch/torch.h>
#include "Network.h"
#include "CPPFImdlp.h"
#include "Classifier.h"
namespace bayesnet {
    // Mixin that adds local (MDLP) discretization of continuous inputs to a
    // classifier; the *Ld classifiers inherit from both a base model and Proposal.
    class Proposal {
    public:
        Proposal(torch::Tensor& pDataset, std::vector<std::string>& features_, std::string& className_);
        virtual ~Proposal();
    protected:
        // Validates the input tensors (types/shape) before fitting
        void checkInput(const torch::Tensor& X, const torch::Tensor& y);
        // Discretizes continuous X with the already fitted per-feature discretizers
        torch::Tensor prepareX(torch::Tensor& X);
        // Re-discretizes each feature conditioned on its parents in the fitted model
        // (std::map is spelled out: this header has no using-directive of its own)
        std::map<std::string, std::vector<int>> localDiscretizationProposal(const std::map<std::string, std::vector<int>>& states, Network& model);
        // Initial class-conditioned discretization; fills pDataset and returns states
        std::map<std::string, std::vector<int>> fit_local_discretization(const torch::Tensor& y);
        torch::Tensor Xf; // X continuous nxm tensor
        torch::Tensor y; // y discrete nx1 tensor
        // Raw owning pointers — presumably released in ~Proposal(); verify
        std::map<std::string, mdlp::CPPFImdlp*> discretizers;
    private:
        torch::Tensor& pDataset; // (n+1)xm tensor
        std::vector<std::string>& pFeatures;
        std::string& pClassName;
    };
}
#endif

View File

@@ -4,7 +4,7 @@ namespace bayesnet {
SPODE::SPODE(int root) : Classifier(Network()), root(root) {} SPODE::SPODE(int root) : Classifier(Network()), root(root) {}
void SPODE::buildModel(const torch::Tensor& weights) void SPODE::train()
{ {
// 0. Add all nodes to the model // 0. Add all nodes to the model
addNodes(); addNodes();
@@ -17,7 +17,7 @@ namespace bayesnet {
} }
} }
} }
std::vector<std::string> SPODE::graph(const std::string& name) const vector<string> SPODE::graph(string name )
{ {
return model.graph(name); return model.graph(name);
} }

View File

@@ -1,17 +1,15 @@
#ifndef SPODE_H #ifndef SPODE_H
#define SPODE_H #define SPODE_H
#include "Classifier.h" #include "Classifier.h"
namespace bayesnet { namespace bayesnet {
class SPODE : public Classifier { class SPODE : public Classifier {
private: private:
int root; int root;
protected: protected:
void buildModel(const torch::Tensor& weights) override; void train() override;
public: public:
explicit SPODE(int root); explicit SPODE(int root);
virtual ~SPODE() = default; vector<string> graph(string name = "SPODE") override;
std::vector<std::string> graph(const std::string& name = "SPODE") const override;
}; };
} }
#endif #endif

View File

@@ -1,47 +0,0 @@
#include "SPODELd.h"
namespace bayesnet {
    // Wire the Proposal mixin to this classifier's dataset/features/className members.
    SPODELd::SPODELd(int root) : SPODE(root), Proposal(dataset, features, className) {}
    // Fit from continuous X and discrete y: discretize locally, build the SPODE
    // structure, then refine the discretization conditioned on each node's parents.
    // Note: the states_ parameter is not read; the member `states` computed by the
    // local discretization is used instead.
    SPODELd& SPODELd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_)
    {
        checkInput(X_, y_);
        features = features_;
        className = className_;
        Xf = X_;
        y = y_;
        // Fills std::vectors Xv & yv with the data from tensors X_ (discretized) & y
        states = fit_local_discretization(y);
        // We have discretized the input data
        // 1st we need to fit the model to build the normal SPODE structure, SPODE::fit initializes the base Bayesian network
        SPODE::fit(dataset, features, className, states);
        states = localDiscretizationProposal(states, model);
        return *this;
    }
    // Fit from a single continuous dataset tensor (feature rows + class as last row).
    // The states_ parameter is not read; states computed here are used instead.
    SPODELd& SPODELd::fit(torch::Tensor& dataset, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_)
    {
        if (!torch::is_floating_point(dataset)) {
            throw std::runtime_error("Dataset must be a floating point tensor");
        }
        // Split the parameter into continuous features (Xf) and the class row (y)
        Xf = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), "..." }).clone();
        y = dataset.index({ -1, "..." }).clone();
        features = features_;
        className = className_;
        // Fills std::vectors Xv & yv with the data from tensors X_ (discretized) & y
        states = fit_local_discretization(y);
        // We have discretized the input data
        // 1st we need to fit the model to build the normal SPODE structure, SPODE::fit initializes the base Bayesian network
        // NOTE(review): the parameter `dataset` shadows the inherited `dataset`
        // member that fit_local_discretization fills (via the Proposal reference),
        // so this call passes the original continuous tensor — confirm intended.
        SPODE::fit(dataset, features, className, states);
        states = localDiscretizationProposal(states, model);
        return *this;
    }
torch::Tensor SPODELd::predict(torch::Tensor& X)
{
auto Xt = prepareX(X);
return SPODE::predict(Xt);
}
std::vector<std::string> SPODELd::graph(const std::string& name) const
{
return SPODE::graph(name);
}
}

View File

@@ -1,18 +0,0 @@
#ifndef SPODELD_H
#define SPODELD_H
#include "SPODE.h"
#include "Proposal.h"
namespace bayesnet {
    // SPODE classifier with local (MDLP) discretization of continuous inputs,
    // provided by the Proposal mixin.
    class SPODELd : public SPODE, public Proposal {
    public:
        explicit SPODELd(int root);
        virtual ~SPODELd() = default;
        // Fit from separate continuous X and discrete y
        SPODELd& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states) override;
        // Fit from one continuous dataset tensor (feature rows + class row)
        SPODELd& fit(torch::Tensor& dataset, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states) override;
        std::vector<std::string> graph(const std::string& name = "SPODE") const override;
        // Discretizes X before delegating to SPODE::predict
        torch::Tensor predict(torch::Tensor& X) override;
        static inline std::string version() { return "0.0.1"; };
    };
}
#endif // !SPODELD_H

View File

@@ -1,27 +1,29 @@
#include "TAN.h" #include "TAN.h"
namespace bayesnet { namespace bayesnet {
using namespace torch;
TAN::TAN() : Classifier(Network()) {} TAN::TAN() : Classifier(Network()) {}
void TAN::buildModel(const torch::Tensor& weights) void TAN::train()
{ {
// 0. Add all nodes to the model // 0. Add all nodes to the model
addNodes(); addNodes();
// 1. Compute mutual information between each feature and the class and set the root node // 1. Compute mutual information between each feature and the class and set the root node
// as the highest mutual information with the class // as the highest mutual information with the class
auto mi = std::vector <std::pair<int, float >>(); auto mi = vector <pair<int, float >>();
torch::Tensor class_dataset = dataset.index({ -1, "..." }); Tensor class_dataset = dataset.index({ "...", -1 });
for (int i = 0; i < static_cast<int>(features.size()); ++i) { for (int i = 0; i < static_cast<int>(features.size()); ++i) {
torch::Tensor feature_dataset = dataset.index({ i, "..." }); Tensor feature_dataset = dataset.index({ "...", i });
auto mi_value = metrics.mutualInformation(class_dataset, feature_dataset, weights); auto mi_value = metrics.mutualInformation(class_dataset, feature_dataset);
mi.push_back({ i, mi_value }); mi.push_back({ i, mi_value });
} }
sort(mi.begin(), mi.end(), [](const auto& left, const auto& right) {return left.second < right.second;}); sort(mi.begin(), mi.end(), [](const auto& left, const auto& right) {return left.second < right.second;});
auto root = mi[mi.size() - 1].first; auto root = mi[mi.size() - 1].first;
// 2. Compute mutual information between each feature and the class // 2. Compute mutual information between each feature and the class
auto weights_matrix = metrics.conditionalEdge(weights); auto weights = metrics.conditionalEdge();
// 3. Compute the maximum spanning tree // 3. Compute the maximum spanning tree
auto mst = metrics.maximumSpanningTree(features, weights_matrix, root); auto mst = metrics.maximumSpanningTree(features, weights, root);
// 4. Add edges from the maximum spanning tree to the model // 4. Add edges from the maximum spanning tree to the model
for (auto i = 0; i < mst.size(); ++i) { for (auto i = 0; i < mst.size(); ++i) {
auto [from, to] = mst[i]; auto [from, to] = mst[i];
@@ -32,7 +34,7 @@ namespace bayesnet {
model.addEdge(className, feature); model.addEdge(className, feature);
} }
} }
std::vector<std::string> TAN::graph(const std::string& title) const vector<string> TAN::graph(string title)
{ {
return model.graph(title); return model.graph(title);
} }

View File

@@ -2,14 +2,15 @@
#define TAN_H #define TAN_H
#include "Classifier.h" #include "Classifier.h"
namespace bayesnet { namespace bayesnet {
using namespace std;
using namespace torch;
class TAN : public Classifier { class TAN : public Classifier {
private: private:
protected: protected:
void buildModel(const torch::Tensor& weights) override; void train() override;
public: public:
TAN(); TAN();
virtual ~TAN() = default; vector<string> graph(string name = "TAN") override;
std::vector<std::string> graph(const std::string& name = "TAN") const override;
}; };
} }
#endif #endif

View File

@@ -1,30 +0,0 @@
#include "TANLd.h"
namespace bayesnet {
    // Wire the Proposal mixin to this classifier's dataset/features/className members.
    TANLd::TANLd() : TAN(), Proposal(dataset, features, className) {}
    // Fit from continuous X and discrete y: discretize locally, build the TAN
    // structure, then refine the discretization conditioned on each node's parents.
    // Note: the states_ parameter is not read; the member `states` computed by the
    // local discretization is used instead.
    TANLd& TANLd::fit(torch::Tensor& X_, torch::Tensor& y_, const std::vector<std::string>& features_, const std::string& className_, map<std::string, std::vector<int>>& states_)
    {
        checkInput(X_, y_);
        features = features_;
        className = className_;
        Xf = X_;
        y = y_;
        // Fills std::vectors Xv & yv with the data from tensors X_ (discretized) & y
        states = fit_local_discretization(y);
        // We have discretized the input data
        // 1st we need to fit the model to build the normal TAN structure, TAN::fit initializes the base Bayesian network
        TAN::fit(dataset, features, className, states);
        states = localDiscretizationProposal(states, model);
        return *this;
    }
torch::Tensor TANLd::predict(torch::Tensor& X)
{
auto Xt = prepareX(X);
return TAN::predict(Xt);
}
std::vector<std::string> TANLd::graph(const std::string& name) const
{
return TAN::graph(name);
}
}

View File

@@ -1,18 +0,0 @@
#ifndef TANLD_H
#define TANLD_H
#include "TAN.h"
#include "Proposal.h"
namespace bayesnet {
    // TAN classifier with local (MDLP) discretization of continuous inputs,
    // provided by the Proposal mixin.
    class TANLd : public TAN, public Proposal {
    private:
    public:
        TANLd();
        virtual ~TANLd() = default;
        // Fit from separate continuous X and discrete y
        TANLd& fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, map<std::string, std::vector<int>>& states) override;
        std::vector<std::string> graph(const std::string& name = "TAN") const override;
        // Discretizes X before delegating to TAN::predict
        torch::Tensor predict(torch::Tensor& X) override;
        static inline std::string version() { return "0.0.1"; };
    };
}
#endif // !TANLD_H

View File

@@ -1,23 +1,24 @@
#include "bayesnetUtils.h" #include "bayesnetUtils.h"
namespace bayesnet { namespace bayesnet {
// Return the indices in descending order using namespace std;
std::vector<int> argsort(std::vector<double>& nums) using namespace torch;
vector<int> argsort(vector<float>& nums)
{ {
int n = nums.size(); int n = nums.size();
std::vector<int> indices(n); vector<int> indices(n);
iota(indices.begin(), indices.end(), 0); iota(indices.begin(), indices.end(), 0);
sort(indices.begin(), indices.end(), [&nums](int i, int j) {return nums[i] > nums[j];}); sort(indices.begin(), indices.end(), [&nums](int i, int j) {return nums[i] > nums[j];});
return indices; return indices;
} }
std::vector<std::vector<int>> tensorToVector(torch::Tensor& tensor) vector<vector<int>> tensorToVector(Tensor& tensor)
{ {
// convert mxn tensor to nxm std::vector // convert mxn tensor to nxm vector
std::vector<std::vector<int>> result; vector<vector<int>> result;
// Iterate over cols // Iterate over cols
for (int i = 0; i < tensor.size(1); ++i) { for (int i = 0; i < tensor.size(1); ++i) {
auto col_tensor = tensor.index({ "...", i }); auto col_tensor = tensor.index({ "...", i });
auto col = std::vector<int>(col_tensor.data_ptr<int>(), col_tensor.data_ptr<int>() + tensor.size(0)); auto col = vector<int>(col_tensor.data_ptr<int>(), col_tensor.data_ptr<int>() + tensor.size(0));
result.push_back(col); result.push_back(col);
} }
return result; return result;

View File

@@ -3,7 +3,9 @@
#include <torch/torch.h> #include <torch/torch.h>
#include <vector> #include <vector>
namespace bayesnet { namespace bayesnet {
std::vector<int> argsort(std::vector<double>& nums); using namespace std;
std::vector<std::vector<int>> tensorToVector(torch::Tensor& tensor); using namespace torch;
vector<int> argsort(vector<float>& nums);
vector<vector<int>> tensorToVector(Tensor& tensor);
} }
#endif //BAYESNET_UTILS_H #endif //BAYESNET_UTILS_H

View File

@@ -1,343 +0,0 @@
#include <filesystem>
#include <set>
#include <fstream>
#include <iostream>
#include <sstream>
#include <algorithm>
#include "BestResults.h"
#include "Result.h"
#include "Colors.h"
#include "Statistics.h"
#include "BestResultsExcel.h"
#include "CLocale.h"
namespace fs = std::filesystem;
// function ftime_to_std::string, Code taken from
// https://stackoverflow.com/a/58237530/1389271
template <typename TP>
std::string ftime_to_string(TP tp)
{
auto sctp = std::chrono::time_point_cast<std::chrono::system_clock::duration>(tp - TP::clock::now()
+ std::chrono::system_clock::now());
auto tt = std::chrono::system_clock::to_time_t(sctp);
std::tm* gmt = std::gmtime(&tt);
std::stringstream buffer;
buffer << std::put_time(gmt, "%Y-%m-%d %H:%M");
return buffer.str();
}
namespace platform {
    // Scan all matching result files and write a json file mapping each dataset
    // to its best [score, hyperparameters, source file]; returns the file name.
    std::string BestResults::build()
    {
        auto files = loadResultFiles();
        if (files.size() == 0) {
            std::cerr << Colors::MAGENTA() << "No result files were found!" << Colors::RESET() << std::endl;
            exit(1);
        }
        json bests;
        for (const auto& file : files) {
            auto result = Result(path, file);
            auto data = result.load();
            for (auto const& item : data.at("results")) {
                bool update = false;
                // Check if results file contains only one dataset
                auto datasetName = item.at("dataset").get<std::string>();
                if (bests.contains(datasetName)) {
                    // Entry layout: [score, hyperparameters, file] — compare scores
                    if (item.at("score").get<double>() > bests[datasetName].at(0).get<double>()) {
                        update = true;
                    }
                } else {
                    update = true;
                }
                if (update) {
                    bests[datasetName] = { item.at("score").get<double>(), item.at("hyperparameters"), file };
                }
            }
        }
        std::string bestFileName = path + bestResultFile();
        // fopen is only an existence check to warn before overwriting
        if (FILE* fileTest = fopen(bestFileName.c_str(), "r")) {
            fclose(fileTest);
            std::cout << Colors::MAGENTA() << "File " << bestFileName << " already exists and it shall be overwritten." << Colors::RESET() << std::endl;
        }
        std::ofstream file(bestFileName);
        file << bests;
        file.close();
        return bestFileName;
    }
std::string BestResults::bestResultFile()
{
return "best_results_" + score + "_" + model + ".json";
}
std::pair<std::string, std::string> getModelScore(std::string name)
{
// results_accuracy_BoostAODE_MacBookpro16_2023-09-06_12:27:00_1.json
int i = 0;
auto pos = name.find("_");
auto pos2 = name.find("_", pos + 1);
std::string score = name.substr(pos + 1, pos2 - pos - 1);
pos = name.find("_", pos2 + 1);
std::string model = name.substr(pos2 + 1, pos - pos2 - 1);
return { model, score };
}
std::vector<std::string> BestResults::loadResultFiles()
{
std::vector<std::string> files;
using std::filesystem::directory_iterator;
std::string fileModel, fileScore;
for (const auto& file : directory_iterator(path)) {
auto fileName = file.path().filename().string();
if (fileName.find(".json") != std::string::npos && fileName.find("results_") == 0) {
tie(fileModel, fileScore) = getModelScore(fileName);
if (score == fileScore && (model == fileModel || model == "any")) {
files.push_back(fileName);
}
}
}
return files;
}
    // Parse a json results file; throws std::invalid_argument if it cannot be opened.
    json BestResults::loadFile(const std::string& fileName)
    {
        std::ifstream resultData(fileName);
        if (resultData.is_open()) {
            json data = json::parse(resultData);
            return data;
        }
        throw std::invalid_argument("Unable to open result file. [" + fileName + "]");
    }
std::vector<std::string> BestResults::getModels()
{
std::set<std::string> models;
std::vector<std::string> result;
auto files = loadResultFiles();
if (files.size() == 0) {
std::cerr << Colors::MAGENTA() << "No result files were found!" << Colors::RESET() << std::endl;
exit(1);
}
std::string fileModel, fileScore;
for (const auto& file : files) {
// extract the model from the file name
tie(fileModel, fileScore) = getModelScore(file);
// add the model to the std::vector of models
models.insert(fileModel);
}
result = std::vector<std::string>(models.begin(), models.end());
return result;
}
std::vector<std::string> BestResults::getDatasets(json table)
{
std::vector<std::string> datasets;
for (const auto& dataset : table.items()) {
datasets.push_back(dataset.key());
}
return datasets;
}
void BestResults::buildAll()
{
auto models = getModels();
for (const auto& model : models) {
std::cout << "Building best results for model: " << model << std::endl;
this->model = model;
build();
}
model = "any";
}
    // Pretty-print the best-results file of the current model to stdout.
    void BestResults::listFile()
    {
        std::string bestFileName = path + bestResultFile();
        // fopen is used only as an existence check
        if (FILE* fileTest = fopen(bestFileName.c_str(), "r")) {
            fclose(fileTest);
        } else {
            std::cerr << Colors::MAGENTA() << "File " << bestFileName << " doesn't exist." << Colors::RESET() << std::endl;
            exit(1);
        }
        auto temp = ConfigLocale();
        auto date = ftime_to_string(std::filesystem::last_write_time(bestFileName));
        auto data = loadFile(bestFileName);
        auto datasets = getDatasets(data);
        // Column widths adapt to the longest dataset/file/hyperparameter strings
        int maxDatasetName = (*max_element(datasets.begin(), datasets.end(), [](const std::string& a, const std::string& b) { return a.size() < b.size(); })).size();
        int maxFileName = 0;
        int maxHyper = 15;
        for (auto const& item : data.items()) {
            maxHyper = std::max(maxHyper, (int)item.value().at(1).dump().size());
            maxFileName = std::max(maxFileName, (int)item.value().at(2).get<std::string>().size());
        }
        std::stringstream oss;
        oss << Colors::GREEN() << "Best results for " << model << " as of " << date << std::endl;
        std::cout << oss.str();
        // -8 discounts non-printing characters (ANSI color escape + newline)
        std::cout << std::string(oss.str().size() - 8, '-') << std::endl;
        std::cout << Colors::GREEN() << " #  " << std::setw(maxDatasetName + 1) << std::left << "Dataset" << "Score       " << std::setw(maxFileName) << "File" << " Hyperparameters" << std::endl;
        std::cout << "=== " << std::string(maxDatasetName, '=') << " =========== " << std::string(maxFileName, '=') << " " << std::string(maxHyper, '=') << std::endl;
        auto i = 0;
        bool odd = true;
        double total = 0;
        // Each entry is [score, hyperparameters, file]; alternate row colors
        for (auto const& item : data.items()) {
            auto color = odd ? Colors::BLUE() : Colors::CYAN();
            double value = item.value().at(0).get<double>();
            std::cout << color << std::setw(3) << std::fixed << std::right << i++ << " ";
            std::cout << std::setw(maxDatasetName) << std::left << item.key() << " ";
            std::cout << std::setw(11) << std::setprecision(9) << std::fixed << value << " ";
            std::cout << std::setw(maxFileName) << item.value().at(2).get<std::string>() << " ";
            std::cout << item.value().at(1) << " ";
            std::cout << std::endl;
            total += value;
            odd = !odd;
        }
        std::cout << Colors::GREEN() << "=== " << std::string(maxDatasetName, '=') << " ===========" << std::endl;
        std::cout << std::setw(5 + maxDatasetName) << "Total.................. " << std::setw(11) << std::setprecision(8) << std::fixed << total << std::endl;
    }
    // Merge the best-results files of the given models into one json table;
    // stores the oldest file write time under "dateTable".
    json BestResults::buildTableResults(std::vector<std::string> models)
    {
        json table;
        // Despite the name, maxDate ends up holding the OLDEST write time
        auto maxDate = std::filesystem::file_time_type::max();
        for (const auto& model : models) {
            this->model = model;
            std::string bestFileName = path + bestResultFile();
            if (FILE* fileTest = fopen(bestFileName.c_str(), "r")) {
                fclose(fileTest);
            } else {
                std::cerr << Colors::MAGENTA() << "File " << bestFileName << " doesn't exist." << Colors::RESET() << std::endl;
                exit(1);
            }
            auto dateWrite = std::filesystem::last_write_time(bestFileName);
            if (dateWrite < maxDate) {
                maxDate = dateWrite;
            }
            auto data = loadFile(bestFileName);
            table[model] = data;
        }
        table["dateTable"] = ftime_to_string(maxDate);
        return table;
    }
    // Print the cross-model comparison table, highlighting per-dataset maxima
    // in red and accumulating per-model totals.
    void BestResults::printTableResults(std::vector<std::string> models, json table)
    {
        std::stringstream oss;
        oss << Colors::GREEN() << "Best results for " << score << " as of " << table.at("dateTable").get<std::string>() << std::endl;
        std::cout << oss.str();
        // -8 discounts non-printing characters (ANSI color escape + newline)
        std::cout << std::string(oss.str().size() - 8, '-') << std::endl;
        std::cout << Colors::GREEN() << " #  " << std::setw(maxDatasetName + 1) << std::left << std::string("Dataset");
        for (const auto& model : models) {
            std::cout << std::setw(maxModelName) << std::left << model << " ";
        }
        std::cout << std::endl;
        std::cout << "=== " << std::string(maxDatasetName, '=') << " ";
        for (const auto& model : models) {
            std::cout << std::string(maxModelName, '=') << " ";
        }
        std::cout << std::endl;
        auto i = 0;
        bool odd = true;
        std::map<std::string, double> totals;
        // NOTE(review): nDatasets is unused
        int nDatasets = table.begin().value().size();
        for (const auto& model : models) {
            totals[model] = 0.0;
        }
        auto datasets = getDatasets(table.begin().value());
        for (auto const& dataset : datasets) {
            auto color = odd ? Colors::BLUE() : Colors::CYAN();
            std::cout << color << std::setw(3) << std::fixed << std::right << i++ << " ";
            std::cout << std::setw(maxDatasetName) << std::left << dataset << " ";
            double maxValue = 0;
            // Find out the max value for this dataset
            for (const auto& model : models) {
                double value = table[model].at(dataset).at(0).get<double>();
                if (value > maxValue) {
                    maxValue = value;
                }
            }
            // Print the row with red colors on max values
            for (const auto& model : models) {
                std::string efectiveColor = color;
                double value = table[model].at(dataset).at(0).get<double>();
                if (value == maxValue) {
                    efectiveColor = Colors::RED();
                }
                totals[model] += value;
                std::cout << efectiveColor << std::setw(maxModelName) << std::setprecision(maxModelName - 2) << std::fixed << value << " ";
            }
            std::cout << std::endl;
            odd = !odd;
        }
        std::cout << Colors::GREEN() << "=== " << std::string(maxDatasetName, '=') << " ";
        for (const auto& model : models) {
            std::cout << std::string(maxModelName, '=') << " ";
        }
        std::cout << std::endl;
        std::cout << Colors::GREEN() << std::setw(5 + maxDatasetName) << "    Totals...................";
        // Highest total is highlighted in red as well
        double max = 0.0;
        for (const auto& total : totals) {
            if (total.second > max) {
                max = total.second;
            }
        }
        for (const auto& model : models) {
            std::string efectiveColor = Colors::GREEN();
            if (totals[model] == max) {
                efectiveColor = Colors::RED();
            }
            std::cout << efectiveColor << std::right << std::setw(maxModelName) << std::setprecision(maxModelName - 4) << std::fixed << totals[model] << " ";
        }
        std::cout << std::endl;
    }
    // List the best results of the current model; optionally export to Excel.
    void BestResults::reportSingle(bool excel)
    {
        listFile();
        if (excel) {
            auto models = getModels();
            // Build the table of results
            json table = buildTableResults(models);
            std::vector<std::string> datasets = getDatasets(table.begin().value());
            // NOTE: local `excel` object shadows the bool parameter from here on
            BestResultsExcel excel(score, datasets);
            excel.reportSingle(model, path + bestResultFile());
            messageExcelFile(excel.getFileName());
        }
    }
    // Cross-model comparison report, with optional Friedman test and Excel export.
    void BestResults::reportAll(bool excel)
    {
        auto models = getModels();
        // Build the table of results
        json table = buildTableResults(models);
        std::vector<std::string> datasets = getDatasets(table.begin().value());
        // Column widths: at least 12/25 chars, grown to the longest names
        maxModelName = (*max_element(models.begin(), models.end(), [](const std::string& a, const std::string& b) { return a.size() < b.size(); })).size();
        maxModelName = std::max(12, maxModelName);
        maxDatasetName = (*max_element(datasets.begin(), datasets.end(), [](const std::string& a, const std::string& b) { return a.size() < b.size(); })).size();
        maxDatasetName = std::max(25, maxDatasetName);
        // Print the table of results
        printTableResults(models, table);
        // Compute the Friedman test
        std::map<std::string, std::map<std::string, float>> ranksModels;
        if (friedman) {
            Statistics stats(models, datasets, table, significance);
            auto result = stats.friedmanTest();
            stats.postHocHolmTest(result);
            ranksModels = stats.getRanks();
        }
        if (excel) {
            BestResultsExcel excel(score, datasets);
            excel.reportAll(models, table, ranksModels, friedman, significance);
            if (friedman) {
                int idx = -1;
                // Sentinel larger than any plausible rank sum
                double min = 2000;
                // Find out the control model (lowest total rank)
                auto totals = std::vector<double>(models.size(), 0.0);
                for (const auto& dataset : datasets) {
                    for (int i = 0; i < models.size(); ++i) {
                        totals[i] += ranksModels[dataset][models[i]];
                    }
                }
                for (int i = 0; i < models.size(); ++i) {
                    if (totals[i] < min) {
                        min = totals[i];
                        idx = i;
                    }
                }
                model = models.at(idx);
                excel.reportSingle(model, path + bestResultFile());
            }
            messageExcelFile(excel.getFileName());
        }
    }
    // Inform the user where the generated Excel file was written.
    void BestResults::messageExcelFile(const std::string& fileName)
    {
        std::cout << Colors::YELLOW() << "** Excel file generated: " << fileName << Colors::RESET() << std::endl;
    }
}

View File

@@ -1,36 +0,0 @@
#ifndef BESTRESULTS_H
#define BESTRESULTS_H
#include <string>
#include <vector>
#include <nlohmann/json.hpp>
using json = nlohmann::json;
namespace platform {
    // Builds, lists and reports the "best result per dataset" files computed
    // from the experiment result files of one model or of all of them.
    class BestResults {
    public:
        explicit BestResults(const std::string& path, const std::string& score, const std::string& model, bool friedman, double significance = 0.05)
            : path(path), score(score), model(model), friedman(friedman), significance(significance)
        {
        }
        std::string build();           // create the best-results file for model; returns its name
        void reportSingle(bool excel); // print (and optionally export) the best results of model
        void reportAll(bool excel);    // cross-model comparison (+ Friedman test if enabled)
        void buildAll();               // build() for every model found
    private:
        std::vector<std::string> getModels();
        std::vector<std::string> getDatasets(json table);
        std::vector<std::string> loadResultFiles();
        void messageExcelFile(const std::string& fileName);
        json buildTableResults(std::vector<std::string> models);
        void printTableResults(std::vector<std::string> models, json table);
        std::string bestResultFile();
        json loadFile(const std::string& fileName);
        void listFile();
        std::string path;
        std::string score;
        std::string model;   // "any" selects every model
        bool friedman;       // run Friedman/post-hoc tests in reportAll
        double significance;
        int maxModelName = 0;
        int maxDatasetName = 0;
    };
}
#endif //BESTRESULTS_H

View File

@@ -1,300 +0,0 @@
#include <sstream>
#include "BestResultsExcel.h"
#include "Paths.h"
#include <map>
#include <nlohmann/json.hpp>
#include "Statistics.h"
#include "ReportExcel.h"
namespace platform {
json loadResultData(const std::string& fileName)
{
json data;
std::ifstream resultData(fileName);
if (resultData.is_open()) {
data = json::parse(resultData);
} else {
throw std::invalid_argument("Unable to open result file. [" + fileName + "]");
}
return data;
}
std::string getColumnName(int colNum)
{
std::string columnName = "";
if (colNum == 0)
return "A";
while (colNum > 0) {
int modulo = colNum % 26;
columnName = char(65 + modulo) + columnName;
colNum = (int)((colNum - modulo) / 26);
}
return columnName;
}
    // Open the output workbook and size the dataset column to the longest name.
    BestResultsExcel::BestResultsExcel(const std::string& score, const std::vector<std::string>& datasets) : score(score), datasets(datasets)
    {
        workbook = workbook_new((Paths::excel() + fileName).c_str());
        setProperties("Best Results");
        int maxDatasetName = (*max_element(datasets.begin(), datasets.end(), [](const std::string& a, const std::string& b) { return a.size() < b.size(); })).size();
        datasetNameSize = std::max(datasetNameSize, maxDatasetName);
        createFormats();
    }
    // Build the full cross-model report (scores sheet, ranks/Friedman sheets if
    // requested) into the workbook.
    void BestResultsExcel::reportAll(const std::vector<std::string>& models, const json& table, const std::map<std::string, std::map<std::string, float>>& ranks, bool friedman, double significance)
    {
        this->table = table;
        this->models = models;
        ranksModels = ranks;
        this->friedman = friedman;
        this->significance = significance;
        worksheet = workbook_add_worksheet(workbook, "Best Results");
        int maxModelName = (*std::max_element(models.begin(), models.end(), [](const std::string& a, const std::string& b) { return a.size() < b.size(); })).size();
        modelNameSize = std::max(modelNameSize, maxModelName);
        formatColumns();
        build();
    }
    // Write a "Report" sheet listing the best results of one model; each source
    // results file is imported into its own tab and hyperlinked from the row.
    void BestResultsExcel::reportSingle(const std::string& model, const std::string& fileName)
    {
        worksheet = workbook_add_worksheet(workbook, "Report");
        // fopen is used only as an existence check
        if (FILE* fileTest = fopen(fileName.c_str(), "r")) {
            fclose(fileTest);
        } else {
            std::cerr << "File " << fileName << " doesn't exist." << std::endl;
            exit(1);
        }
        json data = loadResultData(fileName);
        std::string title = "Best results for " + model;
        worksheet_merge_range(worksheet, 0, 0, 0, 4, title.c_str(), styles["headerFirst"]);
        // Body header
        row = 3;
        int col = 1;
        writeString(row, 0, "", "bodyHeader");
        writeString(row, 1, "Dataset", "bodyHeader");
        writeString(row, 2, "Score", "bodyHeader");
        writeString(row, 3, "File", "bodyHeader");
        writeString(row, 4, "Hyperparameters", "bodyHeader");
        auto i = 0;
        std::string hyperparameters;
        int hypSize = 22;
        std::map<std::string, std::string> files; // map of files imported and their tabs
        // Each entry is [score, hyperparameters, file]
        for (auto const& item : data.items()) {
            row++;
            writeInt(row, 0, i++, "ints");
            writeString(row, 1, item.key().c_str(), "text");
            writeDouble(row, 2, item.value().at(0).get<double>(), "result");
            auto fileName = item.value().at(2).get<std::string>();
            std::string hyperlink = "";
            try {
                // Reuse the tab if this results file was already imported
                hyperlink = files.at(fileName);
            }
            catch (const std::out_of_range& oor) {
                auto tabName = "table_" + std::to_string(i);
                auto worksheetNew = workbook_add_worksheet(workbook, tabName.c_str());
                json data = loadResultData(Paths::results() + fileName);
                auto report = ReportExcel(data, false, workbook, worksheetNew);
                report.show();
                hyperlink = "#table_" + std::to_string(i);
                files[fileName] = hyperlink;
            }
            hyperlink += "!H" + std::to_string(i + 6);
            std::string fileNameText = "=HYPERLINK(\"" + hyperlink + "\",\"" + fileName + "\")";
            worksheet_write_formula(worksheet, row, 3, fileNameText.c_str(), efectiveStyle("text"));
            hyperparameters = item.value().at(1).dump();
            if (hyperparameters.size() > hypSize) {
                hypSize = hyperparameters.size();
            }
            writeString(row, 4, hyperparameters, "text");
        }
        row++;
        // Set Totals
        writeString(row, 1, "Total", "bodyHeader");
        std::stringstream oss;
        auto colName = getColumnName(2);
        oss << "=sum(" << colName << "5:" << colName << row << ")";
        worksheet_write_formula(worksheet, row, 2, oss.str().c_str(), styles["bodyHeader_odd"]);
        // Set format
        worksheet_freeze_panes(worksheet, 4, 2);
        std::vector<int> columns_sizes = { 5, datasetNameSize, modelNameSize, 66, hypSize + 1 };
        for (int i = 0; i < columns_sizes.size(); ++i) {
            worksheet_set_column(worksheet, i, i, columns_sizes.at(i), NULL);
        }
    }
    // Closing the workbook writes the file to disk.
    BestResultsExcel::~BestResultsExcel()
    {
        workbook_close(workbook);
    }
void BestResultsExcel::formatColumns()
{
worksheet_freeze_panes(worksheet, 4, 2);
std::vector<int> columns_sizes = { 5, datasetNameSize };
for (int i = 0; i < models.size(); ++i) {
columns_sizes.push_back(modelNameSize);
}
for (int i = 0; i < columns_sizes.size(); ++i) {
worksheet_set_column(worksheet, i, i, columns_sizes.at(i), NULL);
}
}
void BestResultsExcel::addConditionalFormat(std::string formula)
{
// Add conditional format for max/min values in scores/ranks sheets
lxw_format* custom_format = workbook_add_format(workbook);
format_set_bg_color(custom_format, 0xFFC7CE);
format_set_font_color(custom_format, 0x9C0006);
// Create a conditional format object. A static object would also work.
lxw_conditional_format* conditional_format = (lxw_conditional_format*)calloc(1, sizeof(lxw_conditional_format));
conditional_format->type = LXW_CONDITIONAL_TYPE_FORMULA;
std::string col = getColumnName(models.size() + 1);
std::stringstream oss;
oss << "=C5=" << formula << "($C5:$" << col << "5)";
auto formulaValue = oss.str();
conditional_format->value_string = formulaValue.c_str();
conditional_format->format = custom_format;
worksheet_conditional_format_range(worksheet, 4, 2, datasets.size() + 3, models.size() + 1, conditional_format);
}
    // Assemble the report: scores sheet always; ranks + Friedman sheets only
    // when the Friedman test was requested.
    void BestResultsExcel::build()
    {
        // Create Sheet with scores
        header(false);
        body(false);
        // Add conditional format for max values
        addConditionalFormat("max");
        footer(false);
        if (friedman) {
            // Create Sheet with ranks
            worksheet = workbook_add_worksheet(workbook, "Ranks");
            formatColumns();
            header(true);
            body(true);
            // Lowest rank is best, hence "min" here
            addConditionalFormat("min");
            footer(true);
            // Create Sheet with Friedman Test
            doFriedman();
        }
    }
    // Full path of the generated workbook.
    std::string BestResultsExcel::getFileName()
    {
        return Paths::excel() + fileName;
    }
void BestResultsExcel::header(bool ranks)
{
    // Title row spanning every column of the sheet.
    row = 0;
    const std::string title = ranks ? "Ranks for score " + score : "Best results for " + score;
    worksheet_merge_range(worksheet, 0, 0, 0, 1 + models.size(), title.c_str(), styles["headerFirst"]);
    // Body header (row 3): index column, dataset column, one column per model.
    row = 3;
    writeString(row, 0, "", "bodyHeader");
    writeString(row, 1, "Dataset", "bodyHeader");
    int col = 1;
    for (const auto& model : models) {
        writeString(row, ++col, model.c_str(), "bodyHeader");
    }
}
void BestResultsExcel::body(bool ranks)
{
row = 4;
int i = 0;
json origin = table.begin().value();
for (auto const& item : origin.items()) {
writeInt(row, 0, i++, "ints");
writeString(row, 1, item.key().c_str(), "text");
int col = 1;
for (const auto& model : models) {
double value = ranks ? ranksModels[item.key()][model] : table[model].at(item.key()).at(0).get<double>();
writeDouble(row, ++col, value, "result");
}
++row;
}
}
void BestResultsExcel::footer(bool ranks)
{
    // Writes a totals row below the data and, on the ranks sheet, an extra
    // row with the average rank of each model.
    // 'row' is the first free 0-based row index; the Excel formulas below use
    // 1-based row numbers, so Excel row 'row' is the last data row.
    // Set Totals
    writeString(row, 1, "Total", "bodyHeader");
    int col = 1;
    for (const auto& model : models) {
        std::stringstream oss;
        auto colName = getColumnName(col + 1);
        // Data rows start at Excel row 5 (0-based row 4).
        oss << "=SUM(" << colName << "5:" << colName << row << ")";
        worksheet_write_formula(worksheet, row, ++col, oss.str().c_str(), styles["bodyHeader_odd"]);
    }
    if (ranks) {
        row++;
        writeString(row, 1, "Average ranks", "bodyHeader");
        int col = 1;
        for (const auto& model : models) {
            auto colName = getColumnName(col + 1);
            std::stringstream oss;
            // Same data range as the totals (row was incremented, so row - 1
            // is again the last data row in Excel terms), divided by the
            // number of datasets to get the mean rank.
            oss << "=SUM(" << colName << "5:" << colName << row - 1 << ")/" << datasets.size();
            worksheet_write_formula(worksheet, row, ++col, oss.str().c_str(), styles["bodyHeader_odd"]);
        }
    }
}
void BestResultsExcel::doFriedman()
{
    // Builds the "Friedman" sheet: the Friedman omnibus test over all models,
    // followed by the post-hoc Holm test against the control (best) model.
    worksheet = workbook_add_worksheet(workbook, "Friedman");
    // Same column layout as the other sheets.
    std::vector<int> columns_sizes = { 5, datasetNameSize };
    for (int i = 0; i < models.size(); ++i) {
        columns_sizes.push_back(modelNameSize);
    }
    for (int i = 0; i < columns_sizes.size(); ++i) {
        worksheet_set_column(worksheet, i, i, columns_sizes.at(i), NULL);
    }
    worksheet_merge_range(worksheet, 0, 0, 0, 1 + models.size(), "Friedman Test", styles["headerFirst"]);
    row = 2;
    // Run the statistics over the collected results table.
    Statistics stats(models, datasets, table, significance, false);
    auto result = stats.friedmanTest();
    stats.postHocHolmTest(result);
    auto friedmanResult = stats.getFriedmanResult();
    auto holmResult = stats.getHolmResult();
    worksheet_merge_range(worksheet, row, 0, row, 1 + models.size(), "Null hypothesis: H0 'There is no significant differences between all the classifiers.'", styles["headerSmall"]);
    row += 2;
    writeString(row, 1, "Friedman Q", "bodyHeader");
    writeDouble(row, 2, friedmanResult.statistic, "bodyHeader");
    row++;
    writeString(row, 1, "Critical χ2 value", "bodyHeader");
    writeDouble(row, 2, friedmanResult.criticalValue, "bodyHeader");
    row++;
    // p-value compared against the significance level decides H0.
    writeString(row, 1, "p-value", "bodyHeader");
    writeDouble(row, 2, friedmanResult.pvalue, "bodyHeader");
    writeString(row, 3, friedmanResult.reject ? "<" : ">", "bodyHeader");
    writeDouble(row, 4, significance, "bodyHeader");
    writeString(row, 5, friedmanResult.reject ? "Reject H0" : "Accept H0", "bodyHeader");
    row += 3;
    worksheet_merge_range(worksheet, row, 0, row, 1 + models.size(), "Holm Test", styles["headerFirst"]);
    row += 2;
    worksheet_merge_range(worksheet, row, 0, row, 1 + models.size(), "Null hypothesis: H0 'There is no significant differences between the control model and the other models.'", styles["headerSmall"]);
    row += 2;
    std::string controlModel = "Control Model: " + holmResult.model;
    worksheet_merge_range(worksheet, row, 1, row, 7, controlModel.c_str(), styles["bodyHeader_odd"]);
    row++;
    // Holm table header.
    writeString(row, 1, "Model", "bodyHeader");
    writeString(row, 2, "p-value", "bodyHeader");
    writeString(row, 3, "Rank", "bodyHeader");
    writeString(row, 4, "Win", "bodyHeader");
    writeString(row, 5, "Tie", "bodyHeader");
    writeString(row, 6, "Loss", "bodyHeader");
    writeString(row, 7, "Reject H0", "bodyHeader");
    row++;
    bool first = true;
    for (const auto& item : holmResult.holmLines) {
        writeString(row, 1, item.model, "text");
        if (first) {
            // Control model info
            // First line is the control model itself: only its rank applies,
            // the p-value/win-tie-loss cells are left blank.
            first = false;
            writeString(row, 2, "", "text");
            writeDouble(row, 3, item.rank, "result");
            writeString(row, 4, "", "text");
            writeString(row, 5, "", "text");
            writeString(row, 6, "", "text");
            writeString(row, 7, "", "textCentered");
        } else {
            // Rest of the models info
            writeDouble(row, 2, item.pvalue, "result");
            writeDouble(row, 3, item.rank, "result");
            writeInt(row, 4, item.wtl.win, "ints");
            writeInt(row, 5, item.wtl.tie, "ints");
            writeInt(row, 6, item.wtl.loss, "ints");
            writeString(row, 7, item.reject ? "Yes" : "No", "textCentered");
        }
        row++;
    }
}
}

View File

@@ -1,39 +0,0 @@
#ifndef BESTRESULTS_EXCEL_H
#define BESTRESULTS_EXCEL_H
#include "ExcelFile.h"
#include <vector>
#include <map>
#include <nlohmann/json.hpp>
using json = nlohmann::json;
namespace platform {
class BestResultsExcel : ExcelFile {
public:
    // Builds "BestResults.xlsx" with the best score of every model on every
    // dataset; optionally adds rank and Friedman/Holm statistics sheets.
    BestResultsExcel(const std::string& score, const std::vector<std::string>& datasets);
    ~BestResultsExcel();
    // Report over all models: one row per dataset, one column per model.
    void reportAll(const std::vector<std::string>& models, const json& table, const std::map<std::string, std::map<std::string, float>>& ranks, bool friedman, double significance);
    // Report restricted to a single model read from fileName.
    void reportSingle(const std::string& model, const std::string& fileName);
    // Full path of the generated workbook.
    std::string getFileName();
private:
    void build();                                    // orchestrates sheet creation
    void header(bool ranks);                         // title + column headers
    void body(bool ranks);                           // one row per dataset
    void footer(bool ranks);                         // totals (and average ranks)
    void formatColumns();                            // column widths + freeze panes
    void doFriedman();                               // Friedman + post-hoc Holm sheet
    void addConditionalFormat(std::string formula);  // highlight row max/min
    const std::string fileName = "BestResults.xlsx";
    std::string score;
    std::vector<std::string> models;
    std::vector<std::string> datasets;
    json table;                                           // table[model][dataset] -> result entry
    std::map<std::string, std::map<std::string, float>> ranksModels;  // rank per dataset per model
    bool friedman;                                        // whether to produce the statistics sheets
    double significance;                                  // alpha for the hypothesis tests
    int modelNameSize = 12; // Min size of the column
    int datasetNameSize = 25; // Min size of the column
};
}
#endif //BESTRESULTS_EXCEL_H

View File

@@ -1,28 +0,0 @@
#ifndef BESTSCORE_H
#define BESTSCORE_H
#include <string>
#include <map>
#include <utility>
#include "DotEnv.h"
namespace platform {
class BestScore {
public:
    // Returns {model description, best score} for the current experiment
    // (taken from the "experiment" .env variable) and the given metric,
    // or {"", 0.0} when the pair is unknown.
    static std::pair<std::string, double> getScore(const std::string& metric)
    {
        static const std::map<std::pair<std::string, std::string>, std::pair<std::string, double>> data = {
            {{"discretiz", "accuracy"}, {"STree_default (linear-ovo)", 22.109799}},
            {{"odte", "accuracy"}, {"STree_default (linear-ovo)", 22.109799}},
        };
        auto env = platform::DotEnv();
        std::string experiment = env.get("experiment");
        // map::operator[] never throws for a missing key, so the original
        // try/catch was dead code and the lookup default-inserted {"", 0.0}
        // into the (conceptually read-only) table; find() keeps it read-only.
        auto it = data.find({ experiment, metric });
        if (it == data.end()) {
            return { "", 0.0 };
        }
        return it->second;
    }
};
}
#endif

View File

@@ -1,22 +0,0 @@
#ifndef LOCALE_H
#define LOCALE_H
#include <locale>
#include <iostream>
#include <string>
namespace platform {
// numpunct facet producing European-style numbers: ',' as decimal point,
// '.' as thousands separator, digits grouped by three ("\03" is char 3).
struct separation : std::numpunct<char> {
    char do_decimal_point() const override { return ','; }
    char do_thousands_sep() const override { return '.'; }
    std::string do_grouping() const override { return "\03"; }
};
// Installs the 'separation' facet as the process-wide locale and applies it
// to std::cout, so all numeric output uses the European formatting.
class ConfigLocale {
public:
    explicit ConfigLocale()
    {
        std::locale withSeparators(std::cout.getloc(), new separation);
        std::locale::global(withSeparators);
        std::cout.imbue(withSeparators);
    }
};
}
#endif

View File

@@ -1,22 +1,8 @@
include_directories(${BayesNet_SOURCE_DIR}/src/BayesNet) include_directories(${BayesNet_SOURCE_DIR}/src/BayesNet)
include_directories(${BayesNet_SOURCE_DIR}/src/Platform) include_directories(${BayesNet_SOURCE_DIR}/src/Platform)
include_directories(${BayesNet_SOURCE_DIR}/src/PyClassifiers)
include_directories(${BayesNet_SOURCE_DIR}/lib/Files) include_directories(${BayesNet_SOURCE_DIR}/lib/Files)
include_directories(${BayesNet_SOURCE_DIR}/lib/mdlp) include_directories(${BayesNet_SOURCE_DIR}/lib/mdlp)
include_directories(${BayesNet_SOURCE_DIR}/lib/argparse/include) include_directories(${BayesNet_SOURCE_DIR}/lib/argparse/include)
include_directories(${BayesNet_SOURCE_DIR}/lib/json/include) include_directories(${BayesNet_SOURCE_DIR}/lib/json/include)
include_directories(${BayesNet_SOURCE_DIR}/lib/libxlsxwriter/include) add_executable(main main.cc Folding.cc platformUtils.cc Experiment.cc Datasets.cc CrossValidation.cc Models.cc)
include_directories(${Python3_INCLUDE_DIRS}) target_link_libraries(main BayesNet ArffFiles mdlp "${TORCH_LIBRARIES}")
include_directories(${MPI_CXX_INCLUDE_DIRS})
add_executable(b_best b_best.cc BestResults.cc Result.cc Statistics.cc BestResultsExcel.cc ReportExcel.cc ReportBase.cc Datasets.cc Dataset.cc ExcelFile.cc)
add_executable(b_grid b_grid.cc GridSearch.cc GridData.cc HyperParameters.cc Folding.cc Datasets.cc Dataset.cc)
add_executable(b_list b_list.cc Datasets.cc Dataset.cc)
add_executable(b_main b_main.cc Folding.cc Experiment.cc Datasets.cc Dataset.cc Models.cc HyperParameters.cc ReportConsole.cc ReportBase.cc)
add_executable(b_manage b_manage.cc Results.cc ManageResults.cc CommandParser.cc Result.cc ReportConsole.cc ReportExcel.cc ReportBase.cc Datasets.cc Dataset.cc ExcelFile.cc)
target_link_libraries(b_best Boost::boost "${XLSXWRITER_LIB}" "${TORCH_LIBRARIES}" ArffFiles mdlp)
target_link_libraries(b_grid BayesNet PyWrap ${MPI_CXX_LIBRARIES})
target_link_libraries(b_list ArffFiles mdlp "${TORCH_LIBRARIES}")
target_link_libraries(b_main BayesNet ArffFiles mdlp "${TORCH_LIBRARIES}" PyWrap)
target_link_libraries(b_manage "${TORCH_LIBRARIES}" "${XLSXWRITER_LIB}" ArffFiles mdlp)

View File

@@ -1,15 +0,0 @@
#ifndef COLORS_H
#define COLORS_H
// ANSI terminal color escape sequences used for console output.
class Colors {
public:
    static std::string MAGENTA() { return esc("1;35"); }
    static std::string BLUE() { return esc("1;34"); }
    static std::string CYAN() { return esc("1;36"); }
    static std::string GREEN() { return esc("1;32"); }
    static std::string YELLOW() { return esc("1;33"); }
    static std::string RED() { return esc("1;31"); }
    static std::string WHITE() { return esc("1;37"); }
    static std::string IBLUE() { return esc("0;94"); }
    static std::string RESET() { return esc("0"); }
private:
    // Wraps an SGR code in the CSI escape prefix/suffix.
    static std::string esc(const std::string& code) { return "\033[" + code + "m"; }
};
#endif // COLORS_H

View File

@@ -1,87 +0,0 @@
#include "CommandParser.h"
#include <iostream>
#include <sstream>
#include <algorithm>
#include "Colors.h"
#include "Utils.h"
namespace platform {
void CommandParser::messageError(const std::string& message)
{
    // Prints an error message in red and restores the default terminal color.
    std::cout << Colors::RED() << message << Colors::RESET() << std::endl;
}
// Prompts the user (in the given color) until a valid command is entered and
// returns {command letter, numeric index}.
// Each option is a tuple (name, letter, requiresValue). A purely numeric
// answer selects defaultCommand with that number as index; otherwise the
// first character of the answer must match one of the option letters, and
// options flagged requiresValue expect a number (0..maxIndex) after it.
std::pair<char, int> CommandParser::parse(const std::string& color, const std::vector<std::tuple<std::string, char, bool>>& options, const char defaultCommand, const int maxIndex)
{
    bool finished = false;
    while (!finished) {
        std::stringstream oss;
        std::string line;
        oss << color << "Choose option (";
        bool first = true;
        // Build the "(l=label, l=label, ...)" prompt from the options list.
        for (auto& option : options) {
            if (first) {
                first = false;
            } else {
                oss << ", ";
            }
            oss << std::get<char>(option) << "=" << std::get<std::string>(option);
        }
        oss << "): ";
        std::cout << oss.str();
        getline(std::cin, line);
        std::cout << Colors::RESET();
        line = trim(line);
        // Empty input: re-prompt.
        if (line.size() == 0)
            continue;
        // Purely numeric input selects the default command with that index.
        if (all_of(line.begin(), line.end(), ::isdigit)) {
            command = defaultCommand;
            index = stoi(line);
            if (index > maxIndex || index < 0) {
                messageError("Index out of range");
                continue;
            }
            finished = true;
            break;
        }
        bool found = false;
        for (auto& option : options) {
            if (line[0] == std::get<char>(option)) {
                found = true;
                // it's a match
                line.erase(line.begin());
                line = trim(line);
                if (std::get<bool>(option)) {
                    // The option requires a value
                    if (line.size() == 0) {
                        messageError("Option " + std::get<std::string>(option) + " requires a value");
                        break;
                    }
                    try {
                        index = stoi(line);
                        if (index > maxIndex || index < 0) {
                            messageError("Index out of range");
                            break;
                        }
                    }
                    catch (const std::invalid_argument& ia) {
                        messageError("Invalid value: " + line);
                        break;
                    }
                } else {
                    // Value given to a value-less option: report and re-prompt.
                    if (line.size() > 0) {
                        messageError("option " + std::get<std::string>(option) + " doesn't accept values");
                        break;
                    }
                }
                command = std::get<char>(option);
                finished = true;
                break;
            }
        }
        if (!found) {
            messageError("I don't know " + line);
        }
    }
    return { command, index };
}
} /* namespace platform */

View File

@@ -1,20 +0,0 @@
#ifndef COMMAND_PARSER_H
#define COMMAND_PARSER_H
#include <string>
#include <vector>
#include <tuple>
namespace platform {
// Interactive command prompt: parse() asks the user to choose one of the
// given options and stores the chosen command letter and numeric index.
class CommandParser {
public:
    CommandParser() = default;
    // Prompts until valid input is read; returns {command, index}.
    std::pair<char, int> parse(const std::string& color, const std::vector<std::tuple<std::string, char, bool>>& options, const char defaultCommand, const int maxIndex);
    char getCommand() const { return command; };
    int getIndex() const { return index; };
private:
    void messageError(const std::string& message);
    // In-class initializers: the original left these uninitialized, so
    // calling getCommand()/getIndex() before a successful parse() read
    // indeterminate values.
    char command{ '\0' };
    int index{ 0 };
};
} /* namespace platform */
#endif /* COMMAND_PARSER_H */

View File

@@ -0,0 +1,79 @@
#include "CrossValidation.h"
#include "Models.h"
namespace platform {
using json = nlohmann::json;
using namespace std::chrono;
// Stores the cross-validation configuration. 'datasets' is held by reference
// and must outlive this object.
CrossValidation::CrossValidation(const string& modelName, bool stratified, int nfolds, const vector<int>& randomSeeds, platform::Datasets& datasets) : modelName(modelName), stratified(stratified), nfolds(nfolds), randomSeeds(randomSeeds), datasets(datasets)
{
}
// Runs nfolds-fold cross validation of the configured model on one dataset
// (one complete k-fold round per random seed) and returns a Result with the
// mean/std of train/test scores, timings and model-size statistics.
Result CrossValidation::crossValidate(const string& fileName)
{
    auto [Xt, y] = datasets.getTensors(fileName);
    auto states = datasets.getStates(fileName);
    auto className = datasets.getClassName(fileName);
    auto features = datasets.getFeatures(fileName);
    auto samples = datasets.getNSamples(fileName);
    auto result = Result();
    // Number of classes = number of distinct label values.
    auto [values, counts] = at::_unique(y);
    result.setSamples(Xt.size(1)).setFeatures(Xt.size(0)).setClasses(values.size(0));
    int nSeeds = static_cast<int>(randomSeeds.size());
    // One slot per (seed, fold) pair in each metric tensor.
    auto accuracy_test = torch::zeros({ nfolds * nSeeds }, torch::kFloat64);
    auto accuracy_train = torch::zeros({ nfolds * nSeeds }, torch::kFloat64);
    auto train_time = torch::zeros({ nfolds * nSeeds }, torch::kFloat64);
    auto test_time = torch::zeros({ nfolds * nSeeds }, torch::kFloat64);
    auto nodes = torch::zeros({ nfolds * nSeeds }, torch::kFloat64);
    auto edges = torch::zeros({ nfolds * nSeeds }, torch::kFloat64);
    auto num_states = torch::zeros({ nfolds * nSeeds }, torch::kFloat64);
    Timer train_timer, test_timer;
    int item = 0;
    for (auto seed : randomSeeds) {
        cout << "(" << seed << ") " << flush;
        Fold* fold;
        if (stratified)
            fold = new StratifiedKFold(nfolds, y, seed);
        else
            fold = new KFold(nfolds, samples, seed);
        cout << "Fold: " << flush;
        for (int nfold = 0; nfold < nfolds; nfold++) {
            // A fresh classifier instance per fold.
            // NOTE(review): 'model' is never deleted — if Models::get returns
            // an owning raw pointer this leaks one classifier per fold;
            // confirm the ownership contract of Models::get.
            bayesnet::BaseClassifier* model = Models::get(modelName);
            result.setModelVersion(model->getVersion());
            train_timer.start();
            auto [train, test] = fold->getFold(nfold);
            auto train_t = torch::tensor(train);
            auto test_t = torch::tensor(test);
            // Samples are stored column-wise: select columns by fold indices.
            auto X_train = Xt.index({ "...", train_t });
            auto y_train = y.index({ train_t });
            auto X_test = Xt.index({ "...", test_t });
            auto y_test = y.index({ test_t });
            cout << nfold + 1 << ", " << flush;
            model->fit(X_train, y_train, features, className, states);
            nodes[item] = model->getNumberOfNodes();
            edges[item] = model->getNumberOfEdges();
            num_states[item] = model->getNumberOfStates();
            train_time[item] = train_timer.getDuration();
            auto accuracy_train_value = model->score(X_train, y_train);
            test_timer.start();
            auto accuracy_test_value = model->score(X_test, y_test);
            test_time[item] = test_timer.getDuration();
            accuracy_train[item] = accuracy_train_value;
            accuracy_test[item] = accuracy_test_value;
            // Store results and times in vector
            result.addScoreTrain(accuracy_train_value);
            result.addScoreTest(accuracy_test_value);
            result.addTimeTrain(train_time[item].item<double>());
            result.addTimeTest(test_time[item].item<double>());
            item++;
        }
        delete fold;
    }
    cout << "end." << endl;
    // Aggregate metrics over all (seed, fold) pairs.
    result.setScoreTest(torch::mean(accuracy_test).item<double>()).setScoreTrain(torch::mean(accuracy_train).item<double>());
    result.setScoreTestStd(torch::std(accuracy_test).item<double>()).setScoreTrainStd(torch::std(accuracy_train).item<double>());
    result.setTrainTime(torch::mean(train_time).item<double>()).setTestTime(torch::mean(test_time).item<double>());
    // NOTE(review): setLeaves receives the mean edge count and setDepth the
    // mean state count — the Result fields look repurposed; confirm intended.
    result.setNodes(torch::mean(nodes).item<double>()).setLeaves(torch::mean(edges).item<double>()).setDepth(torch::mean(num_states).item<double>());
    return result;
}
} // namespace platform

View File

@@ -0,0 +1,25 @@
#ifndef CROSSVALIDATION_H
#define CROSSVALIDATION_H
#include <torch/torch.h>
#include <nlohmann/json.hpp>
#include <string>
#include <chrono>
#include "Folding.h"
#include "Datasets.h"
#include "Experiment.h"
namespace platform {
class CrossValidation {
private:
bool stratified;
int nfolds;
string modelName;
vector<int> randomSeeds;
platform::Datasets& datasets;
public:
CrossValidation(const string& modelName, bool stratified, int nfolds, const vector<int>& randomSeeds, platform::Datasets& datasets);
~CrossValidation() = default;
Result crossValidate(const string& fileName);
};
}
#endif // !CROSSVALIDATION_H

View File

@@ -1,215 +0,0 @@
#include "Dataset.h"
#include "ArffFiles.h"
#include <fstream>
namespace platform {
// Copy constructor: field-by-field copy of the configuration and any data
// already loaded.
// NOTE(review): torch::Tensor members copy with reference semantics, so X/y
// of the copy share storage with the source — confirm that is intended.
Dataset::Dataset(const Dataset& dataset) : path(dataset.path), name(dataset.name), className(dataset.className), n_samples(dataset.n_samples), n_features(dataset.n_features), features(dataset.features), states(dataset.states), loaded(dataset.loaded), discretize(dataset.discretize), X(dataset.X), y(dataset.y), Xv(dataset.Xv), Xd(dataset.Xd), yv(dataset.yv), fileType(dataset.fileType)
{
}
// Dataset identifier (the file stem used to build the on-disk path).
std::string Dataset::getName() const
{
    return name;
}
// Name of the class (target) column; may have been resolved during load().
std::string Dataset::getClassName() const
{
    return className;
}
std::vector<std::string> Dataset::getFeatures() const
{
    // Feature names are only available once the dataset has been loaded.
    if (!loaded) {
        throw std::invalid_argument("Dataset not loaded.");
    }
    return features;
}
int Dataset::getNFeatures() const
{
    // Valid only after load().
    if (!loaded) {
        throw std::invalid_argument("Dataset not loaded.");
    }
    return n_features;
}
int Dataset::getNSamples() const
{
    // Valid only after load().
    if (!loaded) {
        throw std::invalid_argument("Dataset not loaded.");
    }
    return n_samples;
}
std::map<std::string, std::vector<int>> Dataset::getStates() const
{
    // Per-variable list of possible discrete values; filled by load() when
    // discretization is enabled.
    if (!loaded) {
        throw std::invalid_argument("Dataset not loaded.");
    }
    return states;
}
pair<std::vector<std::vector<float>>&, std::vector<int>&> Dataset::getVectors()
{
    // References to the raw (continuous) feature columns and labels.
    if (!loaded) {
        throw std::invalid_argument("Dataset not loaded.");
    }
    return { Xv, yv };
}
pair<std::vector<std::vector<int>>&, std::vector<int>&> Dataset::getVectorsDiscretized()
{
    // References to the discretized feature columns and labels.
    if (!loaded) {
        throw std::invalid_argument("Dataset not loaded.");
    }
    return { Xd, yv };
}
pair<torch::Tensor&, torch::Tensor&> Dataset::getTensors()
{
    // Tensors are (re)built from the vector data on every request.
    if (!loaded) {
        throw std::invalid_argument("Dataset not loaded.");
    }
    buildTensors();
    return { X, y };
}
void Dataset::load_csv()
{
ifstream file(path + "/" + name + ".csv");
if (file.is_open()) {
std::string line;
getline(file, line);
std::vector<std::string> tokens = split(line, ',');
features = std::vector<std::string>(tokens.begin(), tokens.end() - 1);
if (className == "-1") {
className = tokens.back();
}
for (auto i = 0; i < features.size(); ++i) {
Xv.push_back(std::vector<float>());
}
while (getline(file, line)) {
tokens = split(line, ',');
for (auto i = 0; i < features.size(); ++i) {
Xv[i].push_back(stof(tokens[i]));
}
yv.push_back(stoi(tokens.back()));
}
file.close();
} else {
throw std::invalid_argument("Unable to open dataset file.");
}
}
void Dataset::computeStates()
{
    // Builds the list of possible discrete values (0..max) for every feature
    // and for the class variable.
    for (int i = 0; i < features.size(); ++i) {
        states[features[i]] = std::vector<int>(*max_element(Xd[i].begin(), Xd[i].end()) + 1);
        // Must be a reference: the original took a copy ('auto item'), so
        // iota filled the copy and the stored vectors stayed zero-filled.
        auto& item = states.at(features[i]);
        iota(begin(item), end(item), 0);
    }
    states[className] = std::vector<int>(*max_element(yv.begin(), yv.end()) + 1);
    iota(begin(states.at(className)), end(states.at(className)), 0);
}
void Dataset::load_arff()
{
    // Delegate parsing to the ArffFiles helper.
    auto arff = ArffFiles();
    arff.load(path + "/" + name + ".arff", className);
    // Get Dataset X, y
    Xv = arff.getX();
    yv = arff.getY();
    // Get className & Features
    className = arff.getClassName();
    // Feature names are the first element of each (name, type) attribute pair.
    for (const auto& attribute : arff.getAttributes()) {
        features.push_back(attribute.first);
    }
}
// Splits a line into whitespace-separated tokens, never emitting empty ones.
// Fixes the original hand-rolled scanner, which emitted an empty first token
// when the line started with whitespace and only recognized ' ', '\t', '\n'
// (stream extraction also skips '\r', '\v', '\f').
std::vector<std::string> tokenize(std::string line)
{
    std::vector<std::string> tokens;
    std::istringstream stream(line);
    std::string token;
    while (stream >> token) {
        tokens.push_back(token);
    }
    return tokens;
}
void Dataset::load_rdata()
{
ifstream file(path + "/" + name + "_R.dat");
if (file.is_open()) {
std::string line;
getline(file, line);
line = ArffFiles::trim(line);
std::vector<std::string> tokens = tokenize(line);
transform(tokens.begin(), tokens.end() - 1, back_inserter(features), [](const auto& attribute) { return ArffFiles::trim(attribute); });
if (className == "-1") {
className = ArffFiles::trim(tokens.back());
}
for (auto i = 0; i < features.size(); ++i) {
Xv.push_back(std::vector<float>());
}
while (getline(file, line)) {
tokens = tokenize(line);
// We have to skip the first token, which is the instance number.
for (auto i = 1; i < features.size() + 1; ++i) {
const float value = stof(tokens[i]);
Xv[i - 1].push_back(value);
}
yv.push_back(stoi(tokens.back()));
}
file.close();
} else {
throw std::invalid_argument("Unable to open dataset file.");
}
}
void Dataset::load()
{
    // Idempotent: a second call is a no-op.
    if (loaded) {
        return;
    }
    switch (fileType) {
        case CSV:
            load_csv();
            break;
        case ARFF:
            load_arff();
            break;
        case RDATA:
            load_rdata();
            break;
    }
    if (discretize) {
        Xd = discretizeDataset(Xv, yv);
        computeStates();
    }
    n_samples = Xv[0].size();
    n_features = Xv.size();
    loaded = true;
}
void Dataset::buildTensors()
{
    // X holds features row-wise (n_features x n_samples); int columns when
    // discretized, float columns otherwise.
    const auto dtype = discretize ? torch::kInt32 : torch::kFloat32;
    X = torch::zeros({ static_cast<int>(n_features), static_cast<int>(n_samples) }, dtype);
    for (int i = 0; i < features.size(); ++i) {
        if (discretize) {
            X.index_put_({ i, "..." }, torch::tensor(Xd[i], torch::kInt32));
        } else {
            X.index_put_({ i, "..." }, torch::tensor(Xv[i], torch::kFloat32));
        }
    }
    y = torch::tensor(yv, torch::kInt32);
}
std::vector<mdlp::labels_t> Dataset::discretizeDataset(std::vector<mdlp::samples_t>& X, mdlp::labels_t& y)
{
    // Discretize every feature column with MDLP, supervised by the labels y.
    std::vector<mdlp::labels_t> result;
    auto fimdlp = mdlp::CPPFImdlp();
    for (auto& column : X) {
        fimdlp.fit(column, y);
        result.push_back(fimdlp.transform(column));
    }
    return result;
}
}

View File

@@ -1,78 +0,0 @@
#ifndef DATASET_H
#define DATASET_H
#include <torch/torch.h>
#include <map>
#include <vector>
#include <string>
#include "CPPFImdlp.h"
#include "Utils.h"
namespace platform {
enum fileType_t { CSV, ARFF, RDATA };
// Maps a data-source label ("Surcov", "Arff", "Tanveer") to the folder its
// files live in and the on-disk format they use.
class SourceData {
public:
    SourceData(std::string source)
    {
        if (source == "Surcov" || source == "Arff") {
            path = "datasets/";
            fileType = source == "Surcov" ? CSV : ARFF;
        } else if (source == "Tanveer") {
            path = "data/";
            fileType = RDATA;
        } else {
            throw std::invalid_argument("Unknown source.");
        }
    }
    // Relative folder containing the datasets of this source.
    std::string getPath()
    {
        return path;
    }
    // File format of the datasets of this source.
    fileType_t getFileType()
    {
        return fileType;
    }
private:
    std::string path;
    fileType_t fileType;
};
class Dataset {
private:
std::string path;
std::string name;
fileType_t fileType;
std::string className;
int n_samples{ 0 }, n_features{ 0 };
std::vector<std::string> features;
std::map<std::string, std::vector<int>> states;
bool loaded;
bool discretize;
torch::Tensor X, y;
std::vector<std::vector<float>> Xv;
std::vector<std::vector<int>> Xd;
std::vector<int> yv;
void buildTensors();
void load_csv();
void load_arff();
void load_rdata();
void computeStates();
std::vector<mdlp::labels_t> discretizeDataset(std::vector<mdlp::samples_t>& X, mdlp::labels_t& y);
public:
Dataset(const std::string& path, const std::string& name, const std::string& className, bool discretize, fileType_t fileType) : path(path), name(name), className(className), discretize(discretize), loaded(false), fileType(fileType) {};
explicit Dataset(const Dataset&);
std::string getName() const;
std::string getClassName() const;
std::vector<string> getFeatures() const;
std::map<std::string, std::vector<int>> getStates() const;
std::pair<vector<std::vector<float>>&, std::vector<int>&> getVectors();
std::pair<vector<std::vector<int>>&, std::vector<int>&> getVectorsDiscretized();
std::pair<torch::Tensor&, torch::Tensor&> getTensors();
int getNFeatures() const;
int getNSamples() const;
void load();
const bool inline isLoaded() const { return loaded; };
};
};
#endif

View File

@@ -1,129 +1,239 @@
#include "Datasets.h" #include "Datasets.h"
#include <fstream> #include "platformUtils.h"
#include "ArffFiles.h"
namespace platform { namespace platform {
vector<string> split(const string& text, char delimiter)
{
vector<string> result;
stringstream ss(text);
string token;
while (getline(ss, token, delimiter)) {
result.push_back(token);
}
return result;
}
void Datasets::load() void Datasets::load()
{ {
auto sd = SourceData(sfileType); ifstream catalog(path + "/all.txt");
fileType = sd.getFileType();
path = sd.getPath();
ifstream catalog(path + "all.txt");
if (catalog.is_open()) { if (catalog.is_open()) {
std::string line; string line;
while (getline(catalog, line)) { while (getline(catalog, line)) {
if (line.empty() || line[0] == '#') { vector<string> tokens = split(line, ',');
continue; string name = tokens[0];
} string className = tokens[1];
std::vector<std::string> tokens = split(line, ',');
std::string name = tokens[0];
std::string className;
if (tokens.size() == 1) {
className = "-1";
} else {
className = tokens[1];
}
datasets[name] = make_unique<Dataset>(path, name, className, discretize, fileType); datasets[name] = make_unique<Dataset>(path, name, className, discretize, fileType);
} }
catalog.close(); catalog.close();
} else { } else {
throw std::invalid_argument("Unable to open catalog file. [" + path + "all.txt" + "]"); throw invalid_argument("Unable to open catalog file. [" + path + "/all.txt" + "]");
} }
} }
std::vector<std::string> Datasets::getNames() vector<string> Datasets::getNames()
{ {
std::vector<std::string> result; vector<string> result;
transform(datasets.begin(), datasets.end(), back_inserter(result), [](const auto& d) { return d.first; }); transform(datasets.begin(), datasets.end(), back_inserter(result), [](const auto& d) { return d.first; });
return result; return result;
} }
std::vector<std::string> Datasets::getFeatures(const std::string& name) const vector<string> Datasets::getFeatures(const string& name)
{ {
if (datasets.at(name)->isLoaded()) { if (datasets[name]->isLoaded()) {
return datasets.at(name)->getFeatures(); return datasets[name]->getFeatures();
} else { } else {
throw std::invalid_argument("Dataset not loaded."); throw invalid_argument("Dataset not loaded.");
} }
} }
map<std::string, std::vector<int>> Datasets::getStates(const std::string& name) const map<string, vector<int>> Datasets::getStates(const string& name)
{ {
if (datasets.at(name)->isLoaded()) { if (datasets[name]->isLoaded()) {
return datasets.at(name)->getStates(); return datasets[name]->getStates();
} else { } else {
throw std::invalid_argument("Dataset not loaded."); throw invalid_argument("Dataset not loaded.");
} }
} }
void Datasets::loadDataset(const std::string& name) const string Datasets::getClassName(const string& name)
{ {
if (datasets.at(name)->isLoaded()) { if (datasets[name]->isLoaded()) {
return; return datasets[name]->getClassName();
} else { } else {
datasets.at(name)->load(); throw invalid_argument("Dataset not loaded.");
} }
} }
std::string Datasets::getClassName(const std::string& name) const int Datasets::getNSamples(const string& name)
{ {
if (datasets.at(name)->isLoaded()) { if (datasets[name]->isLoaded()) {
return datasets.at(name)->getClassName(); return datasets[name]->getNSamples();
} else { } else {
throw std::invalid_argument("Dataset not loaded."); throw invalid_argument("Dataset not loaded.");
} }
} }
int Datasets::getNSamples(const std::string& name) const pair<vector<vector<float>>&, vector<int>&> Datasets::getVectors(const string& name)
{
if (datasets.at(name)->isLoaded()) {
return datasets.at(name)->getNSamples();
} else {
throw std::invalid_argument("Dataset not loaded.");
}
}
int Datasets::getNClasses(const std::string& name)
{
if (datasets.at(name)->isLoaded()) {
auto className = datasets.at(name)->getClassName();
if (discretize) {
auto states = getStates(name);
return states.at(className).size();
}
auto [Xv, yv] = getVectors(name);
return *std::max_element(yv.begin(), yv.end()) + 1;
} else {
throw std::invalid_argument("Dataset not loaded.");
}
}
std::vector<int> Datasets::getClassesCounts(const std::string& name) const
{
if (datasets.at(name)->isLoaded()) {
auto [Xv, yv] = datasets.at(name)->getVectors();
std::vector<int> counts(*std::max_element(yv.begin(), yv.end()) + 1);
for (auto y : yv) {
counts[y]++;
}
return counts;
} else {
throw std::invalid_argument("Dataset not loaded.");
}
}
pair<std::vector<std::vector<float>>&, std::vector<int>&> Datasets::getVectors(const std::string& name)
{ {
if (!datasets[name]->isLoaded()) { if (!datasets[name]->isLoaded()) {
datasets[name]->load(); datasets[name]->load();
} }
return datasets[name]->getVectors(); return datasets[name]->getVectors();
} }
pair<std::vector<std::vector<int>>&, std::vector<int>&> Datasets::getVectorsDiscretized(const std::string& name) pair<vector<vector<int>>&, vector<int>&> Datasets::getVectorsDiscretized(const string& name)
{ {
if (!datasets[name]->isLoaded()) { if (!datasets[name]->isLoaded()) {
datasets[name]->load(); datasets[name]->load();
} }
return datasets[name]->getVectorsDiscretized(); return datasets[name]->getVectorsDiscretized();
} }
pair<torch::Tensor&, torch::Tensor&> Datasets::getTensors(const std::string& name) pair<torch::Tensor&, torch::Tensor&> Datasets::getTensors(const string& name)
{ {
if (!datasets[name]->isLoaded()) { if (!datasets[name]->isLoaded()) {
datasets[name]->load(); datasets[name]->load();
} }
return datasets[name]->getTensors(); return datasets[name]->getTensors();
} }
bool Datasets::isDataset(const std::string& name) const bool Datasets::isDataset(const string& name)
{ {
return datasets.find(name) != datasets.end(); return datasets.find(name) != datasets.end();
} }
Dataset::Dataset(Dataset& dataset) : path(dataset.path), name(dataset.name), className(dataset.className), n_samples(dataset.n_samples), n_features(dataset.n_features), features(dataset.features), states(dataset.states), loaded(dataset.loaded), discretize(dataset.discretize), X(dataset.X), y(dataset.y), Xv(dataset.Xv), Xd(dataset.Xd), yv(dataset.yv), fileType(dataset.fileType) {}
string Dataset::getName()
{
return name;
}
string Dataset::getClassName()
{
return className;
}
vector<string> Dataset::getFeatures()
{
if (loaded) {
return features;
} else {
throw invalid_argument("Dataset not loaded.");
}
}
int Dataset::getNFeatures()
{
if (loaded) {
return n_features;
} else {
throw invalid_argument("Dataset not loaded.");
}
}
int Dataset::getNSamples()
{
if (loaded) {
return n_samples;
} else {
throw invalid_argument("Dataset not loaded.");
}
}
map<string, vector<int>> Dataset::getStates()
{
if (loaded) {
return states;
} else {
throw invalid_argument("Dataset not loaded.");
}
}
pair<vector<vector<float>>&, vector<int>&> Dataset::getVectors()
{
if (loaded) {
return { Xv, yv };
} else {
throw invalid_argument("Dataset not loaded.");
}
}
pair<vector<vector<int>>&, vector<int>&> Dataset::getVectorsDiscretized()
{
if (loaded) {
return { Xd, yv };
} else {
throw invalid_argument("Dataset not loaded.");
}
}
pair<torch::Tensor&, torch::Tensor&> Dataset::getTensors()
{
if (loaded) {
buildTensors();
return { X, y };
} else {
throw invalid_argument("Dataset not loaded.");
}
}
void Dataset::load_csv()
{
ifstream file(path + "/" + name + ".csv");
if (file.is_open()) {
string line;
getline(file, line);
vector<string> tokens = split(line, ',');
features = vector<string>(tokens.begin(), tokens.end() - 1);
className = tokens.back();
for (auto i = 0; i < features.size(); ++i) {
Xv.push_back(vector<float>());
}
while (getline(file, line)) {
tokens = split(line, ',');
for (auto i = 0; i < features.size(); ++i) {
Xv[i].push_back(stof(tokens[i]));
}
yv.push_back(stoi(tokens.back()));
}
file.close();
} else {
throw invalid_argument("Unable to open dataset file.");
}
}
void Dataset::computeStates()
{
for (int i = 0; i < features.size(); ++i) {
states[features[i]] = vector<int>(*max_element(Xd[i].begin(), Xd[i].end()) + 1);
iota(begin(states[features[i]]), end(states[features[i]]), 0);
}
states[className] = vector<int>(*max_element(yv.begin(), yv.end()) + 1);
iota(begin(states[className]), end(states[className]), 0);
}
void Dataset::load_arff()
{
    // Delegates parsing of "<path>/<name>.arff" to the ArffFiles helper and
    // copies its outputs into the dataset members.
    auto arff = ArffFiles();
    arff.load(path + "/" + name + ".arff", className);
    // Data matrix and labels.
    Xv = arff.getX();
    yv = arff.getY();
    // Class name and feature names as reported by the parser.
    className = arff.getClassName();
    for (const auto& attribute : arff.getAttributes()) {
        features.push_back(attribute.first);
    }
}
void Dataset::load()
{
    // Loads the dataset from disk (idempotent). Chooses the parser from
    // fileType, then optionally discretizes the data and computes the
    // per-feature state lists.
    if (loaded) {
        return;
    }
    if (fileType == CSV) {
        load_csv();
    } else if (fileType == ARFF) {
        load_arff();
    }
    // Fix: sample/feature counts were previously set only inside the
    // discretize branch, leaving them at 0 (and buildTensors broken) for
    // continuous datasets. They are derived from the raw data instead.
    n_features = static_cast<int>(Xv.size());
    n_samples = Xv.empty() ? 0 : static_cast<int>(Xv[0].size());
    if (discretize) {
        Xd = discretizeDataset(Xv, yv);
        computeStates();
    }
    loaded = true;
}
void Dataset::buildTensors()
{
    // Materializes X (features x samples) and y from the vector storage.
    // Dimensions are taken from the data itself, so the tensor is correct
    // even when n_features/n_samples were not set (previously they stayed 0
    // for non-discretized datasets, producing an empty X that the
    // index_put_ calls below then wrote into).
    if (discretize) {
        const int rows = static_cast<int>(Xd.size());
        const int cols = rows > 0 ? static_cast<int>(Xd[0].size()) : 0;
        X = torch::zeros({ rows, cols }, torch::kInt32);
        for (int i = 0; i < rows; ++i) {
            X.index_put_({ i, "..." }, torch::tensor(Xd[i], torch::kInt32));
        }
    } else {
        const int rows = static_cast<int>(Xv.size());
        const int cols = rows > 0 ? static_cast<int>(Xv[0].size()) : 0;
        X = torch::zeros({ rows, cols }, torch::kFloat32);
        for (int i = 0; i < rows; ++i) {
            X.index_put_({ i, "..." }, torch::tensor(Xv[i], torch::kFloat32));
        }
    }
    y = torch::tensor(yv, torch::kInt32);
}
} }

View File

@@ -1,30 +1,66 @@
#ifndef DATASETS_H #ifndef DATASETS_H
#define DATASETS_H #define DATASETS_H
#include "Dataset.h" #include <torch/torch.h>
#include <map>
#include <vector>
#include <string>
namespace platform { namespace platform {
using namespace std;
// Supported on-disk dataset formats.
enum fileType_t { CSV, ARFF };
// Holds a single dataset: loads it from disk (CSV or ARFF), optionally
// discretizes it, and exposes the data as STL vectors or torch tensors.
// All getters except getName/getClassName throw until load() has been called.
class Dataset {
private:
    string path;                       // directory containing the dataset file
    string name;                       // file name without extension
    fileType_t fileType;
    string className;                  // name of the class (target) column
    int n_samples, n_features;
    vector<string> features;           // feature column names
    map<string, vector<int>> states;   // per-feature list of discrete states
    bool loaded;
    bool discretize;                   // discretize continuous features on load
    torch::Tensor X, y;
    vector<vector<float>> Xv;          // raw feature columns
    vector<vector<int>> Xd;            // discretized feature columns
    vector<int> yv;                    // class labels
    void buildTensors();
    void load_csv();
    void load_arff();
    void computeStates();
public:
    Dataset(const string& path, const string& name, const string& className, bool discretize, fileType_t fileType) : path(path), name(name), className(className), discretize(discretize), loaded(false), fileType(fileType), n_samples(0), n_features(0) {};
    explicit Dataset(Dataset&);
    string getName();
    string getClassName();
    vector<string> getFeatures();
    map<string, vector<int>> getStates();
    pair<vector<vector<float>>&, vector<int>&> getVectors();
    pair<vector<vector<int>>&, vector<int>&> getVectorsDiscretized();
    pair<torch::Tensor&, torch::Tensor&> getTensors();
    int getNFeatures();
    int getNSamples();
    void load();                       // idempotent; reads the file from disk
    const bool inline isLoaded() const { return loaded; };
};
class Datasets { class Datasets {
private: private:
std::string path; string path;
fileType_t fileType; fileType_t fileType;
std::string sfileType; map<string, unique_ptr<Dataset>> datasets;
std::map<std::string, std::unique_ptr<Dataset>> datasets;
bool discretize; bool discretize;
void load(); // Loads the list of datasets void load(); // Loads the list of datasets
public: public:
explicit Datasets(bool discretize, std::string sfileType) : discretize(discretize), sfileType(sfileType) { load(); }; explicit Datasets(const string& path, bool discretize = false, fileType_t fileType = ARFF) : path(path), discretize(discretize), fileType(fileType) { load(); };
std::vector<string> getNames(); vector<string> getNames();
std::vector<string> getFeatures(const std::string& name) const; vector<string> getFeatures(const string& name);
int getNSamples(const std::string& name) const; int getNSamples(const string& name);
std::string getClassName(const std::string& name) const; string getClassName(const string& name);
int getNClasses(const std::string& name); map<string, vector<int>> getStates(const string& name);
std::vector<int> getClassesCounts(const std::string& name) const; pair<vector<vector<float>>&, vector<int>&> getVectors(const string& name);
std::map<std::string, std::vector<int>> getStates(const std::string& name) const; pair<vector<vector<int>>&, vector<int>&> getVectorsDiscretized(const string& name);
std::pair<std::vector<std::vector<float>>&, std::vector<int>&> getVectors(const std::string& name); pair<torch::Tensor&, torch::Tensor&> getTensors(const string& name);
std::pair<std::vector<std::vector<int>>&, std::vector<int>&> getVectorsDiscretized(const std::string& name); bool isDataset(const string& name);
std::pair<torch::Tensor&, torch::Tensor&> getTensors(const std::string& name);
bool isDataset(const std::string& name) const;
void loadDataset(const std::string& name) const;
}; };
vector<string> split(const string&, char);
}; };
#endif #endif

View File

@@ -4,52 +4,64 @@
#include <map> #include <map>
#include <fstream> #include <fstream>
#include <sstream> #include <sstream>
#include <algorithm> std::vector<std::string> split(std::string text, char delimiter)
#include <iostream> {
#include "Utils.h" std::vector<std::string> result;
std::stringstream ss(text);
//#include "Dataset.h" std::string token;
namespace platform { while (getline(ss, token, delimiter)) {
class DotEnv { result.push_back(token);
private: }
std::map<std::string, std::string> env; return result;
public:
DotEnv()
{
std::ifstream file(".env");
if (!file.is_open()) {
std::cerr << "File .env not found" << std::endl;
exit(1);
}
std::string line;
while (std::getline(file, line)) {
line = trim(line);
if (line.empty() || line[0] == '#') {
continue;
}
std::istringstream iss(line);
std::string key, value;
if (std::getline(iss, key, '=') && std::getline(iss, value)) {
env[key] = value;
}
}
}
std::string get(const std::string& key)
{
return env.at(key);
}
std::vector<int> getSeeds()
{
auto seeds = std::vector<int>();
auto seeds_str = env["seeds"];
seeds_str = trim(seeds_str);
seeds_str = seeds_str.substr(1, seeds_str.size() - 2);
auto seeds_str_split = split(seeds_str, ',');
transform(seeds_str_split.begin(), seeds_str_split.end(), back_inserter(seeds), [](const std::string& str) {
return stoi(str);
});
return seeds;
}
};
} }
// Minimal ".env" file reader: parses KEY=VALUE pairs (ignoring blank lines
// and '#' comments) from the current directory and exposes them by key.
class DotEnv {
private:
    std::map<std::string, std::string> env;
    // Returns a copy of str with leading and trailing whitespace removed.
    std::string trim(const std::string& str)
    {
        std::string result = str;
        result.erase(result.begin(), std::find_if(result.begin(), result.end(), [](int ch) {
            return !std::isspace(ch);
            }));
        result.erase(std::find_if(result.rbegin(), result.rend(), [](int ch) {
            return !std::isspace(ch);
            }).base(), result.end());
        return result;
    }
public:
    // Loads ".env"; terminates the program if the file is missing.
    DotEnv()
    {
        std::ifstream file(".env");
        if (!file.is_open()) {
            std::cerr << "File .env not found" << std::endl;
            exit(1);
        }
        std::string line;
        while (std::getline(file, line)) {
            line = trim(line);
            if (line.empty() || line[0] == '#') {
                continue;
            }
            std::istringstream iss(line);
            std::string key, value;
            if (std::getline(iss, key, '=') && std::getline(iss, value)) {
                env[key] = value;
            }
        }
    }
    // Returns the value for key; throws std::out_of_range if absent.
    // Fix: operator[] silently default-inserted an empty value for missing
    // keys, mutating the map and masking missing configuration entries.
    std::string get(const std::string& key)
    {
        return env.at(key);
    }
    // Parses the "seeds" entry, e.g. "[271, 314, 171]", into a vector of ints.
    std::vector<int> getSeeds()
    {
        auto seeds = std::vector<int>();
        auto seeds_str = env["seeds"];
        seeds_str = trim(seeds_str);
        // Strip the surrounding brackets; assumes the value is "[...]".
        seeds_str = seeds_str.substr(1, seeds_str.size() - 2);
        auto seeds_str_split = split(seeds_str, ',');
        transform(seeds_str_split.begin(), seeds_str_split.end(), back_inserter(seeds), [](const auto& s) { return stoi(s); });
        return seeds;
    }
};
#endif #endif

View File

@@ -1,168 +0,0 @@
#include "ExcelFile.h"
namespace platform {
// Default constructor: workbook and worksheet handles are assigned later.
ExcelFile::ExcelFile()
{
    setDefault();
}
// Wraps an existing workbook; the worksheet is assigned later.
ExcelFile::ExcelFile(lxw_workbook* workbook) : workbook(workbook)
{
    setDefault();
}
// Wraps an existing workbook and worksheet.
ExcelFile::ExcelFile(lxw_workbook* workbook, lxw_worksheet* worksheet) : workbook(workbook), worksheet(worksheet)
{
    setDefault();
}
void ExcelFile::setDefault()
{
    // Shared initial state for every constructor overload.
    row = 0;
    normalSize = 14; // font size for report body
    // Background colors for the title row and alternating body rows.
    colorTitle = 0xB1A0C7;
    colorOdd = 0xDCE6F1;
    colorEven = 0xFDE9D9;
}
// Accessor for the underlying libxlsxwriter workbook handle.
lxw_workbook* ExcelFile::getWorkbook()
{
    return workbook;
}
void ExcelFile::setProperties(std::string title)
{
    // Sets the workbook document metadata. lxw_doc_properties fields are
    // non-const char*, so point at the buffer of the by-value 'title'
    // parameter (workbook_set_properties copies the strings internally).
    // Fix: the previous version used a variable-length array
    // (char line[title.size() + 1]), which is not standard C++.
    lxw_doc_properties properties = {
        .title = &title[0],
        .subject = (char*)"Machine learning results",
        .author = (char*)"Ricardo Montañana Gómez",
        .manager = (char*)"Dr. J. A. Gámez, Dr. J. M. Puerta",
        .company = (char*)"UCLM",
        .comments = (char*)"Created with libxlsxwriter and c++",
    };
    workbook_set_properties(workbook, &properties);
}
lxw_format* ExcelFile::efectiveStyle(const std::string& style)
{
    // Resolves a style name to a format handle. Row parity selects the
    // "_odd"/"_even" variant; if that variant is not registered, the plain
    // name is tried before giving up. An empty name yields NULL (no style).
    lxw_format* result = NULL;
    if (!style.empty()) {
        const std::string suffix = row % 2 ? "_odd" : "_even";
        auto it = styles.find(style + suffix);
        if (it == styles.end()) {
            it = styles.find(style);
            if (it == styles.end()) {
                throw std::invalid_argument("Style " + style + " not found");
            }
        }
        result = it->second;
    }
    return result;
}
// Writes text at (row, col) using the named style (resolved per row parity).
void ExcelFile::writeString(int row, int col, const std::string& text, const std::string& style)
{
    worksheet_write_string(worksheet, row, col, text.c_str(), efectiveStyle(style));
}
// Writes an integer at (row, col) using the named style.
void ExcelFile::writeInt(int row, int col, const int number, const std::string& style)
{
    worksheet_write_number(worksheet, row, col, number, efectiveStyle(style));
}
// Writes a double at (row, col) using the named style.
void ExcelFile::writeDouble(int row, int col, const double number, const std::string& style)
{
    worksheet_write_number(worksheet, row, col, number, efectiveStyle(style));
}
void ExcelFile::addColor(lxw_format* style, bool odd)
{
    // Alternating row backgrounds: odd rows get colorEven, even rows
    // colorOdd (naming follows the caller's parity convention).
    format_set_bg_color(style, lxw_color_t(odd ? colorEven : colorOdd));
}
// Configures a single format object for the given style name and row parity.
// Every style gets the alternating background color; the branches below add
// the per-style alignment, font size, border and number format.
void ExcelFile::createStyle(const std::string& name, lxw_format* style, bool odd)
{
    addColor(style, odd);
    if (name == "textCentered") {
        format_set_align(style, LXW_ALIGN_CENTER);
        format_set_font_size(style, normalSize);
        format_set_border(style, LXW_BORDER_THIN);
    } else if (name == "text") {
        format_set_font_size(style, normalSize);
        format_set_border(style, LXW_BORDER_THIN);
    } else if (name == "bodyHeader") {
        // Bold, centered header cells with the title background color.
        format_set_bold(style);
        format_set_font_size(style, normalSize);
        format_set_align(style, LXW_ALIGN_CENTER);
        format_set_align(style, LXW_ALIGN_VERTICAL_CENTER);
        format_set_border(style, LXW_BORDER_THIN);
        format_set_bg_color(style, lxw_color_t(colorTitle));
    } else if (name == "result") {
        // Scores shown with seven decimals.
        format_set_font_size(style, normalSize);
        format_set_border(style, LXW_BORDER_THIN);
        format_set_num_format(style, "0.0000000");
    } else if (name == "time") {
        // Durations shown with six decimals and thousands separators.
        format_set_font_size(style, normalSize);
        format_set_border(style, LXW_BORDER_THIN);
        format_set_num_format(style, "#,##0.000000");
    } else if (name == "ints") {
        format_set_font_size(style, normalSize);
        format_set_num_format(style, "###,##0");
        format_set_border(style, LXW_BORDER_THIN);
    } else if (name == "floats") {
        format_set_border(style, LXW_BORDER_THIN);
        format_set_font_size(style, normalSize);
        format_set_num_format(style, "#,##0.00");
    }
}
void ExcelFile::createFormats()
{
    // Creates the odd/even variants of every body style plus the header and
    // summary styles, registering each one in the styles map.
    auto styleNames = { "text", "textCentered", "bodyHeader", "result", "time", "ints", "floats" };
    for (const std::string name : styleNames) {
        // Fix: the previous version declared a shadowing 'style', calling
        // workbook_add_format twice and leaving an unused format object in
        // the workbook on every iteration (plus an unused outer variable).
        lxw_format* style = workbook_add_format(workbook);
        createStyle(name, style, true);
        styles[name + "_odd"] = style;
        style = workbook_add_format(workbook);
        createStyle(name, style, false);
        styles[name + "_even"] = style;
    }
    // Header 1st line
    lxw_format* headerFirst = workbook_add_format(workbook);
    format_set_bold(headerFirst);
    format_set_font_size(headerFirst, 18);
    format_set_align(headerFirst, LXW_ALIGN_CENTER);
    format_set_align(headerFirst, LXW_ALIGN_VERTICAL_CENTER);
    format_set_border(headerFirst, LXW_BORDER_THIN);
    format_set_bg_color(headerFirst, lxw_color_t(colorTitle));
    // Header rest
    lxw_format* headerRest = workbook_add_format(workbook);
    format_set_bold(headerRest);
    format_set_align(headerRest, LXW_ALIGN_CENTER);
    format_set_font_size(headerRest, 16);
    format_set_align(headerRest, LXW_ALIGN_VERTICAL_CENTER);
    format_set_border(headerRest, LXW_BORDER_THIN);
    format_set_bg_color(headerRest, lxw_color_t(colorOdd));
    // Header small
    lxw_format* headerSmall = workbook_add_format(workbook);
    format_set_bold(headerSmall);
    format_set_align(headerSmall, LXW_ALIGN_LEFT);
    format_set_font_size(headerSmall, 12);
    format_set_border(headerSmall, LXW_BORDER_THIN);
    format_set_align(headerSmall, LXW_ALIGN_VERTICAL_CENTER);
    format_set_bg_color(headerSmall, lxw_color_t(colorOdd));
    // Summary style
    lxw_format* summaryStyle = workbook_add_format(workbook);
    format_set_bold(summaryStyle);
    format_set_font_size(summaryStyle, 16);
    format_set_border(summaryStyle, LXW_BORDER_THIN);
    format_set_align(summaryStyle, LXW_ALIGN_VERTICAL_CENTER);
    styles["headerFirst"] = headerFirst;
    styles["headerRest"] = headerRest;
    styles["headerSmall"] = headerSmall;
    styles["summaryStyle"] = summaryStyle;
}
}

View File

@@ -1,43 +0,0 @@
#ifndef EXCELFILE_H
#define EXCELFILE_H
#include <locale>
#include <string>
#include <map>
#include "xlsxwriter.h"
namespace platform {
// numpunct facet producing Spanish-style number formatting:
// ',' as decimal point, '.' as thousands separator, digit groups of three.
struct separated : std::numpunct<char> {
    char do_decimal_point() const { return ','; }
    char do_thousands_sep() const { return '.'; }
    std::string do_grouping() const { return "\03"; }
};
// Base helper for writing xlsx files with libxlsxwriter: keeps the workbook
// and worksheet handles plus a registry of named cell formats ("styles")
// with odd/even row variants for alternating backgrounds.
class ExcelFile {
public:
    ExcelFile();
    ExcelFile(lxw_workbook* workbook);
    ExcelFile(lxw_workbook* workbook, lxw_worksheet* worksheet);
    lxw_workbook* getWorkbook();
protected:
    void setProperties(std::string title);
    // Cell writers; 'style' is resolved through efectiveStyle().
    void writeString(int row, int col, const std::string& text, const std::string& style = "");
    void writeInt(int row, int col, const int number, const std::string& style = "");
    void writeDouble(int row, int col, const double number, const std::string& style = "");
    void createFormats();
    void createStyle(const std::string& name, lxw_format* style, bool odd);
    void addColor(lxw_format* style, bool odd);
    lxw_format* efectiveStyle(const std::string& name);
    lxw_workbook* workbook;
    lxw_worksheet* worksheet;
    std::map<std::string, lxw_format*> styles;   // style name -> format handle
    int row;                                     // current row; drives odd/even styling
    int normalSize; //font size for report body
    uint32_t colorTitle;
    uint32_t colorOdd;
    uint32_t colorEven;
private:
    void setDefault();
};
}
#endif // !EXCELFILE_H

View File

@@ -1,12 +1,8 @@
#include <fstream>
#include "Experiment.h" #include "Experiment.h"
#include "Datasets.h"
#include "Models.h"
#include "ReportConsole.h"
#include "Paths.h"
namespace platform { namespace platform {
using json = nlohmann::json; using json = nlohmann::json;
std::string get_date() string get_date()
{ {
time_t rawtime; time_t rawtime;
tm* timeinfo; tm* timeinfo;
@@ -16,7 +12,7 @@ namespace platform {
oss << std::put_time(timeinfo, "%Y-%m-%d"); oss << std::put_time(timeinfo, "%Y-%m-%d");
return oss.str(); return oss.str();
} }
std::string get_time() string get_time()
{ {
time_t rawtime; time_t rawtime;
tm* timeinfo; tm* timeinfo;
@@ -26,9 +22,9 @@ namespace platform {
oss << std::put_time(timeinfo, "%H:%M:%S"); oss << std::put_time(timeinfo, "%H:%M:%S");
return oss.str(); return oss.str();
} }
std::string Experiment::get_file_name() string Experiment::get_file_name()
{ {
std::string result = "results_" + score_name + "_" + model + "_" + platform + "_" + get_date() + "_" + get_time() + "_" + (stratified ? "1" : "0") + ".json"; string result = "results_" + score_name + "_" + model + "_" + platform + "_" + get_date() + "_" + get_time() + "_" + (stratified ? "1" : "0") + ".json";
return result; return result;
} }
@@ -47,7 +43,7 @@ namespace platform {
result["discretized"] = discretized; result["discretized"] = discretized;
result["stratified"] = stratified; result["stratified"] = stratified;
result["folds"] = nfolds; result["folds"] = nfolds;
result["seeds"] = randomSeeds; result["seeds"] = random_seeds;
result["duration"] = duration; result["duration"] = duration;
result["results"] = json::array(); result["results"] = json::array();
for (const auto& r : results) { for (const auto& r : results) {
@@ -80,147 +76,17 @@ namespace platform {
} }
return result; return result;
} }
void Experiment::save(const std::string& path) void Experiment::save(const string& path)
{ {
json data = build_json(); json data = build_json();
ofstream file(path + "/" + get_file_name()); ofstream file(path + "/" + get_file_name());
file << data; file << data;
file.close(); file.close();
} }
void Experiment::report()
{
json data = build_json();
ReportConsole report(data);
report.show();
}
void Experiment::show() void Experiment::show()
{ {
json data = build_json(); json data = build_json();
std::cout << data.dump(4) << std::endl; cout << data.dump(4) << endl;
} }
void Experiment::go(std::vector<std::string> filesToProcess, bool quiet)
{
    // Runs the cross-validation over every dataset file in turn, printing a
    // progress line per dataset.
    std::cout << "*** Starting experiment: " << title << " ***" << std::endl;
    for (const auto& datasetName : filesToProcess) {
        std::cout << "- " << setw(20) << left << datasetName << " " << right << flush;
        cross_validation(datasetName, quiet);
        std::cout << std::endl;
    }
}
std::string getColor(bayesnet::status_t status)
{
    // Maps a classifier status to the console color used in progress output.
    if (status == bayesnet::NORMAL) {
        return Colors::GREEN();
    }
    if (status == bayesnet::WARNING) {
        return Colors::YELLOW();
    }
    if (status == bayesnet::ERROR) {
        return Colors::RED();
    }
    return Colors::RESET();
}
void showProgress(int fold, const std::string& color, const std::string& phase)
{
    // Prints "<fold>(<phase>)" in the given color. Phase "a" starts a new
    // fold entry; later phases backspace over the previous "(x)" marker.
    std::string prefix;
    if (phase != "a") {
        prefix = "\b\b\b\b";
    }
    std::cout << prefix << color << fold << Colors::RESET() << "(" << color << phase << Colors::RESET() << ")" << flush;
}
// Runs a (possibly stratified) k-fold cross-validation of the configured
// model on one dataset, once per random seed, and appends the aggregated
// scores/times/model sizes to the experiment results.
// NOTE(review): 'fold' is a raw new/delete pair - an exception inside the
// fold loop would leak it; consider a smart pointer.
void Experiment::cross_validation(const std::string& fileName, bool quiet)
{
    // NOTE(review): argument order (discretized, path) - confirm it matches
    // the Datasets constructor signature.
    auto datasets = Datasets(discretized, Paths::datasets());
    // Get dataset
    auto [X, y] = datasets.getTensors(fileName);
    auto states = datasets.getStates(fileName);
    auto features = datasets.getFeatures(fileName);
    auto samples = datasets.getNSamples(fileName);
    auto className = datasets.getClassName(fileName);
    if (!quiet) {
        std::cout << " (" << setw(5) << samples << "," << setw(3) << features.size() << ") " << flush;
    }
    // Prepare Result
    auto result = Result();
    // Number of classes = number of distinct labels ('counts' is unused).
    auto [values, counts] = at::_unique(y);
    result.setSamples(X.size(1)).setFeatures(X.size(0)).setClasses(values.size(0));
    result.setHyperparameters(hyperparameters.get(fileName));
    // Initialize results std::vectors
    int nResults = nfolds * static_cast<int>(randomSeeds.size());
    auto accuracy_test = torch::zeros({ nResults }, torch::kFloat64);
    auto accuracy_train = torch::zeros({ nResults }, torch::kFloat64);
    auto train_time = torch::zeros({ nResults }, torch::kFloat64);
    auto test_time = torch::zeros({ nResults }, torch::kFloat64);
    auto nodes = torch::zeros({ nResults }, torch::kFloat64);
    auto edges = torch::zeros({ nResults }, torch::kFloat64);
    auto num_states = torch::zeros({ nResults }, torch::kFloat64);
    Timer train_timer, test_timer;
    int item = 0;
    for (auto seed : randomSeeds) {
        if (!quiet)
            std::cout << "(" << seed << ") doing Fold: " << flush;
        Fold* fold;
        if (stratified)
            fold = new StratifiedKFold(nfolds, y, seed);
        else
            fold = new KFold(nfolds, y.size(0), seed);
        for (int nfold = 0; nfold < nfolds; nfold++) {
            // A fresh classifier instance per fold.
            auto clf = Models::instance()->create(model);
            setModelVersion(clf->getVersion());
            auto valid = clf->getValidHyperparameters();
            hyperparameters.check(valid, fileName);
            clf->setHyperparameters(hyperparameters.get(fileName));
            // Split train - test dataset
            train_timer.start();
            auto [train, test] = fold->getFold(nfold);
            auto train_t = torch::tensor(train);
            auto test_t = torch::tensor(test);
            auto X_train = X.index({ "...", train_t });
            auto y_train = y.index({ train_t });
            auto X_test = X.index({ "...", test_t });
            auto y_test = y.index({ test_t });
            if (!quiet)
                showProgress(nfold + 1, getColor(clf->getStatus()), "a");
            // Train model
            clf->fit(X_train, y_train, features, className, states);
            if (!quiet)
                showProgress(nfold + 1, getColor(clf->getStatus()), "b");
            // Model complexity metrics for this fold.
            nodes[item] = clf->getNumberOfNodes();
            edges[item] = clf->getNumberOfEdges();
            num_states[item] = clf->getNumberOfStates();
            train_time[item] = train_timer.getDuration();
            // Score train
            auto accuracy_train_value = clf->score(X_train, y_train);
            // Test model
            if (!quiet)
                showProgress(nfold + 1, getColor(clf->getStatus()), "c");
            test_timer.start();
            auto accuracy_test_value = clf->score(X_test, y_test);
            test_time[item] = test_timer.getDuration();
            accuracy_train[item] = accuracy_train_value;
            accuracy_test[item] = accuracy_test_value;
            if (!quiet)
                std::cout << "\b\b\b, " << flush;
            // Store results and times in std::vector
            result.addScoreTrain(accuracy_train_value);
            result.addScoreTest(accuracy_test_value);
            result.addTimeTrain(train_time[item].item<double>());
            result.addTimeTest(test_time[item].item<double>());
            item++;
        }
        if (!quiet)
            std::cout << "end. " << flush;
        delete fold;
    }
    // Aggregate mean/std over all seeds x folds into the Result.
    result.setScoreTest(torch::mean(accuracy_test).item<double>()).setScoreTrain(torch::mean(accuracy_train).item<double>());
    result.setScoreTestStd(torch::std(accuracy_test).item<double>()).setScoreTrainStd(torch::std(accuracy_train).item<double>());
    result.setTrainTime(torch::mean(train_time).item<double>()).setTestTime(torch::mean(test_time).item<double>());
    result.setTestTimeStd(torch::std(test_time).item<double>()).setTrainTimeStd(torch::std(train_time).item<double>());
    result.setNodes(torch::mean(nodes).item<double>()).setLeaves(torch::mean(edges).item<double>()).setDepth(torch::mean(num_states).item<double>());
    result.setDataset(fileName);
    addResult(result);
}
} }

View File

@@ -3,50 +3,59 @@
#include <torch/torch.h> #include <torch/torch.h>
#include <nlohmann/json.hpp> #include <nlohmann/json.hpp>
#include <string> #include <string>
#include "Folding.h" #include <chrono>
#include "BaseClassifier.h"
#include "HyperParameters.h"
#include "TAN.h"
#include "KDB.h"
#include "AODE.h"
#include "Timer.h"
using namespace std;
namespace platform { namespace platform {
using json = nlohmann::json; using json = nlohmann::json;
class Timer {
private:
chrono::high_resolution_clock::time_point begin;
public:
Timer() = default;
~Timer() = default;
void start() { begin = chrono::high_resolution_clock::now(); }
double getDuration()
{
chrono::high_resolution_clock::time_point end = chrono::high_resolution_clock::now();
chrono::duration<double> time_span = chrono::duration_cast<chrono::duration<double>>(end - begin);
return time_span.count();
}
};
class Result { class Result {
private: private:
std::string dataset, model_version; string dataset = "", hyperparameters = "", model_version = "";
json hyperparameters;
int samples{ 0 }, features{ 0 }, classes{ 0 }; int samples{ 0 }, features{ 0 }, classes{ 0 };
double score_train{ 0 }, score_test{ 0 }, score_train_std{ 0 }, score_test_std{ 0 }, train_time{ 0 }, train_time_std{ 0 }, test_time{ 0 }, test_time_std{ 0 }; double score_train{ 0 }, score_test = 0, score_train_std = 0, score_test_std = 0, train_time = 0, train_time_std = 0, test_time = 0, test_time_std = 0;
vector<double> scores_train{}, scores_test{}, times_train{}, times_test{};
float nodes{ 0 }, leaves{ 0 }, depth{ 0 }; float nodes{ 0 }, leaves{ 0 }, depth{ 0 };
std::vector<double> scores_train, scores_test, times_train, times_test;
public: public:
Result() = default; Result() = default;
Result& setDataset(const std::string& dataset) { this->dataset = dataset; return *this; } Result& setDataset(const string& dataset) { this->dataset = dataset; return *this; }
Result& setHyperparameters(const json& hyperparameters) { this->hyperparameters = hyperparameters; return *this; } Result& setHyperparameters(const string& hyperparameters) { this->hyperparameters = hyperparameters; return *this; }
Result& setSamples(int samples) { this->samples = samples; return *this; } Result& setSamples(const int samples) { this->samples = samples; return *this; }
Result& setFeatures(int features) { this->features = features; return *this; } Result& setFeatures(const int features) { this->features = features; return *this; }
Result& setClasses(int classes) { this->classes = classes; return *this; } Result& setClasses(const int classes) { this->classes = classes; return *this; }
Result& setScoreTrain(double score) { this->score_train = score; return *this; } Result& setScoreTrain(const double score) { this->score_train = score; return *this; }
Result& setScoreTest(double score) { this->score_test = score; return *this; } Result& setScoreTest(const double score) { this->score_test = score; return *this; }
Result& setScoreTrainStd(double score_std) { this->score_train_std = score_std; return *this; } Result& setScoreTrainStd(const double score_std) { this->score_train_std = score_std; return *this; }
Result& setScoreTestStd(double score_std) { this->score_test_std = score_std; return *this; } Result& setScoreTestStd(const double score_std) { this->score_test_std = score_std; return *this; }
Result& setTrainTime(double train_time) { this->train_time = train_time; return *this; } Result& setTrainTime(const double train_time) { this->train_time = train_time; return *this; }
Result& setTrainTimeStd(double train_time_std) { this->train_time_std = train_time_std; return *this; } Result& setTrainTimeStd(const double train_time_std) { this->train_time_std = train_time_std; return *this; }
Result& setTestTime(double test_time) { this->test_time = test_time; return *this; } Result& setTestTime(const double test_time) { this->test_time = test_time; return *this; }
Result& setTestTimeStd(double test_time_std) { this->test_time_std = test_time_std; return *this; } Result& setTestTimeStd(const double test_time_std) { this->test_time_std = test_time_std; return *this; }
Result& setNodes(float nodes) { this->nodes = nodes; return *this; } Result& setNodes(const float nodes) { this->nodes = nodes; return *this; }
Result& setLeaves(float leaves) { this->leaves = leaves; return *this; } Result& setLeaves(const float leaves) { this->leaves = leaves; return *this; }
Result& setDepth(float depth) { this->depth = depth; return *this; } Result& setDepth(const float depth) { this->depth = depth; return *this; }
Result& addScoreTrain(double score) { scores_train.push_back(score); return *this; } Result& setModelVersion(const string& model_version) { this->model_version = model_version; return *this; }
Result& addScoreTest(double score) { scores_test.push_back(score); return *this; } Result& addScoreTrain(const double score) { scores_train.push_back(score); return *this; }
Result& addTimeTrain(double time) { times_train.push_back(time); return *this; } Result& addScoreTest(const double score) { scores_test.push_back(score); return *this; }
Result& addTimeTest(double time) { times_test.push_back(time); return *this; } Result& addTimeTrain(const double time) { times_train.push_back(time); return *this; }
Result& addTimeTest(const double time) { times_test.push_back(time); return *this; }
const float get_score_train() const { return score_train; } const float get_score_train() const { return score_train; }
float get_score_test() { return score_test; } float get_score_test() { return score_test; }
const std::string& getDataset() const { return dataset; } const string& getDataset() const { return dataset; }
const json& getHyperparameters() const { return hyperparameters; } const string& getHyperparameters() const { return hyperparameters; }
const int getSamples() const { return samples; } const int getSamples() const { return samples; }
const int getFeatures() const { return features; } const int getFeatures() const { return features; }
const int getClasses() const { return classes; } const int getClasses() const { return classes; }
@@ -61,43 +70,39 @@ namespace platform {
const float getNodes() const { return nodes; } const float getNodes() const { return nodes; }
const float getLeaves() const { return leaves; } const float getLeaves() const { return leaves; }
const float getDepth() const { return depth; } const float getDepth() const { return depth; }
const std::vector<double>& getScoresTrain() const { return scores_train; } const vector<double>& getScoresTrain() const { return scores_train; }
const std::vector<double>& getScoresTest() const { return scores_test; } const vector<double>& getScoresTest() const { return scores_test; }
const std::vector<double>& getTimesTrain() const { return times_train; } const vector<double>& getTimesTrain() const { return times_train; }
const std::vector<double>& getTimesTest() const { return times_test; } const vector<double>& getTimesTest() const { return times_test; }
const string& getModelVersion() const { return model_version; }
}; };
class Experiment { class Experiment {
private:
string title{""}, model{""}, platform{""}, score_name{""}, model_version{""}, language_version{""}, language{""};
bool discretized{false}, stratified{false};
vector<Result> results;
vector<int> random_seeds;
int nfolds{0};
float duration{0};
json build_json();
public: public:
Experiment() = default; Experiment() = default;
Experiment& setTitle(const std::string& title) { this->title = title; return *this; } Experiment& setTitle(const string& title) { this->title = title; return *this; }
Experiment& setModel(const std::string& model) { this->model = model; return *this; } Experiment& setModel(const string& model) { this->model = model; return *this; }
Experiment& setPlatform(const std::string& platform) { this->platform = platform; return *this; } Experiment& setPlatform(const string& platform) { this->platform = platform; return *this; }
Experiment& setScoreName(const std::string& score_name) { this->score_name = score_name; return *this; } Experiment& setScoreName(const string& score_name) { this->score_name = score_name; return *this; }
Experiment& setModelVersion(const std::string& model_version) { this->model_version = model_version; return *this; } Experiment& setModelVersion(const string& model_version) { this->model_version = model_version; return *this; }
Experiment& setLanguage(const std::string& language) { this->language = language; return *this; } Experiment& setLanguage(const string& language) { this->language = language; return *this; }
Experiment& setLanguageVersion(const std::string& language_version) { this->language_version = language_version; return *this; } Experiment& setLanguageVersion(const string& language_version) { this->language_version = language_version; return *this; }
Experiment& setDiscretized(bool discretized) { this->discretized = discretized; return *this; } Experiment& setDiscretized(const bool discretized) { this->discretized = discretized; return *this; }
Experiment& setStratified(bool stratified) { this->stratified = stratified; return *this; } Experiment& setStratified(const bool stratified) { this->stratified = stratified; return *this; }
Experiment& setNFolds(int nfolds) { this->nfolds = nfolds; return *this; } Experiment& setNFolds(const int nfolds) { this->nfolds = nfolds; return *this; }
Experiment& addResult(Result result) { results.push_back(result); return *this; } Experiment& addResult(Result result) { results.push_back(result); return *this; }
Experiment& addRandomSeed(int randomSeed) { randomSeeds.push_back(randomSeed); return *this; } Experiment& addRandomSeed(const int random_seed) { random_seeds.push_back(random_seed); return *this; }
Experiment& setDuration(float duration) { this->duration = duration; return *this; } Experiment& setDuration(const float duration) { this->duration = duration; return *this; }
Experiment& setHyperparameters(const HyperParameters& hyperparameters_) { this->hyperparameters = hyperparameters_; return *this; } string get_file_name();
std::string get_file_name(); void save(const string& path);
void save(const std::string& path);
void cross_validation(const std::string& fileName, bool quiet);
void go(std::vector<std::string> filesToProcess, bool quiet);
void show(); void show();
void report();
private:
std::string title, model, platform, score_name, model_version, language_version, language;
bool discretized{ false }, stratified{ false };
std::vector<Result> results;
std::vector<int> randomSeeds;
HyperParameters hyperparameters;
int nfolds{ 0 };
float duration{ 0 };
json build_json();
}; };
} }
#endif #endif

View File

@@ -1,104 +1,95 @@
#include "Folding.h" #include "Folding.h"
#include <algorithm> #include <algorithm>
#include <map> #include <map>
namespace platform { Fold::Fold(int k, int n, int seed) : k(k), n(n), seed(seed)
Fold::Fold(int k, int n, int seed) : k(k), n(n), seed(seed) {
{ random_device rd;
std::random_device rd; random_seed = default_random_engine(seed == -1 ? rd() : seed);
random_seed = std::default_random_engine(seed == -1 ? rd() : seed); srand(seed == -1 ? time(0) : seed);
std::srand(seed == -1 ? time(0) : seed); }
KFold::KFold(int k, int n, int seed) : Fold(k, n, seed), indices(vector<int>())
{
iota(begin(indices), end(indices), 0); // fill with 0, 1, ..., n - 1
shuffle(indices.begin(), indices.end(), random_seed);
}
pair<vector<int>, vector<int>> KFold::getFold(int nFold)
{
if (nFold >= k || nFold < 0) {
throw out_of_range("nFold (" + to_string(nFold) + ") must be less than k (" + to_string(k) + ")");
} }
KFold::KFold(int k, int n, int seed) : Fold(k, n, seed), indices(std::vector<int>(n)) int nTest = n / k;
{ auto train = vector<int>();
std::iota(begin(indices), end(indices), 0); // fill with 0, 1, ..., n - 1 auto test = vector<int>();
for (int i = 0; i < n; i++) {
if (i >= nTest * nFold && i < nTest * (nFold + 1)) {
test.push_back(indices[i]);
} else {
train.push_back(indices[i]);
}
}
return { train, test };
}
StratifiedKFold::StratifiedKFold(int k, torch::Tensor& y, int seed) : Fold(k, y.numel(), seed)
{
n = y.numel();
this->y = vector<int>(y.data_ptr<int>(), y.data_ptr<int>() + n);
build();
}
StratifiedKFold::StratifiedKFold(int k, const vector<int>& y, int seed)
: Fold(k, y.size(), seed)
{
this->y = y;
n = y.size();
build();
}
void StratifiedKFold::build()
{
stratified_indices = vector<vector<int>>(k);
int fold_size = n / k;
// Compute class counts and indices
auto class_indices = map<int, vector<int>>();
vector<int> class_counts(*max_element(y.begin(), y.end()) + 1, 0);
for (auto i = 0; i < n; ++i) {
class_counts[y[i]]++;
class_indices[y[i]].push_back(i);
}
// Shuffle class indices
for (auto& [cls, indices] : class_indices) {
shuffle(indices.begin(), indices.end(), random_seed); shuffle(indices.begin(), indices.end(), random_seed);
} }
std::pair<std::vector<int>, std::vector<int>> KFold::getFold(int nFold) // Assign indices to folds
{ for (auto label = 0; label < class_counts.size(); ++label) {
if (nFold >= k || nFold < 0) { auto num_samples_to_take = class_counts[label] / k;
throw std::out_of_range("nFold (" + std::to_string(nFold) + ") must be less than k (" + std::to_string(k) + ")"); if (num_samples_to_take == 0)
continue;
auto remainder_samples_to_take = class_counts[label] % k;
for (auto fold = 0; fold < k; ++fold) {
auto it = next(class_indices[label].begin(), num_samples_to_take);
move(class_indices[label].begin(), it, back_inserter(stratified_indices[fold])); // ##
class_indices[label].erase(class_indices[label].begin(), it);
} }
int nTest = n / k; while (remainder_samples_to_take > 0) {
auto train = std::vector<int>(); int fold = (rand() % static_cast<int>(k));
auto test = std::vector<int>(); if (stratified_indices[fold].size() == fold_size + 1) {
for (int i = 0; i < n; i++) {
if (i >= nTest * nFold && i < nTest * (nFold + 1)) {
test.push_back(indices[i]);
} else {
train.push_back(indices[i]);
}
}
return { train, test };
}
StratifiedKFold::StratifiedKFold(int k, torch::Tensor& y, int seed) : Fold(k, y.numel(), seed)
{
n = y.numel();
this->y = std::vector<int>(y.data_ptr<int>(), y.data_ptr<int>() + n);
build();
}
StratifiedKFold::StratifiedKFold(int k, const std::vector<int>& y, int seed)
: Fold(k, y.size(), seed)
{
this->y = y;
n = y.size();
build();
}
void StratifiedKFold::build()
{
stratified_indices = std::vector<std::vector<int>>(k);
int fold_size = n / k;
// Compute class counts and indices
auto class_indices = std::map<int, std::vector<int>>();
std::vector<int> class_counts(*max_element(y.begin(), y.end()) + 1, 0);
for (auto i = 0; i < n; ++i) {
class_counts[y[i]]++;
class_indices[y[i]].push_back(i);
}
// Shuffle class indices
for (auto& [cls, indices] : class_indices) {
shuffle(indices.begin(), indices.end(), random_seed);
}
// Assign indices to folds
for (auto label = 0; label < class_counts.size(); ++label) {
auto num_samples_to_take = class_counts.at(label) / k;
if (num_samples_to_take == 0) {
std::cerr << "Warning! The number of samples in class " << label << " (" << class_counts.at(label)
<< ") is less than the number of folds (" << k << ")." << std::endl;
faulty = true;
continue; continue;
} }
auto remainder_samples_to_take = class_counts[label] % k; auto it = next(class_indices[label].begin(), 1);
for (auto fold = 0; fold < k; ++fold) { stratified_indices[fold].push_back(*class_indices[label].begin());
auto it = next(class_indices[label].begin(), num_samples_to_take); class_indices[label].erase(class_indices[label].begin(), it);
move(class_indices[label].begin(), it, back_inserter(stratified_indices[fold])); // ## remainder_samples_to_take--;
class_indices[label].erase(class_indices[label].begin(), it);
}
auto chosen = std::vector<bool>(k, false);
while (remainder_samples_to_take > 0) {
int fold = (rand() % static_cast<int>(k));
if (chosen.at(fold)) {
continue;
}
chosen[fold] = true;
auto it = next(class_indices[label].begin(), 1);
stratified_indices[fold].push_back(*class_indices[label].begin());
class_indices[label].erase(class_indices[label].begin(), it);
remainder_samples_to_take--;
}
} }
} }
std::pair<std::vector<int>, std::vector<int>> StratifiedKFold::getFold(int nFold) }
{ pair<vector<int>, vector<int>> StratifiedKFold::getFold(int nFold)
if (nFold >= k || nFold < 0) { {
throw std::out_of_range("nFold (" + std::to_string(nFold) + ") must be less than k (" + std::to_string(k) + ")"); if (nFold >= k || nFold < 0) {
} throw out_of_range("nFold (" + to_string(nFold) + ") must be less than k (" + to_string(k) + ")");
std::vector<int> test_indices = stratified_indices[nFold];
std::vector<int> train_indices;
for (int i = 0; i < k; ++i) {
if (i == nFold) continue;
train_indices.insert(train_indices.end(), stratified_indices[i].begin(), stratified_indices[i].end());
}
return { train_indices, test_indices };
} }
vector<int> test_indices = stratified_indices[nFold];
vector<int> train_indices;
for (int i = 0; i < k; ++i) {
if (i == nFold) continue;
train_indices.insert(train_indices.end(), stratified_indices[i].begin(), stratified_indices[i].end());
}
return { train_indices, test_indices };
} }

View File

@@ -3,37 +3,35 @@
#include <torch/torch.h> #include <torch/torch.h>
#include <vector> #include <vector>
#include <random> #include <random>
namespace platform { using namespace std;
class Fold {
protected: class Fold {
int k; protected:
int n; int k;
int seed; int n;
std::default_random_engine random_seed; int seed;
public: default_random_engine random_seed;
Fold(int k, int n, int seed = -1); public:
virtual std::pair<std::vector<int>, std::vector<int>> getFold(int nFold) = 0; Fold(int k, int n, int seed = -1);
virtual ~Fold() = default; virtual pair<vector<int>, vector<int>> getFold(int nFold) = 0;
int getNumberOfFolds() { return k; } virtual ~Fold() = default;
}; int getNumberOfFolds() { return k; }
class KFold : public Fold { };
private: class KFold : public Fold {
std::vector<int> indices; private:
public: vector<int> indices;
KFold(int k, int n, int seed = -1); public:
std::pair<std::vector<int>, std::vector<int>> getFold(int nFold) override; KFold(int k, int n, int seed = -1);
}; pair<vector<int>, vector<int>> getFold(int nFold) override;
class StratifiedKFold : public Fold { };
private: class StratifiedKFold : public Fold {
std::vector<int> y; private:
std::vector<std::vector<int>> stratified_indices; vector<int> y;
void build(); vector<vector<int>> stratified_indices;
bool faulty = false; // Only true if the number of samples of any class is less than the number of folds. void build();
public: public:
StratifiedKFold(int k, const std::vector<int>& y, int seed = -1); StratifiedKFold(int k, const vector<int>& y, int seed = -1);
StratifiedKFold(int k, torch::Tensor& y, int seed = -1); StratifiedKFold(int k, torch::Tensor& y, int seed = -1);
std::pair<std::vector<int>, std::vector<int>> getFold(int nFold) override; pair<vector<int>, vector<int>> getFold(int nFold) override;
bool isFaulty() { return faulty; } };
};
}
#endif #endif

View File

@@ -1,75 +0,0 @@
#include "GridData.h"
#include <fstream>
namespace platform {
GridData::GridData(const std::string& fileName)
{
    // Load the hyperparameter grid definition from a JSON file.
    // The file maps dataset names (or the special "all" key) to a list
    // of grid "lines", each line being {param -> [candidate values]}.
    // Throws std::invalid_argument if the file cannot be opened.
    json grid_file;
    std::ifstream resultData(fileName);
    if (resultData.is_open()) {
        grid_file = json::parse(resultData);
    } else {
        throw std::invalid_argument("Unable to open input file. [" + fileName + "]");
    }
    // Copy every top-level entry into the in-memory grid map.
    for (const auto& item : grid_file.items()) {
        auto key = item.key();
        auto value = item.value();
        grid[key] = value;
    }
}
int GridData::computeNumCombinations(const json& line)
{
int numCombinations = 1;
for (const auto& item : line.items()) {
numCombinations *= item.value().size();
}
return numCombinations;
}
int GridData::getNumCombinations(const std::string& dataset)
{
    // Total number of hyperparameter combinations for a dataset,
    // summed over every grid line of its (or the fallback "all") entry.
    int numCombinations = 0;
    auto selected = decide_dataset(dataset);
    for (const auto& line : grid.at(selected)) {
        numCombinations += computeNumCombinations(line);
    }
    return numCombinations;
}
json GridData::generateCombinations(json::iterator index, const json::iterator last, std::vector<json>& output, json currentCombination)
{
    // Recursively expand the cartesian product of one grid line.
    // [index, last) iterates the remaining {param -> [values]} entries;
    // currentCombination carries the choices made so far. Completed
    // combinations are appended to output (the return value is only
    // used to unwind the recursion).
    if (index == last) {
        // If we reached the end of input, store the current combination
        output.push_back(currentCombination);
        return currentCombination;
    }
    const auto& key = index.key();
    const auto& values = index.value();
    for (const auto& value : values) {
        // Branch once per candidate value of the current parameter.
        auto combination = currentCombination;
        combination[key] = value;
        json::iterator nextIndex = index;
        generateCombinations(++nextIndex, last, output, combination);
    }
    return currentCombination;
}
std::vector<json> GridData::getGrid(const std::string& dataset)
{
    // Materialize every hyperparameter combination for a dataset by
    // expanding each grid line's cartesian product.
    auto selected = decide_dataset(dataset);
    auto result = std::vector<json>();
    for (json line : grid.at(selected)) {
        generateCombinations(line.begin(), line.end(), result, json({}));
    }
    return result;
}
json& GridData::getInputGrid(const std::string& dataset)
{
    // Raw (unexpanded) grid definition for a dataset, falling back to
    // the generic "all" entry when no specific one exists.
    auto selected = decide_dataset(dataset);
    return grid.at(selected);
}
std::string GridData::decide_dataset(const std::string& dataset)
{
    // Use the dataset's own grid entry when it exists; otherwise fall
    // back to the generic "all" grid.
    const bool has_specific = grid.count(dataset) > 0;
    return has_specific ? dataset : ALL_DATASETS;
}
} /* namespace platform */

View File

@@ -1,26 +0,0 @@
#ifndef GRIDDATA_H
#define GRIDDATA_H
#include <string>
#include <vector>
#include <map>
#include <nlohmann/json.hpp>
namespace platform {
using json = nlohmann::json;
const std::string ALL_DATASETS = "all";
    // Loads a JSON hyperparameter-grid file and expands it on demand
    // into concrete hyperparameter combinations per dataset.
    class GridData {
    public:
        explicit GridData(const std::string& fileName);
        ~GridData() = default;
        // All expanded combinations for the dataset (or "all" fallback).
        std::vector<json> getGrid(const std::string& dataset = ALL_DATASETS);
        // Number of combinations without materializing them.
        int getNumCombinations(const std::string& dataset = ALL_DATASETS);
        // Raw, unexpanded grid definition (reference into this object).
        json& getInputGrid(const std::string& dataset = ALL_DATASETS);
        std::map<std::string, json>& getGridFile() { return grid; }
    private:
        // Resolve a dataset name to its grid key ("all" if absent).
        std::string decide_dataset(const std::string& dataset);
        json generateCombinations(json::iterator index, const json::iterator last, std::vector<json>& output, json currentCombination);
        int computeNumCombinations(const json& line);
        std::map<std::string, json> grid;
    };
} /* namespace platform */
#endif /* GRIDDATA_H */

View File

@@ -1,599 +0,0 @@
#include <iostream>
#include <torch/torch.h>
#include "GridSearch.h"
#include "Models.h"
#include "Paths.h"
#include "Folding.h"
#include "Colors.h"
namespace platform {
std::string get_date()
{
    // Current local date formatted as YYYY-MM-DD.
    const time_t now = time(nullptr);
    std::ostringstream formatted;
    formatted << std::put_time(std::localtime(&now), "%Y-%m-%d");
    return formatted.str();
}
std::string get_time()
{
    // Current local time formatted as HH:MM:SS.
    const time_t now = time(nullptr);
    std::ostringstream formatted;
    formatted << std::put_time(std::localtime(&now), "%H:%M:%S");
    return formatted.str();
}
// Copies the given configuration into the member `config` (stored by
// value, so later changes to the caller's struct are not observed).
GridSearch::GridSearch(struct ConfigGrid& config) : config(config)
{
}
json GridSearch::getResults()
{
    // Load previously saved grid-search output for this model; an
    // empty json object is returned if the file does not exist.
    std::ifstream file(Paths::grid_output(config.model));
    if (file.is_open()) {
        return json::parse(file);
    }
    return json();
}
vector<std::string> GridSearch::processDatasets(Datasets& datasets)
{
// Load datasets
auto datasets_names = datasets.getNames();
if (config.continue_from != NO_CONTINUE()) {
// Continue previous execution:
if (std::find(datasets_names.begin(), datasets_names.end(), config.continue_from) == datasets_names.end()) {
throw std::invalid_argument("Dataset " + config.continue_from + " not found");
}
// Remove datasets already processed
vector< string >::iterator it = datasets_names.begin();
while (it != datasets_names.end()) {
if (*it != config.continue_from) {
it = datasets_names.erase(it);
} else {
if (config.only)
++it;
else
break;
}
}
}
// Exclude datasets
for (const auto& name : config.excluded) {
auto dataset = name.get<std::string>();
auto it = std::find(datasets_names.begin(), datasets_names.end(), dataset);
if (it == datasets_names.end()) {
throw std::invalid_argument("Dataset " + dataset + " already excluded or doesn't exist!");
}
datasets_names.erase(it);
}
return datasets_names;
}
// Overwrite-in-place progress counter "(num/total)" for hyperparameter
// combinations; `magic` is the width of the previously printed text
// that must be erased with backspaces before reprinting.
void showProgressComb(const int num, const int n_folds, const int total, const std::string& color)
{
    int spaces = int(log(total) / log(10)) + 1;  // digits needed for `total`
    int magic = n_folds * 3 + 22 + 2 * spaces;
    std::string prefix = num == 1 ? "" : string(magic, '\b') + string(magic + 1, ' ') + string(magic + 1, '\b');
    std::cout << prefix << color << "(" << setw(spaces) << num << "/" << setw(spaces) << total << ") " << Colors::RESET() << flush;
}
// Print "fold(phase)" progress, e.g. "3(a)" while training, "3(b)"
// while testing; phase "a" starts fresh, any other phase first erases
// the previous 4 characters with backspaces.
void showProgressFold(int fold, const std::string& color, const std::string& phase)
{
    std::string prefix = phase == "a" ? "" : "\b\b\b\b";
    std::cout << prefix << color << fold << Colors::RESET() << "(" << color << phase << Colors::RESET() << ")" << flush;
}
// Map a classifier status to the terminal color used to display it.
std::string getColor(bayesnet::status_t status)
{
    if (status == bayesnet::NORMAL)
        return Colors::GREEN();
    if (status == bayesnet::WARNING)
        return Colors::YELLOW();
    if (status == bayesnet::ERROR)
        return Colors::RED();
    return Colors::RESET();
}
json GridSearch::build_tasks_mpi()
{
    // Build the full MPI work list: one task per
    // (dataset, seed, fold) triple, shuffled deterministically so that
    // heavy datasets are spread across workers. Also prints the header
    // of the textual progress bar (one column per task).
    auto tasks = json::array();
    auto grid = GridData(Paths::grid_input(config.model));
    auto datasets = Datasets(false, Paths::datasets());
    auto datasets_names = processDatasets(datasets);
    for (const auto& dataset : datasets_names) {
        for (const auto& seed : config.seeds) {
            // NOTE(review): `combinations` is computed but never used here;
            // combinations are re-generated later in process_task_mpi.
            auto combinations = grid.getGrid(dataset);
            for (int n_fold = 0; n_fold < config.n_folds; n_fold++) {
                json task = {
                    { "dataset", dataset },
                    { "seed", seed },
                    { "fold", n_fold}
                };
                tasks.push_back(task);
            }
        }
    }
    // It's important to shuffle the array so heavy datasets are spread across the Workers
    std::mt19937 g{ 271 }; // Use fixed seed to obtain the same shuffle
    std::shuffle(tasks.begin(), tasks.end(), g);
    std::cout << "Tasks size: " << tasks.size() << std::endl;
    std::cout << "|";
    for (int i = 0; i < tasks.size(); ++i) {
        std::cout << (i + 1) % 10;
    }
    std::cout << "|" << std::endl << "|" << std::flush;
    return tasks;
}
std::pair<int, int> GridSearch::part_range_mpi(int n_tasks, int nprocs, int rank)
{
    // Split [0, n_tasks) into nprocs contiguous chunks; the first
    // (n_tasks % nprocs) ranks each receive one extra task. Returns
    // the half-open range [first, last) assigned to `rank`.
    const int base = n_tasks / nprocs;
    const int extra = n_tasks % nprocs;
    const bool gets_extra = rank < extra;
    const int chunk = gets_extra ? base + 1 : base;
    int first = gets_extra ? rank * chunk : extra + rank * chunk;
    int last = first + chunk;
    if (rank == nprocs - 1) {
        last = n_tasks;  // last rank absorbs any rounding slack
    }
    return { first, last };
}
std::string get_color_rank(int rank)
{
auto colors = { Colors::RED(), Colors::GREEN(), Colors::BLUE(), Colors::MAGENTA(), Colors::CYAN() };
return *(colors.begin() + rank % colors.size());
}
// Run one MPI task: for the task's (dataset, seed, fold) triple,
// perform a nested cross-validation over every hyperparameter
// combination, pick the best-scoring one, refit on the whole outer
// training split, and record score/hyperparameters/seed/duration in
// `results`. Prints one colored '*' on completion.
void GridSearch::process_task_mpi(struct ConfigMPI& config_mpi, json& task, Datasets& datasets, json& results)
{
    // Process the task and store the result in the results json
    Timer timer;
    timer.start();
    auto grid = GridData(Paths::grid_input(config.model));
    auto dataset = task["dataset"].get<std::string>();
    auto seed = task["seed"].get<int>();
    auto n_fold = task["fold"].get<int>();
    // Generate the hyperparameters combinations
    auto combinations = grid.getGrid(dataset);
    auto [X, y] = datasets.getTensors(dataset);
    auto states = datasets.getStates(dataset);
    auto features = datasets.getFeatures(dataset);
    auto className = datasets.getClassName(dataset);
    //
    // Start working on task
    //
    // Outer fold: selects this task's train/test split.
    Fold* fold;
    if (config.stratified)
        fold = new StratifiedKFold(config.n_folds, y, seed);
    else
        fold = new KFold(config.n_folds, y.size(0), seed);
    auto [train, test] = fold->getFold(n_fold);
    auto train_t = torch::tensor(train);
    auto test_t = torch::tensor(test);
    auto X_train = X.index({ "...", train_t });
    auto y_train = y.index({ train_t });
    auto X_test = X.index({ "...", test_t });
    auto y_test = y.index({ test_t });
    auto num = 0;  // NOTE(review): unused in this function
    double best_fold_score = 0.0;
    json best_fold_hyper;
    // Inner loop: evaluate every hyperparameter combination with a
    // nested k-fold on the outer training split only.
    for (const auto& hyperparam_line : combinations) {
        auto hyperparameters = platform::HyperParameters(datasets.getNames(), hyperparam_line);
        Fold* nested_fold;
        if (config.stratified)
            nested_fold = new StratifiedKFold(config.nested, y_train, seed);
        else
            nested_fold = new KFold(config.nested, y_train.size(0), seed);
        double score = 0.0;
        for (int n_nested_fold = 0; n_nested_fold < config.nested; n_nested_fold++) {
            // Nested level fold
            auto [train_nested, test_nested] = nested_fold->getFold(n_nested_fold);
            auto train_nested_t = torch::tensor(train_nested);
            auto test_nested_t = torch::tensor(test_nested);
            auto X_nested_train = X_train.index({ "...", train_nested_t });
            auto y_nested_train = y_train.index({ train_nested_t });
            auto X_nested_test = X_train.index({ "...", test_nested_t });
            auto y_nested_test = y_train.index({ test_nested_t });
            // Build Classifier with selected hyperparameters
            auto clf = Models::instance()->create(config.model);
            auto valid = clf->getValidHyperparameters();
            hyperparameters.check(valid, dataset);
            clf->setHyperparameters(hyperparameters.get(dataset));
            // Train model
            clf->fit(X_nested_train, y_nested_train, features, className, states);
            // Test model
            score += clf->score(X_nested_test, y_nested_test);
        }
        delete nested_fold;
        // Average score over the nested folds decides the winner.
        score /= config.nested;
        if (score > best_fold_score) {
            best_fold_score = score;
            best_fold_hyper = hyperparam_line;
        }
    }
    delete fold;
    // Build Classifier with the best hyperparameters to obtain the best score
    auto hyperparameters = platform::HyperParameters(datasets.getNames(), best_fold_hyper);
    auto clf = Models::instance()->create(config.model);
    auto valid = clf->getValidHyperparameters();
    hyperparameters.check(valid, dataset);
    clf->setHyperparameters(best_fold_hyper);
    clf->fit(X_train, y_train, features, className, states);
    best_fold_score = clf->score(X_test, y_test);
    // Save results
    results[dataset][std::to_string(n_fold)]["score"] = best_fold_score;
    results[dataset][std::to_string(n_fold)]["hyperparameters"] = best_fold_hyper;
    results[dataset][std::to_string(n_fold)]["seed"] = seed;
    results[dataset][std::to_string(n_fold)]["duration"] = timer.getDuration();
    std::cout << get_color_rank(config_mpi.rank) << "*" << std::flush;
}
void GridSearch::go_mpi(struct ConfigMPI& config_mpi)
{
    /*
    * Each task is a json object with the following structure:
    * {
    *   "dataset": "dataset_name",
    *   "seed": # of seed to use,
    *   "model": "model_name",
    *   "Fold": # of fold to process
    * }
    *
    * The overall process consists in these steps:
    * 1. Manager will broadcast the tasks to all the processes
    *    1.1 Broadcast the number of tasks
    *    1.2 Broadcast the length of the following string
    *    1.2 Broadcast the tasks as a char* string
    * 2. Workers will receive the tasks and start the process
    *    2.1 A method will tell each worker the range of tasks to process
    *    2.2 Each worker will process the tasks and generate the best score for each task
    * 3. Manager gather the scores from all the workers and find out the best hyperparameters for each dataset
    *    3.1 Obtain the maximum size of the results message of all the workers
    *    3.2 Gather all the results from the workers into the manager
    *    3.3 Compile the results from all the workers
    *    3.4 Filter the best hyperparameters for each dataset
    */
    char* msg;
    int tasks_size;
    if (config_mpi.rank == config_mpi.manager) {
        // Manager builds the shuffled task list and serializes it.
        timer.start();
        auto tasks = build_tasks_mpi();
        auto tasks_str = tasks.dump();
        tasks_size = tasks_str.size();
        msg = new char[tasks_size + 1];
        strcpy(msg, tasks_str.c_str());
    }
    //
    // 1. Manager will broadcast the tasks to all the processes
    //
    MPI_Bcast(&tasks_size, 1, MPI_INT, config_mpi.manager, MPI_COMM_WORLD);
    if (config_mpi.rank != config_mpi.manager) {
        // Workers allocate their receive buffer after learning the size.
        msg = new char[tasks_size + 1];
    }
    MPI_Bcast(msg, tasks_size + 1, MPI_CHAR, config_mpi.manager, MPI_COMM_WORLD);
    json tasks = json::parse(msg);
    delete[] msg;
    //
    // 2. All Workers will receive the tasks and start the process
    //
    int num_tasks = tasks.size();
    // 2.1 A method will tell each worker the range of tasks to process
    auto [start, end] = part_range_mpi(num_tasks, config_mpi.n_procs, config_mpi.rank);
    // 2.2 Each worker will process the tasks and return the best scores obtained
    auto datasets = Datasets(config.discretize, Paths::datasets());
    json results;
    for (int i = start; i < end; ++i) {
        // Process task
        process_task_mpi(config_mpi, tasks[i], datasets, results);
    }
    int size = results.dump().size() + 1;
    int max_size = 0;
    //
    // 3. Manager gather the scores from all the workers and find out the best hyperparameters for each dataset
    //
    //3.1 Obtain the maximum size of the results message of all the workers
    MPI_Allreduce(&size, &max_size, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
    // Assign the memory to the message and initialize it to 0s
    // NOTE(review): bytes of msg beyond `size` are never initialized,
    // yet MPI_Gather sends max_size bytes — the manager only reads up
    // to the embedded NUL so parsing works, but the tail bytes are
    // indeterminate; consider zero-filling the buffer.
    char* total = NULL;
    msg = new char[max_size];
    strncpy(msg, results.dump().c_str(), size);
    if (config_mpi.rank == config_mpi.manager) {
        total = new char[max_size * config_mpi.n_procs];
    }
    // 3.2 Gather all the results from the workers into the manager
    MPI_Gather(msg, max_size, MPI_CHAR, total, max_size, MPI_CHAR, config_mpi.manager, MPI_COMM_WORLD);
    delete[] msg;
    if (config_mpi.rank == config_mpi.manager) {
        std::cout << Colors::RESET() << "|" << std::endl;
        json total_results;
        json best_results;
        // 3.3 Compile the results from all the workers
        for (int i = 0; i < config_mpi.n_procs; ++i) {
            // Each worker's slice starts at a fixed max_size offset.
            json partial_results = json::parse(total + i * max_size);
            for (auto& [dataset, folds] : partial_results.items()) {
                for (auto& [fold, result] : folds.items()) {
                    total_results[dataset][fold] = result;
                }
            }
        }
        delete[] total;
        // 3.4 Filter the best hyperparameters for each dataset
        auto grid = GridData(Paths::grid_input(config.model));
        for (auto& [dataset, folds] : total_results.items()) {
            double best_score = 0.0;
            double duration = 0.0;
            json best_hyper;
            for (auto& [fold, result] : folds.items()) {
                duration += result["duration"].get<double>();
                if (result["score"] > best_score) {
                    best_score = result["score"];
                    best_hyper = result["hyperparameters"];
                }
            }
            auto timer = Timer();
            json result = {
                { "score", best_score },
                { "hyperparameters", best_hyper },
                { "date", get_date() + " " + get_time() },
                { "grid", grid.getInputGrid(dataset) },
                { "duration", timer.translate2String(duration) }
            };
            best_results[dataset] = result;
        }
        save(best_results);
    }
}
// Sequential (non-MPI) grid search driver: for every selected dataset,
// run either the single-level or nested cross-validation search and
// save partial results to disk after each dataset.
void GridSearch::go()
{
    timer.start();
    auto grid_type = config.nested == 0 ? "Single" : "Nested";
    auto datasets = Datasets(config.discretize, Paths::datasets());
    auto datasets_names = processDatasets(datasets);
    json results = initializeResults();
    std::cout << "***************** Starting " << grid_type << " Gridsearch *****************" << std::endl;
    std::cout << "input file=" << Paths::grid_input(config.model) << std::endl;
    auto grid = GridData(Paths::grid_input(config.model));
    Timer timer_dataset;
    double bestScore = 0;
    json bestHyperparameters;
    for (const auto& dataset : datasets_names) {
        if (!config.quiet)
            std::cout << "- " << setw(20) << left << dataset << " " << right << flush;
        auto combinations = grid.getGrid(dataset);
        timer_dataset.start();
        if (config.nested == 0)
            // for dataset // for hyperparameters // for seed // for fold
            tie(bestScore, bestHyperparameters) = processFileSingle(dataset, datasets, combinations);
        else
            // for dataset // for seed // for fold // for hyperparameters // for nested fold
            tie(bestScore, bestHyperparameters) = processFileNested(dataset, datasets, combinations);
        if (!config.quiet) {
            // NOTE(review): `::endl` resolves at global scope — relies on a
            // using-declaration being in effect; std::endl would be explicit.
            std::cout << "end." << " Score: " << Colors::IBLUE() << setw(9) << setprecision(7) << fixed
                << bestScore << Colors::BLUE() << " [" << bestHyperparameters.dump() << "]"
                << Colors::RESET() << ::endl;
        }
        json result = {
            { "score", bestScore },
            { "hyperparameters", bestHyperparameters },
            { "date", get_date() + " " + get_time() },
            { "grid", grid.getInputGrid(dataset) },
            { "duration", timer_dataset.getDurationString() }
        };
        results[dataset] = result;
        // Save partial results
        save(results);
    }
    // Save final results
    save(results);
    std::cout << "***************** Ending " << grid_type << " Gridsearch *******************" << std::endl;
}
// Single-level search for one dataset: every hyperparameter
// combination is scored by k-fold cross-validation averaged over all
// configured seeds; returns the best (score, hyperparameters) pair.
pair<double, json> GridSearch::processFileSingle(std::string fileName, Datasets& datasets, vector<json>& combinations)
{
    int num = 0;
    double bestScore = 0.0;
    json bestHyperparameters;
    auto totalComb = combinations.size();
    for (const auto& hyperparam_line : combinations) {
        if (!config.quiet)
            showProgressComb(++num, config.n_folds, totalComb, Colors::CYAN());
        auto hyperparameters = platform::HyperParameters(datasets.getNames(), hyperparam_line);
        // Get dataset
        auto [X, y] = datasets.getTensors(fileName);
        auto states = datasets.getStates(fileName);
        auto features = datasets.getFeatures(fileName);
        auto className = datasets.getClassName(fileName);
        double totalScore = 0.0;
        int numItems = 0;
        for (const auto& seed : config.seeds) {
            if (!config.quiet)
                std::cout << "(" << seed << ") doing Fold: " << flush;
            Fold* fold;
            if (config.stratified)
                fold = new StratifiedKFold(config.n_folds, y, seed);
            else
                fold = new KFold(config.n_folds, y.size(0), seed);
            for (int nfold = 0; nfold < config.n_folds; nfold++) {
                // Fresh classifier per fold, configured with this combination.
                auto clf = Models::instance()->create(config.model);
                auto valid = clf->getValidHyperparameters();
                hyperparameters.check(valid, fileName);
                clf->setHyperparameters(hyperparameters.get(fileName));
                auto [train, test] = fold->getFold(nfold);
                auto train_t = torch::tensor(train);
                auto test_t = torch::tensor(test);
                auto X_train = X.index({ "...", train_t });
                auto y_train = y.index({ train_t });
                auto X_test = X.index({ "...", test_t });
                auto y_test = y.index({ test_t });
                // Train model
                if (!config.quiet)
                    showProgressFold(nfold + 1, getColor(clf->getStatus()), "a");
                clf->fit(X_train, y_train, features, className, states);
                // Test model
                if (!config.quiet)
                    showProgressFold(nfold + 1, getColor(clf->getStatus()), "b");
                totalScore += clf->score(X_test, y_test);
                numItems++;
                if (!config.quiet)
                    std::cout << "\b\b\b, \b" << flush;
            }
            delete fold;
        }
        // Mean score over all seeds x folds decides the winner.
        double score = numItems == 0 ? 0.0 : totalScore / numItems;
        if (score > bestScore) {
            bestScore = score;
            bestHyperparameters = hyperparam_line;
        }
    }
    return { bestScore, bestHyperparameters };
}
// Nested search for one dataset: per seed and per outer fold, an inner
// k-fold picks the best combination, which is then refit and scored on
// the outer fold; returns the overall best (score, hyperparameters).
pair<double, json> GridSearch::processFileNested(std::string fileName, Datasets& datasets, vector<json>& combinations)
{
    // Get dataset
    auto [X, y] = datasets.getTensors(fileName);
    auto states = datasets.getStates(fileName);
    auto features = datasets.getFeatures(fileName);
    auto className = datasets.getClassName(fileName);
    int spcs_combinations = int(log(combinations.size()) / log(10)) + 1;  // digit width for the progress counter
    double goatScore = 0.0;  // best score across all seeds ("greatest of all time")
    json goatHyperparameters;
    // for dataset // for seed // for fold // for hyperparameters // for nested fold
    for (const auto& seed : config.seeds) {
        Fold* fold;
        if (config.stratified)
            fold = new StratifiedKFold(config.n_folds, y, seed);
        else
            fold = new KFold(config.n_folds, y.size(0), seed);
        double bestScore = 0.0;
        json bestHyperparameters;
        std::cout << "(" << seed << ") doing Fold: " << flush;
        for (int nfold = 0; nfold < config.n_folds; nfold++) {
            if (!config.quiet)
                std::cout << Colors::GREEN() << nfold + 1 << " " << flush;
            // First level fold
            auto [train, test] = fold->getFold(nfold);
            auto train_t = torch::tensor(train);
            auto test_t = torch::tensor(test);
            auto X_train = X.index({ "...", train_t });
            auto y_train = y.index({ train_t });
            auto X_test = X.index({ "...", test_t });
            auto y_test = y.index({ test_t });
            auto num = 0;
            json result_fold;
            double hypScore = 0.0;
            double bestHypScore = 0.0;
            json bestHypHyperparameters;
            for (const auto& hyperparam_line : combinations) {
                std::cout << "[" << setw(spcs_combinations) << ++num << "/" << setw(spcs_combinations)
                    << combinations.size() << "] " << std::flush;
                Fold* nested_fold;
                if (config.stratified)
                    nested_fold = new StratifiedKFold(config.nested, y_train, seed);
                else
                    nested_fold = new KFold(config.nested, y_train.size(0), seed);
                for (int n_nested_fold = 0; n_nested_fold < config.nested; n_nested_fold++) {
                    // Nested level fold
                    // NOTE(review): "X_nexted_train" is a local-name typo for
                    // X_nested_train (harmless, local only).
                    auto [train_nested, test_nested] = nested_fold->getFold(n_nested_fold);
                    auto train_nested_t = torch::tensor(train_nested);
                    auto test_nested_t = torch::tensor(test_nested);
                    auto X_nexted_train = X_train.index({ "...", train_nested_t });
                    auto y_nested_train = y_train.index({ train_nested_t });
                    auto X_nested_test = X_train.index({ "...", test_nested_t });
                    auto y_nested_test = y_train.index({ test_nested_t });
                    // Build Classifier with selected hyperparameters
                    auto hyperparameters = platform::HyperParameters(datasets.getNames(), hyperparam_line);
                    auto clf = Models::instance()->create(config.model);
                    auto valid = clf->getValidHyperparameters();
                    hyperparameters.check(valid, fileName);
                    clf->setHyperparameters(hyperparameters.get(fileName));
                    // Train model
                    if (!config.quiet)
                        showProgressFold(n_nested_fold + 1, getColor(clf->getStatus()), "a");
                    clf->fit(X_nexted_train, y_nested_train, features, className, states);
                    // Test model
                    if (!config.quiet)
                        showProgressFold(n_nested_fold + 1, getColor(clf->getStatus()), "b");
                    hypScore += clf->score(X_nested_test, y_nested_test);
                    if (!config.quiet)
                        std::cout << "\b\b\b, \b" << flush;
                }
                // Erase the per-combination progress text before the next one.
                int magic = 3 * config.nested + 2 * spcs_combinations + 4;
                std::cout << string(magic, '\b') << string(magic, ' ') << string(magic, '\b') << flush;
                delete nested_fold;
                hypScore /= config.nested;
                if (hypScore > bestHypScore) {
                    bestHypScore = hypScore;
                    bestHypHyperparameters = hyperparam_line;
                }
            }
            // Build Classifier with selected hyperparameters
            auto clf = Models::instance()->create(config.model);
            clf->setHyperparameters(bestHypHyperparameters);
            // Train model
            if (!config.quiet)
                showProgressFold(nfold + 1, getColor(clf->getStatus()), "a");
            clf->fit(X_train, y_train, features, className, states);
            // Test model
            if (!config.quiet)
                showProgressFold(nfold + 1, getColor(clf->getStatus()), "b");
            double score = clf->score(X_test, y_test);
            if (!config.quiet)
                std::cout << string(2 * config.nested - 1, '\b') << "," << string(2 * config.nested, ' ') << string(2 * config.nested - 1, '\b') << flush;
            if (score > bestScore) {
                bestScore = score;
                bestHyperparameters = bestHypHyperparameters;
            }
        }
        if (bestScore > goatScore) {
            goatScore = bestScore;
            goatHyperparameters = bestHyperparameters;
        }
        delete fold;
    }
    return { goatScore, goatHyperparameters };
}
// Load previous results when resuming an execution (continue_from);
// otherwise (or when the file is absent/unparseable) start empty.
json GridSearch::initializeResults()
{
    json results;
    if (config.continue_from != NO_CONTINUE()) {
        if (!config.quiet)
            std::cout << "* Loading previous results" << std::endl;
        try {
            std::ifstream file(Paths::grid_output(config.model));
            if (file.is_open()) {
                results = json::parse(file);
                results = results["results"];
            }
        }
        catch (const std::exception& e) {
            // A missing or corrupt file is not fatal: start fresh.
            std::cerr << "* There were no previous results" << std::endl;
            std::cerr << "* Initializing new results" << std::endl;
            results = json();
        }
    }
    return results;
}
// Persist the grid-search output for this model, wrapping the per-
// dataset results with the run configuration and total duration.
void GridSearch::save(json& results)
{
    std::ofstream file(Paths::grid_output(config.model));
    json output = {
        { "model", config.model },
        { "score", config.score },
        { "discretize", config.discretize },
        { "stratified", config.stratified },
        { "n_folds", config.n_folds },
        { "seeds", config.seeds },
        { "date", get_date() + " " + get_time()},
        { "nested", config.nested},
        { "platform", config.platform },
        { "duration", timer.getDurationString(true)},
        { "results", results }
    };
    file << output.dump(4);
}
} /* namespace platform */

View File

@@ -1,54 +0,0 @@
#ifndef GRIDSEARCH_H
#define GRIDSEARCH_H
#include <string>
#include <map>
#include <utility>
#include <vector>
#include <mpi.h>
#include <nlohmann/json.hpp>
#include "Datasets.h"
#include "HyperParameters.h"
#include "GridData.h"
#include "Timer.h"
namespace platform {
    using json = nlohmann::json;
    // Configuration of a grid search run: model, scoring, cross-validation
    // settings and resume information.
    struct ConfigGrid {
        std::string model;
        std::string score;
        std::string continue_from;
        std::string platform;
        bool quiet;
        bool only; // used with continue_from to only compute that dataset
        bool discretize;
        bool stratified;
        int nested;
        int n_folds;
        json excluded;
        std::vector<int> seeds;
    };
    // MPI process information used when the search is distributed
    struct ConfigMPI {
        int rank;
        int n_procs;
        int manager; // rank of the process that coordinates the others
    };
    class GridSearch {
    public:
        explicit GridSearch(struct ConfigGrid& config);
        // Run the search sequentially
        void go();
        // Run the search distributed over MPI processes
        void go_mpi(struct ConfigMPI& config_mpi);
        ~GridSearch() = default;
        json getResults();
        // Sentinel meaning "do not resume from a previous run"
        static inline std::string NO_CONTINUE() { return "NO_CONTINUE"; }
    private:
        void save(json& results);
        json initializeResults();
        // Types below qualified with std:: so the header does not depend on
        // a "using namespace std" in including translation units.
        std::vector<std::string> processDatasets(Datasets& datasets);
        std::pair<double, json> processFileSingle(std::string fileName, Datasets& datasets, std::vector<json>& combinations);
        std::pair<double, json> processFileNested(std::string fileName, Datasets& datasets, std::vector<json>& combinations);
        struct ConfigGrid config;
        std::pair<int, int> part_range_mpi(int n_tasks, int nprocs, int rank);
        json build_tasks_mpi();
        void process_task_mpi(struct ConfigMPI& config_mpi, json& task, Datasets& datasets, json& results);
        Timer timer; // used to measure the time of the whole process
    };
} /* namespace platform */
#endif /* GRIDSEARCH_H */

View File

@@ -1,55 +0,0 @@
#include "HyperParameters.h"
#include <algorithm>
#include <fstream>
#include <iostream>
#include <sstream>
namespace platform {
HyperParameters::HyperParameters(const std::vector<std::string>& datasets, const json& hyperparameters_)
{
    // Every dataset in the list gets the very same set of hyperparameters
    for (const auto& datasetName : datasets) {
        hyperparameters[datasetName] = hyperparameters_;
    }
}
// Join a vector of strings into a single string separated by delim,
// e.g. join({"a", "b"}, ",") -> "a,b".
// Fix: the previous std::ostream_iterator implementation appended a
// trailing delimiter ("a,b,"), which leaked into error messages.
std::string join(std::vector<std::string> const& strings, std::string delim)
{
    std::string result;
    for (std::size_t i = 0; i < strings.size(); ++i) {
        if (i > 0)
            result += delim;
        result += strings[i];
    }
    return result;
}
HyperParameters::HyperParameters(const std::vector<std::string>& datasets, const std::string& hyperparameters_file)
{
    // The file must exist and hold a json object indexed by dataset name
    std::ifstream input(hyperparameters_file);
    if (!input.is_open()) {
        throw std::runtime_error("File " + hyperparameters_file + " not found");
    }
    json all_hyperparameters = json::parse(input);
    // Datasets missing from the file fall back to default hyperparameters
    for (const auto& dataset : datasets) {
        if (all_hyperparameters.contains(dataset)) {
            hyperparameters[dataset] = all_hyperparameters[dataset]["hyperparameters"].get<json>();
        } else {
            std::cerr << "*Warning: Dataset " << dataset << " not found in hyperparameters file" << " assuming default hyperparameters" << std::endl;
            hyperparameters[dataset] = json({});
        }
    }
}
void HyperParameters::check(const std::vector<std::string>& valid, const std::string& fileName)
{
json result = hyperparameters.at(fileName);
for (const auto& item : result.items()) {
if (find(valid.begin(), valid.end(), item.key()) == valid.end()) {
throw std::invalid_argument("Hyperparameter " + item.key() + " is not valid. Passed Hyperparameters are: "
+ result.dump(4) + "\n Valid hyperparameters are: {" + join(valid, ",") + "}");
}
}
}
json HyperParameters::get(const std::string& fileName)
{
    // Return the hyperparameters registered for fileName; throws
    // std::out_of_range if the dataset was never registered.
    return hyperparameters.at(fileName);
}
} /* namespace platform */

View File

@@ -1,23 +0,0 @@
#ifndef HYPERPARAMETERS_H
#define HYPERPARAMETERS_H
#include <string>
#include <map>
#include <vector>
#include <nlohmann/json.hpp>
namespace platform {
    using json = nlohmann::json;
    // Per-dataset store of model hyperparameters. Built either from a single
    // json object (the same hyperparameters for every dataset) or from a json
    // file containing a "hyperparameters" section per dataset.
    class HyperParameters {
    public:
        HyperParameters() = default;
        // Assign the same hyperparameters to every dataset in the list
        explicit HyperParameters(const std::vector<std::string>& datasets, const json& hyperparameters_);
        // Load per-dataset hyperparameters from a json file; datasets not
        // present in the file get an empty (default) set
        explicit HyperParameters(const std::vector<std::string>& datasets, const std::string& hyperparameters_file);
        ~HyperParameters() = default;
        // True when the dataset has at least one hyperparameter set
        bool notEmpty(const std::string& key) const { return !hyperparameters.at(key).empty(); }
        // Throws std::invalid_argument if any hyperparameter stored for
        // fileName is not in the valid list
        void check(const std::vector<std::string>& valid, const std::string& fileName);
        // Hyperparameters registered for fileName (throws if not registered)
        json get(const std::string& fileName);
    private:
        std::map<std::string, json> hyperparameters; // dataset name -> hyperparameters
    };
} /* namespace platform */
#endif /* HYPERPARAMETERS_H */

View File

@@ -1,213 +0,0 @@
#include "ManageResults.h"
#include "CommandParser.h"
#include <filesystem>
#include <tuple>
#include "Colors.h"
#include "CLocale.h"
#include "Paths.h"
#include "ReportConsole.h"
#include "ReportExcel.h"
namespace platform {
ManageResults::ManageResults(int numFiles, const std::string& model, const std::string& score, bool complete, bool partial, bool compare) :
numFiles{ numFiles }, complete{ complete }, partial{ partial }, compare{ compare }, results(Results(Paths::results(), model, score, complete, partial))
{
indexList = true;
openExcel = false;
workbook = NULL;
if (numFiles == 0) {
this->numFiles = results.size();
}
}
void ManageResults::doMenu()
{
    // Entry point: show the (date-sorted) results and run the interactive
    // menu until the user quits; closes the excel workbook if one was opened.
    if (results.empty()) {
        std::cout << Colors::MAGENTA() << "No results found!" << Colors::RESET() << std::endl;
        return;
    }
    results.sortDate();
    list();
    menu();
    if (openExcel) {
        // A sheet was added by report(); flush and close the workbook file
        workbook_close(workbook);
    }
    std::cout << Colors::RESET() << "Done!" << std::endl;
}
void ManageResults::list()
{
    // Print the results table: a header with the number of results shown,
    // the filters in effect, the column captions and one row per result in
    // alternating colors, stopping after numFiles rows.
    auto temp = ConfigLocale(); // imbues the locale for number formatting while in scope
    std::string suffix = numFiles != results.size() ? " of " + std::to_string(results.size()) : "";
    std::stringstream oss;
    oss << "Results on screen: " << numFiles << suffix;
    std::cout << Colors::GREEN() << oss.str() << std::endl;
    std::cout << std::string(oss.str().size(), '-') << std::endl;
    if (complete) {
        std::cout << Colors::MAGENTA() << "Only listing complete results" << std::endl;
    }
    if (partial) {
        std::cout << Colors::MAGENTA() << "Only listing partial results" << std::endl;
    }
    auto i = 0;
    int maxModel = results.maxModelSize(); // widest model name, used to align the Model column
    std::cout << Colors::GREEN() << " # Date " << std::setw(maxModel) << std::left << "Model" << " Score Name Score C/P Duration Title" << std::endl;
    std::cout << "=== ========== " << std::string(maxModel, '=') << " =========== =========== === ========= =============================================================" << std::endl;
    bool odd = true; // alternate row colors for readability
    for (auto& result : results) {
        auto color = odd ? Colors::BLUE() : Colors::CYAN();
        std::cout << color << std::setw(3) << std::fixed << std::right << i++ << " ";
        std::cout << result.to_string(maxModel) << std::endl;
        if (i == numFiles) {
            break;
        }
        odd = !odd;
    }
}
bool ManageResults::confirmAction(const std::string& intent, const std::string& fileName) const
{
    // Ask the user to confirm a destructive action on fileName. Loops until
    // a single 'y' or 'n' (case insensitive) is entered; returns true only
    // on 'y'/'Y', prints "Not done!" and returns false otherwise.
    std::string color;
    if (intent == "delete") {
        color = Colors::RED();
    } else {
        color = Colors::YELLOW();
    }
    std::string line;
    bool finished = false;
    while (!finished) {
        std::cout << color << "Really want to " << intent << " " << fileName << "? (y/n): ";
        getline(std::cin, line);
        // Bug fix: the original tested tolower(line[0] == 'n') — misplaced
        // parenthesis — so answering an uppercase 'N' looped forever.
        finished = line.size() == 1 && (tolower(line[0]) == 'y' || tolower(line[0]) == 'n');
    }
    if (tolower(line[0]) == 'y') {
        return true;
    }
    std::cout << "Not done!" << std::endl;
    return false;
}
void ManageResults::report(const int index, const bool excelReport)
{
std::cout << Colors::YELLOW() << "Reporting " << results.at(index).getFilename() << std::endl;
auto data = results.at(index).load();
if (excelReport) {
ReportExcel reporter(data, compare, workbook);
reporter.show();
openExcel = true;
workbook = reporter.getWorkbook();
std::cout << "Adding sheet to " << Paths::excel() + Paths::excelResults() << std::endl;
} else {
ReportConsole reporter(data, compare);
reporter.show();
}
}
void ManageResults::showIndex(const int index, const int idx)
{
    // Show a dataset result inside a report:
    // index selects the results file, idx the dataset row within it
    auto data = results.at(index).load();
    std::cout << Colors::YELLOW() << "Showing " << results.at(index).getFilename() << std::endl;
    ReportConsole reporter(data, compare, idx);
    reporter.show();
}
void ManageResults::sortList()
{
    // Ask the user for a sorting criterion and reorder the results list.
    // An empty answer leaves the current order untouched.
    std::cout << Colors::YELLOW() << "Choose sorting field (date='d', score='s', duration='u', model='m'): ";
    std::string line;
    getline(std::cin, line);
    if (line.size() == 0)
        return;
    if (line.size() > 1) {
        std::cout << "Invalid option" << std::endl;
        return;
    }
    const char choice = line[0];
    if (choice == 'd') {
        results.sortDate();
    } else if (choice == 's') {
        results.sortScore();
    } else if (choice == 'u') {
        results.sortDuration();
    } else if (choice == 'm') {
        results.sortModel();
    } else {
        std::cout << "Invalid option" << std::endl;
    }
}
void ManageResults::menu()
{
    // Interactive command loop with two levels: the main results list
    // (indexList == true) and, after a report is shown, the list of datasets
    // inside that report (indexList == false). The parser returns the chosen
    // option letter plus, for options that require one, a numeric index.
    char option;
    // NOTE(review): index/subIndex are assigned by parser.parse before the
    // options that read them can be selected — confirm CommandParser always
    // sets the value for options flagged "requires value".
    int index, subIndex;
    bool finished = false;
    std::string filename;
    // tuple<Option, digit, requires value>
    std::vector<std::tuple<std::string, char, bool>> mainOptions = {
        {"quit", 'q', false},
        {"list", 'l', false},
        {"delete", 'd', true},
        {"hide", 'h', true},
        {"sort", 's', false},
        {"report", 'r', true},
        {"excel", 'e', true}
    };
    // Reduced option set while browsing inside a single report
    std::vector<std::tuple<std::string, char, bool>> listOptions = {
        {"report", 'r', true},
        {"list", 'l', false},
        {"quit", 'q', false}
    };
    auto parser = CommandParser();
    while (!finished) {
        if (indexList) {
            // Main level: index addresses a results file (0..numFiles-1)
            std::tie(option, index) = parser.parse(Colors::GREEN(), mainOptions, 'r', numFiles - 1);
        } else {
            // Report level: subIndex addresses a dataset row of the report
            std::tie(option, subIndex) = parser.parse(Colors::MAGENTA(), listOptions, 'r', results.at(index).load()["results"].size() - 1);
        }
        switch (option) {
            case 'q':
                finished = true;
                break;
            case 'l':
                // Back to the main results list
                list();
                indexList = true;
                break;
            case 'd':
                // Delete a results file (asks for confirmation first)
                filename = results.at(index).getFilename();
                if (!confirmAction("delete", filename))
                    break;
                std::cout << "Deleting " << filename << std::endl;
                results.deleteResult(index);
                std::cout << "File: " + filename + " deleted!" << std::endl;
                list();
                break;
            case 'h':
                // Hide a results file by moving it to the hidden folder
                filename = results.at(index).getFilename();
                if (!confirmAction("hide", filename))
                    break;
                filename = results.at(index).getFilename();
                std::cout << "Hiding " << filename << std::endl;
                results.hideResult(index, Paths::hiddenResults());
                std::cout << "File: " + filename + " hidden! (moved to " << Paths::hiddenResults() << ")" << std::endl;
                list();
                break;
            case 's':
                sortList();
                list();
                break;
            case 'r':
                if (indexList) {
                    // First 'r' shows the report and descends to report level
                    report(index, false);
                    indexList = false;
                } else {
                    // At report level, 'r' shows a single dataset row
                    showIndex(index, subIndex);
                }
                break;
            case 'e':
                // Excel report: adds a sheet to the shared workbook
                report(index, true);
                break;
        }
    }
}
} /* namespace platform */

View File

@@ -1,31 +0,0 @@
#ifndef MANAGE_RESULTS_H
#define MANAGE_RESULTS_H
#include <string>
#include "Results.h"
#include "xlsxwriter.h"
namespace platform {
    // Interactive console manager for experiment result files: list, sort,
    // report (console or excel), hide and delete them.
    class ManageResults {
    public:
        ManageResults(int numFiles, const std::string& model, const std::string& score, bool complete, bool partial, bool compare);
        ~ManageResults() = default;
        // Run the interactive menu until the user quits
        void doMenu();
    private:
        void list();
        bool confirmAction(const std::string& intent, const std::string& fileName) const;
        void report(const int index, const bool excelReport);
        void showIndex(const int index, const int idx);
        void sortList();
        void menu();
        int numFiles;
        // In-class defaults so the object is safe even if a future
        // constructor forgets to assign them
        bool indexList = true;  // true while navigating the main results list
        bool openExcel = false; // true once a sheet was added to the workbook
        bool complete;
        bool partial;
        bool compare;
        Results results;
        lxw_workbook* workbook = nullptr; // created lazily by ReportExcel
    };
} /* namespace platform */
#endif /* MANAGE_RESULTS_H */

Some files were not shown because too many files have changed in this diff Show More