Compare commits


3 Commits

SHA1        Message                                  Date
d8764db716  Fix linter warnings                      2023-07-29 01:16:19 +02:00
8049df436c  Add Models class                         2023-07-28 12:11:52 +02:00
b420ad2bc2  Add dotenv and possible multiple seeds   2023-07-28 00:53:16 +02:00
82 changed files with 879 additions and 2553 deletions


@@ -13,4 +13,5 @@ HeaderFilterRegex: 'src/*'
AnalyzeTemporaryDtors: false AnalyzeTemporaryDtors: false
WarningsAsErrors: '' WarningsAsErrors: ''
FormatStyle: file FormatStyle: file
FormatStyleOptions: ''
... ...


@@ -1,31 +0,0 @@
compilation_database_dir: build
output_directory: puml
diagrams:
BayesNet:
type: class
glob:
- src/BayesNet/*.cc
- src/Platform/*.cc
using_namespace: bayesnet
include:
namespaces:
- bayesnet
- platform
plantuml:
after:
- "note left of {{ alias(\"MyProjectMain\") }}: Main class of myproject library."
sequence:
type: sequence
glob:
- src/Platform/main.cc
combine_free_functions_into_file_participants: true
using_namespace:
- std
- bayesnet
- platform
include:
paths:
- src/BayesNet
- src/Platform
start_from:
- function: main(int,const char **)

.gitignore vendored

@@ -35,4 +35,3 @@ build/
*.dSYM/** *.dSYM/**
cmake-build*/** cmake-build*/**
.idea .idea
puml/**

.gitmodules vendored

@@ -10,6 +10,3 @@
[submodule "lib/json"] [submodule "lib/json"]
path = lib/json path = lib/json
url = https://github.com/nlohmann/json.git url = https://github.com/nlohmann/json.git
[submodule "lib/openXLSX"]
path = lib/openXLSX
url = https://github.com/troldal/OpenXLSX.git

.vscode/launch.json vendored

@@ -10,13 +10,12 @@
"-d", "-d",
"iris", "iris",
"-m", "-m",
"TANLd", "TAN",
"-s",
"271",
"-p", "-p",
"/Users/rmontanana/Code/discretizbench/datasets/", "../../data/",
"--tensors"
], ],
//"cwd": "${workspaceFolder}/build/sample/", "cwd": "${workspaceFolder}/build/sample/",
}, },
{ {
"type": "lldb", "type": "lldb",
@@ -25,36 +24,19 @@
"program": "${workspaceFolder}/build/src/Platform/main", "program": "${workspaceFolder}/build/src/Platform/main",
"args": [ "args": [
"-m", "-m",
"AODE", "TAN",
"-p", "-p",
"/home/rmontanana/Code/discretizbench/datasets", "/Users/rmontanana/Code/discretizbench/datasets",
"--discretize",
"--stratified", "--stratified",
"--title",
"Debug test",
"--seeds",
"1",
"-d", "-d",
"mfeat-morphological", "ionosphere"
"--discretize"
// "--hyperparameters",
// "{\"repeatSparent\": true, \"maxModels\": 12}"
], ],
"cwd": "/home/rmontanana/Code/discretizbench", "cwd": "${workspaceFolder}/build/src/Platform",
},
{
"type": "lldb",
"request": "launch",
"name": "manage",
"program": "${workspaceFolder}/build/src/Platform/manage",
"args": [
"-n",
"20"
],
"cwd": "/Users/rmontanana/Code/discretizbench",
},
{
"type": "lldb",
"request": "launch",
"name": "list",
"program": "${workspaceFolder}/build/src/Platform/list",
"args": [],
"cwd": "/Users/rmontanana/Code/discretizbench",
}, },
{ {
"name": "Build & debug active file", "name": "Build & debug active file",

.vscode/tasks.json vendored

@@ -32,29 +32,6 @@
], ],
"group": "build", "group": "build",
"detail": "Task generated by Debugger." "detail": "Task generated by Debugger."
},
{
"type": "cppbuild",
"label": "C/C++: g++ build active file",
"command": "/usr/bin/g++",
"args": [
"-fdiagnostics-color=always",
"-g",
"${file}",
"-o",
"${fileDirname}/${fileBasenameNoExtension}"
],
"options": {
"cwd": "${fileDirname}"
},
"problemMatcher": [
"$gcc"
],
"group": {
"kind": "build",
"isDefault": true
},
"detail": "Task generated by Debugger."
} }
] ]
} }


@@ -1,20 +1,16 @@
cmake_minimum_required(VERSION 3.20) cmake_minimum_required(VERSION 3.20)
project(BayesNet project(BayesNet
VERSION 0.2.0 VERSION 0.1.0
DESCRIPTION "Bayesian Network and basic classifiers Library." DESCRIPTION "Bayesian Network and basic classifiers Library."
HOMEPAGE_URL "https://github.com/rmontanana/bayesnet" HOMEPAGE_URL "https://github.com/rmontanana/bayesnet"
LANGUAGES CXX LANGUAGES CXX
) )
if (CODE_COVERAGE AND NOT ENABLE_TESTING)
MESSAGE(FATAL_ERROR "Code coverage requires testing enabled")
endif (CODE_COVERAGE AND NOT ENABLE_TESTING)
find_package(Torch REQUIRED) find_package(Torch REQUIRED)
if (POLICY CMP0135) if (POLICY CMP0135)
cmake_policy(SET CMP0135 NEW) cmake_policy(SET CMP0135 NEW)
endif () endif ()
# Global CMake variables # Global CMake variables
@@ -28,33 +24,26 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")
# Options # Options
# ------- # -------
option(ENABLE_CLANG_TIDY "Enable to add clang tidy." OFF) option(ENABLE_CLANG_TIDY "Enable to add clang tidy." OFF)
option(ENABLE_TESTING "Unit testing build" OFF) option(ENABLE_TESTING "Unit testing build" ON)
option(CODE_COVERAGE "Collect coverage from test library" OFF) option(CODE_COVERAGE "Collect coverage from test library" ON)
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread")
set(CMAKE_BUILD_TYPE "Debug")
# CMakes modules # CMakes modules
# -------------- # --------------
set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules ${CMAKE_MODULE_PATH}) set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules ${CMAKE_MODULE_PATH})
include(AddGitSubmodule) include(AddGitSubmodule)
if (CODE_COVERAGE) include(StaticAnalyzers) # clang-tidy
enable_testing() include(CodeCoverage)
include(CodeCoverage)
MESSAGE("Code coverage enabled")
set(CMAKE_CXX_FLAGS " ${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage -O0")
SET(GCC_COVERAGE_LINK_FLAGS " ${GCC_COVERAGE_LINK_FLAGS} -lgcov --coverage")
endif (CODE_COVERAGE)
if (ENABLE_CLANG_TIDY)
include(StaticAnalyzers) # clang-tidy
endif (ENABLE_CLANG_TIDY)
# External libraries - dependencies of BayesNet # External libraries - dependencies of BayesNet
# --------------------------------------------- # ---------------------------------------------
# include(FetchContent) # include(FetchContent)
add_git_submodule("lib/mdlp") add_git_submodule("lib/mdlp")
add_git_submodule("lib/catch2")
add_git_submodule("lib/argparse") add_git_submodule("lib/argparse")
add_git_submodule("lib/json") add_git_submodule("lib/json")
add_git_submodule("lib/openXLSX")
# Subdirectories # Subdirectories
# -------------- # --------------
@@ -70,11 +59,18 @@ file(GLOB Platform_SOURCES CONFIGURE_DEPENDS ${BayesNet_SOURCE_DIR}/src/Platform
# Testing # Testing
# ------- # -------
if (ENABLE_TESTING) if (ENABLE_TESTING)
MESSAGE("Testing enabled") MESSAGE("Testing enabled")
add_git_submodule("lib/catch2") enable_testing()
if (CODE_COVERAGE)
#include(CodeCoverage)
MESSAGE("Code coverage enabled")
set(CMAKE_C_FLAGS " ${CMAKE_C_FLAGS} -fprofile-arcs -ftest-coverage")
set(CMAKE_CXX_FLAGS " ${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage")
SET(GCC_COVERAGE_LINK_FLAGS " ${GCC_COVERAGE_LINK_FLAGS} -lgcov --coverage")
endif (CODE_COVERAGE)
#find_package(Catch2 3 REQUIRED)
include(CTest) include(CTest)
#include(Catch)
add_subdirectory(tests) add_subdirectory(tests)
endif (ENABLE_TESTING) endif (ENABLE_TESTING)


@@ -11,46 +11,19 @@ setup: ## Install dependencies for tests and coverage
pip install gcovr; \ pip install gcovr; \
fi fi
dest ?= ../discretizbench
copy: ## Copy binary files to selected folder
@echo "Destination folder: $(dest)"
make build
@echo ">>> Copying files to $(dest)"
@cp build/src/Platform/main $(dest)
@cp build/src/Platform/list $(dest)
@cp build/src/Platform/manage $(dest)
@echo ">>> Done"
dependency: ## Create a dependency graph diagram of the project (build/dependency.png) dependency: ## Create a dependency graph diagram of the project (build/dependency.png)
cd build && cmake .. --graphviz=dependency.dot && dot -Tpng dependency.dot -o dependency.png cd build && cmake .. --graphviz=dependency.dot && dot -Tpng dependency.dot -o dependency.png
build: ## Build the main and BayesNetSample build: ## Build the project
cmake --build build -t main -t BayesNetSample -t manage -t list -j 32 @echo ">>> Building BayesNet ...";
clean: ## Clean the debug info
@echo ">>> Cleaning Debug BayesNet ...";
find . -name "*.gcda" -print0 | xargs -0 rm
@echo ">>> Done";
clang-uml: ## Create uml class and sequence diagrams
clang-uml -p --add-compile-flag -I /usr/lib/gcc/x86_64-redhat-linux/8/include/
debug: ## Build a debug version of the project
@echo ">>> Building Debug BayesNet ...";
@if [ -d ./build ]; then rm -rf ./build; fi @if [ -d ./build ]; then rm -rf ./build; fi
@mkdir build; @mkdir build;
cmake -S . -B build -D CMAKE_BUILD_TYPE=Debug -D ENABLE_TESTING=ON -D CODE_COVERAGE=ON; \ cmake -S . -B build; \
cmake --build build -j 32; cd build; \
make; \
@echo ">>> Done"; @echo ">>> Done";
release: ## Build a Release version of the project
@echo ">>> Building Release BayesNet ...";
@if [ -d ./build ]; then rm -rf ./build; fi
@mkdir build;
cmake -S . -B build -D CMAKE_BUILD_TYPE=Release; \
cmake --build build -t main -t BayesNetSample -t manage -t list -j 32;
@echo ">>> Done";
test: ## Run tests test: ## Run tests
@echo "* Running tests..."; @echo "* Running tests...";
find . -name "*.gcda" -print0 | xargs -0 rm find . -name "*.gcda" -print0 | xargs -0 rm


@@ -0,0 +1 @@
null

Binary file not shown.


@@ -1,4 +1,5 @@
filter = src/ filter = src/
exclude-directories = build/lib/ exclude = external/
exclude = tests/
print-summary = yes print-summary = yes
sort-percentage = yes sort-percentage = yes


@@ -1 +1,2 @@
add_library(ArffFiles ArffFiles.cc) add_library(ArffFiles ArffFiles.cc)
#target_link_libraries(BayesNet "${TORCH_LIBRARIES}")

Submodule lib/openXLSX deleted from b80da42d14


@@ -3,6 +3,5 @@ include_directories(${BayesNet_SOURCE_DIR}/src/BayesNet)
include_directories(${BayesNet_SOURCE_DIR}/lib/Files) include_directories(${BayesNet_SOURCE_DIR}/lib/Files)
include_directories(${BayesNet_SOURCE_DIR}/lib/mdlp) include_directories(${BayesNet_SOURCE_DIR}/lib/mdlp)
include_directories(${BayesNet_SOURCE_DIR}/lib/argparse/include) include_directories(${BayesNet_SOURCE_DIR}/lib/argparse/include)
include_directories(${BayesNet_SOURCE_DIR}/lib/json/include)
add_executable(BayesNetSample sample.cc ${BayesNet_SOURCE_DIR}/src/Platform/Folding.cc ${BayesNet_SOURCE_DIR}/src/Platform/Models.cc) add_executable(BayesNetSample sample.cc ${BayesNet_SOURCE_DIR}/src/Platform/Folding.cc ${BayesNet_SOURCE_DIR}/src/Platform/Models.cc)
target_link_libraries(BayesNetSample BayesNet ArffFiles mdlp "${TORCH_LIBRARIES}") target_link_libraries(BayesNetSample BayesNet ArffFiles mdlp "${TORCH_LIBRARIES}")


@@ -1,16 +1,15 @@
#include <iostream> #include <iostream>
#include <torch/torch.h> #include <torch/torch.h>
#include <string> #include <string>
#include <thread>
#include <map> #include <map>
#include <argparse/argparse.hpp> #include <argparse/argparse.hpp>
#include <nlohmann/json.hpp>
#include "ArffFiles.h" #include "ArffFiles.h"
#include "BayesMetrics.h" #include "BayesMetrics.h"
#include "CPPFImdlp.h" #include "CPPFImdlp.h"
#include "Folding.h" #include "Folding.h"
#include "Models.h" #include "Models.h"
#include "modelRegister.h"
#include <fstream>
using namespace std; using namespace std;
@@ -42,7 +41,7 @@ bool file_exists(const std::string& name)
} }
pair<vector<vector<int>>, vector<int>> extract_indices(vector<int> indices, vector<vector<int>> X, vector<int> y) pair<vector<vector<int>>, vector<int>> extract_indices(vector<int> indices, vector<vector<int>> X, vector<int> y)
{ {
vector<vector<int>> Xr; // nxm vector<vector<int>> Xr;
vector<int> yr; vector<int> yr;
for (int col = 0; col < X.size(); ++col) { for (int col = 0; col < X.size(); ++col) {
Xr.push_back(vector<int>()); Xr.push_back(vector<int>());
@@ -69,8 +68,9 @@ int main(int argc, char** argv)
{"mfeat-factors", true}, {"mfeat-factors", true},
}; };
auto valid_datasets = vector<string>(); auto valid_datasets = vector<string>();
transform(datasets.begin(), datasets.end(), back_inserter(valid_datasets), for (auto dataset : datasets) {
[](const pair<string, bool>& pair) { return pair.first; }); valid_datasets.push_back(dataset.first);
}
argparse::ArgumentParser program("BayesNetSample"); argparse::ArgumentParser program("BayesNetSample");
program.add_argument("-d", "--dataset") program.add_argument("-d", "--dataset")
.help("Dataset file name") .help("Dataset file name")
@@ -86,17 +86,16 @@ int main(int argc, char** argv)
.default_value(string{ PATH } .default_value(string{ PATH }
); );
program.add_argument("-m", "--model") program.add_argument("-m", "--model")
.help("Model to use " + platform::Models::instance()->toString()) .help("Model to use " + platform::Models::toString())
.action([](const std::string& value) { .action([](const std::string& value) {
static const vector<string> choices = platform::Models::instance()->getNames(); static const vector<string> choices = platform::Models::getNames();
if (find(choices.begin(), choices.end(), value) != choices.end()) { if (find(choices.begin(), choices.end(), value) != choices.end()) {
return value; return value;
} }
throw runtime_error("Model must be one of " + platform::Models::instance()->toString()); throw runtime_error("Model must be one of " + platform::Models::toString());
} }
); );
program.add_argument("--discretize").help("Discretize input dataset").default_value(false).implicit_value(true); program.add_argument("--discretize").help("Discretize input dataset").default_value(false).implicit_value(true);
program.add_argument("--dumpcpt").help("Dump CPT Tables").default_value(false).implicit_value(true);
program.add_argument("--stratified").help("If Stratified KFold is to be done").default_value(false).implicit_value(true); program.add_argument("--stratified").help("If Stratified KFold is to be done").default_value(false).implicit_value(true);
program.add_argument("--tensors").help("Use tensors to store samples").default_value(false).implicit_value(true); program.add_argument("--tensors").help("Use tensors to store samples").default_value(false).implicit_value(true);
program.add_argument("-f", "--folds").help("Number of folds").default_value(5).scan<'i', int>().action([](const string& value) { program.add_argument("-f", "--folds").help("Number of folds").default_value(5).scan<'i', int>().action([](const string& value) {
@@ -114,7 +113,7 @@ int main(int argc, char** argv)
throw runtime_error("Number of folds must be an integer"); throw runtime_error("Number of folds must be an integer");
}}); }});
program.add_argument("-s", "--seed").help("Random seed").default_value(-1).scan<'i', int>(); program.add_argument("-s", "--seed").help("Random seed").default_value(-1).scan<'i', int>();
bool class_last, stratified, tensors, dump_cpt; bool class_last, stratified, tensors;
string model_name, file_name, path, complete_file_name; string model_name, file_name, path, complete_file_name;
int nFolds, seed; int nFolds, seed;
try { try {
@@ -127,7 +126,6 @@ int main(int argc, char** argv)
tensors = program.get<bool>("tensors"); tensors = program.get<bool>("tensors");
nFolds = program.get<int>("folds"); nFolds = program.get<int>("folds");
seed = program.get<int>("seed"); seed = program.get<int>("seed");
dump_cpt = program.get<bool>("dumpcpt");
class_last = datasets[file_name]; class_last = datasets[file_name];
if (!file_exists(complete_file_name)) { if (!file_exists(complete_file_name)) {
throw runtime_error("Data File " + path + file_name + ".arff" + " does not exist"); throw runtime_error("Data File " + path + file_name + ".arff" + " does not exist");
@@ -150,9 +148,9 @@ int main(int argc, char** argv)
// Get className & Features // Get className & Features
auto className = handler.getClassName(); auto className = handler.getClassName();
vector<string> features; vector<string> features;
auto attributes = handler.getAttributes(); for (auto feature : handler.getAttributes()) {
transform(attributes.begin(), attributes.end(), back_inserter(features), features.push_back(feature.first);
[](const pair<string, string>& item) { return item.first; }); }
// Discretize Dataset // Discretize Dataset
auto [Xd, maxes] = discretize(X, y, features); auto [Xd, maxes] = discretize(X, y, features);
maxes[className] = *max_element(y.begin(), y.end()) + 1; maxes[className] = *max_element(y.begin(), y.end()) + 1;
@@ -161,25 +159,16 @@ int main(int argc, char** argv)
states[feature] = vector<int>(maxes[feature]); states[feature] = vector<int>(maxes[feature]);
} }
states[className] = vector<int>(maxes[className]); states[className] = vector<int>(maxes[className]);
auto clf = platform::Models::instance()->create(model_name);
bayesnet::BaseClassifier* clf = platform::Models::get(model_name);
clf->fit(Xd, y, features, className, states); clf->fit(Xd, y, features, className, states);
if (dump_cpt) { auto score = clf->score(Xd, y);
cout << "--- CPT Tables ---" << endl;
clf->dump_cpt();
}
auto lines = clf->show(); auto lines = clf->show();
auto graph = clf->graph();
for (auto line : lines) { for (auto line : lines) {
cout << line << endl; cout << line << endl;
} }
cout << "--- Topological Order ---" << endl;
auto order = clf->topological_order();
for (auto name : order) {
cout << name << ", ";
}
cout << "end." << endl;
auto score = clf->score(Xd, y);
cout << "Score: " << score << endl; cout << "Score: " << score << endl;
auto graph = clf->graph();
auto dot_file = model_name + "_" + file_name; auto dot_file = model_name + "_" + file_name;
ofstream file(dot_file + ".dot"); ofstream file(dot_file + ".dot");
file << graph; file << graph;
@@ -195,11 +184,11 @@ int main(int argc, char** argv)
Xt.index_put_({ i, "..." }, torch::tensor(Xd[i], torch::kInt32)); Xt.index_put_({ i, "..." }, torch::tensor(Xd[i], torch::kInt32));
} }
float total_score = 0, total_score_train = 0, score_train, score_test; float total_score = 0, total_score_train = 0, score_train, score_test;
platform::Fold* fold; Fold* fold;
if (stratified) if (stratified)
fold = new platform::StratifiedKFold(nFolds, y, seed); fold = new StratifiedKFold(nFolds, y, seed);
else else
fold = new platform::KFold(nFolds, y.size(), seed); fold = new KFold(nFolds, y.size(), seed);
for (auto i = 0; i < nFolds; ++i) { for (auto i = 0; i < nFolds; ++i) {
auto [train, test] = fold->getFold(i); auto [train, test] = fold->getFold(i);
cout << "Fold: " << i + 1 << endl; cout << "Fold: " << i + 1 << endl;
@@ -211,7 +200,6 @@ int main(int argc, char** argv)
torch::Tensor Xtestt = torch::index_select(Xt, 1, ttest); torch::Tensor Xtestt = torch::index_select(Xt, 1, ttest);
torch::Tensor ytestt = yt.index({ ttest }); torch::Tensor ytestt = yt.index({ ttest });
clf->fit(Xtraint, ytraint, features, className, states); clf->fit(Xtraint, ytraint, features, className, states);
auto temp = clf->predict(Xtraint);
score_train = clf->score(Xtraint, ytraint); score_train = clf->score(Xtraint, ytraint);
score_test = clf->score(Xtestt, ytestt); score_test = clf->score(Xtestt, ytestt);
} else { } else {
@@ -221,10 +209,6 @@ int main(int argc, char** argv)
score_train = clf->score(Xtrain, ytrain); score_train = clf->score(Xtrain, ytrain);
score_test = clf->score(Xtest, ytest); score_test = clf->score(Xtest, ytest);
} }
if (dump_cpt) {
cout << "--- CPT Tables ---" << endl;
clf->dump_cpt();
}
total_score_train += score_train; total_score_train += score_train;
total_score += score_test; total_score += score_test;
cout << "Score Train: " << score_train << endl; cout << "Score Train: " << score_train << endl;
@@ -233,5 +217,6 @@ int main(int argc, char** argv)
} }
cout << "**********************************************************************************" << endl; cout << "**********************************************************************************" << endl;
cout << "Average Score Train: " << total_score_train / nFolds << endl; cout << "Average Score Train: " << total_score_train / nFolds << endl;
cout << "Average Score Test : " << total_score / nFolds << endl;return 0; cout << "Average Score Test : " << total_score / nFolds << endl;
return 0;
} }
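The `extract_indices` helper shown in the sample diff pulls one fold's samples out of the feature-major `X` matrix (`X[feature][sample]`) and the label vector `y`. A standalone sketch of the same idea, with illustrative names and plain `std::vector`s rather than the file's exact code:

```cpp
#include <utility>
#include <vector>

// Select the samples named by `indices` from X (feature-major) and y,
// preserving the feature-major layout in the result.
std::pair<std::vector<std::vector<int>>, std::vector<int>>
extractIndices(const std::vector<int>& indices,
               const std::vector<std::vector<int>>& X,
               const std::vector<int>& y)
{
    std::vector<std::vector<int>> Xr(X.size());
    std::vector<int> yr;
    for (int idx : indices) {
        for (size_t col = 0; col < X.size(); ++col) {
            Xr[col].push_back(X[col][idx]);
        }
        yr.push_back(y[idx]);
    }
    return { Xr, yr };
}
```

This is what both the tensor and the vector branches of the k-fold loop rely on: `fold->getFold(i)` yields train/test index lists, which are then materialised into concrete train and test sets.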


@@ -2,16 +2,14 @@
namespace bayesnet { namespace bayesnet {
AODE::AODE() : Ensemble() {} AODE::AODE() : Ensemble() {}
void AODE::buildModel(const torch::Tensor& weights) void AODE::train()
{ {
models.clear(); models.clear();
for (int i = 0; i < features.size(); ++i) { for (int i = 0; i < features.size(); ++i) {
models.push_back(std::make_unique<SPODE>(i)); models.push_back(std::make_unique<SPODE>(i));
} }
n_models = models.size();
significanceModels = vector<double>(n_models, 1.0);
} }
vector<string> AODE::graph(const string& title) const vector<string> AODE::graph(string title)
{ {
return Ensemble::graph(title); return Ensemble::graph(title);
} }


@@ -5,11 +5,10 @@
namespace bayesnet { namespace bayesnet {
class AODE : public Ensemble { class AODE : public Ensemble {
protected: protected:
void buildModel(const torch::Tensor& weights) override; void train() override;
public: public:
AODE(); AODE();
virtual ~AODE() {}; vector<string> graph(string title = "AODE") override;
vector<string> graph(const string& title = "AODE") const override;
}; };
} }
#endif #endif
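Both versions of AODE in the diff share one construction: the ensemble holds one SPODE per feature, each using that feature as the super-parent, and the variant with `buildModel` additionally gives every member a uniform significance (vote weight) of 1.0. A minimal sketch under assumed class names:

```cpp
#include <memory>
#include <vector>

// Hypothetical stand-in for bayesnet::SPODE: only the super-parent
// index matters for this sketch.
struct Spode {
    explicit Spode(int superParent) : superParent(superParent) {}
    int superParent;
};

struct AodeSketch {
    std::vector<std::unique_ptr<Spode>> models;
    std::vector<double> significance;
    void build(int nFeatures)
    {
        models.clear();
        for (int i = 0; i < nFeatures; ++i) {
            models.push_back(std::make_unique<Spode>(i));
        }
        significance.assign(models.size(), 1.0); // every SPODE votes equally
    }
};
```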


@@ -1,41 +0,0 @@
#include "AODELd.h"
#include "Models.h"
namespace bayesnet {
using namespace std;
AODELd::AODELd() : Ensemble(), Proposal(dataset, features, className) {}
AODELd& AODELd::fit(torch::Tensor& X_, torch::Tensor& y_, const vector<string>& features_, const string& className_, map<string, vector<int>>& states_)
{
checkInput(X_, y_);
features = features_;
className = className_;
Xf = X_;
y = y_;
// Fills vectors Xv & yv with the data from tensors X_ (discretized) & y
states = fit_local_discretization(y);
// We have discretized the input data
// 1st we need to fit the model to build the normal TAN structure, TAN::fit initializes the base Bayesian network
Ensemble::fit(dataset, features, className, states);
return *this;
}
void AODELd::buildModel(const torch::Tensor& weights)
{
models.clear();
for (int i = 0; i < features.size(); ++i) {
models.push_back(std::make_unique<SPODELd>(i));
}
n_models = models.size();
significanceModels = vector<double>(n_models, 1.0);
}
void AODELd::trainModel(const torch::Tensor& weights)
{
for (const auto& model : models) {
model->fit(Xf, y, features, className, states);
}
}
vector<string> AODELd::graph(const string& name) const
{
return Ensemble::graph(name);
}
}


@@ -1,21 +0,0 @@
#ifndef AODELD_H
#define AODELD_H
#include "Ensemble.h"
#include "Proposal.h"
#include "SPODELd.h"
namespace bayesnet {
using namespace std;
class AODELd : public Ensemble, public Proposal {
protected:
void trainModel(const torch::Tensor& weights) override;
void buildModel(const torch::Tensor& weights) override;
public:
AODELd();
AODELd& fit(torch::Tensor& X_, torch::Tensor& y_, const vector<string>& features_, const string& className_, map<string, vector<int>>& states_) override;
virtual ~AODELd() = default;
vector<string> graph(const string& name = "AODELd") const override;
static inline string version() { return "0.0.1"; };
};
}
#endif // !AODELD_H


@@ -1,34 +1,23 @@
#ifndef BASE_H #ifndef BASE_H
#define BASE_H #define BASE_H
#include <torch/torch.h> #include <torch/torch.h>
#include <nlohmann/json.hpp>
#include <vector> #include <vector>
namespace bayesnet { namespace bayesnet {
using namespace std; using namespace std;
class BaseClassifier { class BaseClassifier {
protected:
virtual void trainModel(const torch::Tensor& weights) = 0;
public: public:
// X is nxm vector, y is nx1 vector virtual BaseClassifier& fit(vector<vector<int>>& X, vector<int>& y, vector<string>& features, string className, map<string, vector<int>>& states) = 0;
virtual BaseClassifier& fit(vector<vector<int>>& X, vector<int>& y, const vector<string>& features, const string& className, map<string, vector<int>>& states) = 0; virtual BaseClassifier& fit(torch::Tensor& X, torch::Tensor& y, vector<string>& features, string className, map<string, vector<int>>& states) = 0;
// X is nxm tensor, y is nx1 tensor
virtual BaseClassifier& fit(torch::Tensor& X, torch::Tensor& y, const vector<string>& features, const string& className, map<string, vector<int>>& states) = 0;
virtual BaseClassifier& fit(torch::Tensor& dataset, const vector<string>& features, const string& className, map<string, vector<int>>& states) = 0;
virtual BaseClassifier& fit(torch::Tensor& dataset, const vector<string>& features, const string& className, map<string, vector<int>>& states, const torch::Tensor& weights) = 0;
virtual ~BaseClassifier() = default;
torch::Tensor virtual predict(torch::Tensor& X) = 0;
vector<int> virtual predict(vector<vector<int>>& X) = 0; vector<int> virtual predict(vector<vector<int>>& X) = 0;
float virtual score(vector<vector<int>>& X, vector<int>& y) = 0; float virtual score(vector<vector<int>>& X, vector<int>& y) = 0;
float virtual score(torch::Tensor& X, torch::Tensor& y) = 0; float virtual score(torch::Tensor& X, torch::Tensor& y) = 0;
int virtual getNumberOfNodes()const = 0; int virtual getNumberOfNodes() = 0;
int virtual getNumberOfEdges()const = 0; int virtual getNumberOfEdges() = 0;
int virtual getNumberOfStates() const = 0; int virtual getNumberOfStates() = 0;
vector<string> virtual show() const = 0; vector<string> virtual show() = 0;
vector<string> virtual graph(const string& title = "") const = 0; vector<string> virtual graph(string title = "") = 0;
const string inline getVersion() const { return "0.2.0"; }; virtual ~BaseClassifier() = default;
vector<string> virtual topological_order() = 0; const string inline getVersion() const { return "0.1.0"; };
void virtual dump_cpt()const = 0;
virtual void setHyperparameters(nlohmann::json& hyperparameters) = 0;
}; };
} }
#endif #endif


@@ -1,15 +1,13 @@
#include "BayesMetrics.h" #include "BayesMetrics.h"
#include "Mst.h" #include "Mst.h"
namespace bayesnet { namespace bayesnet {
//samples is nxm tensor used to fit the model Metrics::Metrics(torch::Tensor& samples, vector<string>& features, string& className, int classNumStates)
Metrics::Metrics(const torch::Tensor& samples, const vector<string>& features, const string& className, const int classNumStates)
: samples(samples) : samples(samples)
, features(features) , features(features)
, className(className) , className(className)
, classNumStates(classNumStates) , classNumStates(classNumStates)
{ {
} }
//samples is nxm vector used to fit the model
Metrics::Metrics(const vector<vector<int>>& vsamples, const vector<int>& labels, const vector<string>& features, const string& className, const int classNumStates) Metrics::Metrics(const vector<vector<int>>& vsamples, const vector<int>& labels, const vector<string>& features, const string& className, const int classNumStates)
: features(features) : features(features)
, className(className) , className(className)
@@ -17,48 +15,9 @@ namespace bayesnet {
, samples(torch::zeros({ static_cast<int>(vsamples[0].size()), static_cast<int>(vsamples.size() + 1) }, torch::kInt32)) , samples(torch::zeros({ static_cast<int>(vsamples[0].size()), static_cast<int>(vsamples.size() + 1) }, torch::kInt32))
{ {
for (int i = 0; i < vsamples.size(); ++i) { for (int i = 0; i < vsamples.size(); ++i) {
samples.index_put_({ i, "..." }, torch::tensor(vsamples[i], torch::kInt32)); samples.index_put_({ "...", i }, torch::tensor(vsamples[i], torch::kInt32));
} }
samples.index_put_({ -1, "..." }, torch::tensor(labels, torch::kInt32)); samples.index_put_({ "...", -1 }, torch::tensor(labels, torch::kInt32));
}
vector<int> Metrics::SelectKBestWeighted(const torch::Tensor& weights, bool ascending, unsigned k)
{
// Return the K Best features
auto n = samples.size(0) - 1;
if (k == 0) {
k = n;
}
// compute scores
scoresKBest.clear();
featuresKBest.clear();
auto label = samples.index({ -1, "..." });
for (int i = 0; i < n; ++i) {
scoresKBest.push_back(mutualInformation(label, samples.index({ i, "..." }), weights));
featuresKBest.push_back(i);
}
// sort & reduce scores and features
if (ascending) {
sort(featuresKBest.begin(), featuresKBest.end(), [&](int i, int j)
{ return scoresKBest[i] < scoresKBest[j]; });
sort(scoresKBest.begin(), scoresKBest.end(), std::less<double>());
if (k < n) {
for (int i = 0; i < n - k; ++i) {
featuresKBest.erase(featuresKBest.begin());
scoresKBest.erase(scoresKBest.begin());
}
}
} else {
sort(featuresKBest.begin(), featuresKBest.end(), [&](int i, int j)
{ return scoresKBest[i] > scoresKBest[j]; });
sort(scoresKBest.begin(), scoresKBest.end(), std::greater<double>());
featuresKBest.resize(k);
scoresKBest.resize(k);
}
return featuresKBest;
}
vector<double> Metrics::getScoresKBest() const
{
return scoresKBest;
} }
vector<pair<string, string>> Metrics::doCombinations(const vector<string>& source) vector<pair<string, string>> Metrics::doCombinations(const vector<string>& source)
{ {
@@ -71,29 +30,28 @@ namespace bayesnet {
} }
return result; return result;
} }
torch::Tensor Metrics::conditionalEdge(const torch::Tensor& weights) torch::Tensor Metrics::conditionalEdge()
{ {
auto result = vector<double>(); auto result = vector<double>();
auto source = vector<string>(features); auto source = vector<string>(features);
source.push_back(className); source.push_back(className);
auto combinations = doCombinations(source); auto combinations = doCombinations(source);
// Compute class prior // Compute class prior
auto margin = torch::zeros({ classNumStates }, torch::kFloat); auto margin = torch::zeros({ classNumStates });
for (int value = 0; value < classNumStates; ++value) { for (int value = 0; value < classNumStates; ++value) {
auto mask = samples.index({ -1, "..." }) == value; auto mask = samples.index({ "...", -1 }) == value;
margin[value] = mask.sum().item<double>() / samples.size(1); margin[value] = mask.sum().item<float>() / samples.sizes()[0];
} }
for (auto [first, second] : combinations) { for (auto [first, second] : combinations) {
int index_first = find(features.begin(), features.end(), first) - features.begin(); int index_first = find(features.begin(), features.end(), first) - features.begin();
int index_second = find(features.begin(), features.end(), second) - features.begin(); int index_second = find(features.begin(), features.end(), second) - features.begin();
double accumulated = 0; double accumulated = 0;
for (int value = 0; value < classNumStates; ++value) { for (int value = 0; value < classNumStates; ++value) {
auto mask = samples.index({ -1, "..." }) == value; auto mask = samples.index({ "...", -1 }) == value;
auto first_dataset = samples.index({ index_first, mask }); auto first_dataset = samples.index({ mask, index_first });
auto second_dataset = samples.index({ index_second, mask }); auto second_dataset = samples.index({ mask, index_second });
auto weights_dataset = weights.index({ mask }); auto mi = mutualInformation(first_dataset, second_dataset);
auto mi = mutualInformation(first_dataset, second_dataset, weights_dataset); auto pb = margin[value].item<float>();
auto pb = margin[value].item<double>();
accumulated += pb * mi; accumulated += pb * mi;
} }
result.push_back(accumulated); result.push_back(accumulated);
@@ -109,33 +67,32 @@ namespace bayesnet {
} }
return matrix; return matrix;
} }
// To use in Python // To Interface with Python
vector<float> Metrics::conditionalEdgeWeights(vector<float>& weights_) vector<float> Metrics::conditionalEdgeWeights()
{ {
const torch::Tensor weights = torch::tensor(weights_); auto matrix = conditionalEdge();
auto matrix = conditionalEdge(weights);
std::vector<float> v(matrix.data_ptr<float>(), matrix.data_ptr<float>() + matrix.numel()); std::vector<float> v(matrix.data_ptr<float>(), matrix.data_ptr<float>() + matrix.numel());
return v; return v;
} }
double Metrics::entropy(const torch::Tensor& feature, const torch::Tensor& weights) double Metrics::entropy(torch::Tensor& feature)
{ {
torch::Tensor counts = feature.bincount(weights); torch::Tensor counts = feature.bincount();
double totalWeight = counts.sum().item<double>(); int totalWeight = counts.sum().item<int>();
torch::Tensor probs = counts.to(torch::kFloat) / totalWeight; torch::Tensor probs = counts.to(torch::kFloat) / totalWeight;
torch::Tensor logProbs = torch::log(probs); torch::Tensor logProbs = torch::log(probs);
torch::Tensor entropy = -probs * logProbs; torch::Tensor entropy = -probs * logProbs;
return entropy.nansum().item<double>(); return entropy.nansum().item<double>();
} }
// H(Y|X) = sum_{x in X} p(x) H(Y|X=x) // H(Y|X) = sum_{x in X} p(x) H(Y|X=x)
double Metrics::conditionalEntropy(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& weights) double Metrics::conditionalEntropy(torch::Tensor& firstFeature, torch::Tensor& secondFeature)
{ {
int numSamples = firstFeature.sizes()[0]; int numSamples = firstFeature.sizes()[0];
torch::Tensor featureCounts = secondFeature.bincount(weights); torch::Tensor featureCounts = secondFeature.bincount();
unordered_map<int, unordered_map<int, double>> jointCounts; unordered_map<int, unordered_map<int, double>> jointCounts;
double totalWeight = 0; double totalWeight = 0;
for (auto i = 0; i < numSamples; i++) { for (auto i = 0; i < numSamples; i++) {
jointCounts[secondFeature[i].item<int>()][firstFeature[i].item<int>()] += weights[i].item<double>(); jointCounts[secondFeature[i].item<int>()][firstFeature[i].item<int>()] += 1;
totalWeight += weights[i].item<float>(); totalWeight += 1;
} }
if (totalWeight == 0) if (totalWeight == 0)
return 0; return 0;
@@ -156,16 +113,16 @@ namespace bayesnet {
return entropyValue; return entropyValue;
} }
// I(X;Y) = H(Y) - H(Y|X) // I(X;Y) = H(Y) - H(Y|X)
double Metrics::mutualInformation(const torch::Tensor& firstFeature, const torch::Tensor& secondFeature, const torch::Tensor& weights) double Metrics::mutualInformation(torch::Tensor& firstFeature, torch::Tensor& secondFeature)
{ {
return entropy(firstFeature, weights) - conditionalEntropy(firstFeature, secondFeature, weights); return entropy(firstFeature) - conditionalEntropy(firstFeature, secondFeature);
} }
/* /*
Compute the maximum spanning tree considering the weights as distances Compute the maximum spanning tree considering the weights as distances
and the indices of the weights as nodes of this square matrix using and the indices of the weights as nodes of this square matrix using
Kruskal algorithm Kruskal algorithm
*/ */
vector<pair<int, int>> Metrics::maximumSpanningTree(const vector<string>& features, const Tensor& weights, const int root) vector<pair<int, int>> Metrics::maximumSpanningTree(vector<string> features, Tensor& weights, int root)
{ {
auto mst = MST(features, weights, root); auto mst = MST(features, weights, root);
return mst.maximumSpanningTree(); return mst.maximumSpanningTree();
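The metrics above implement the weighted information measures annotated in the comments: H(X) = -Σ_x p(x) log p(x), H(Y|X) = Σ_x p(x) H(Y|X=x), and I(X;Y) = H(Y) - H(Y|X); `conditionalEdge` then averages the pairwise mutual information over class values weighted by the class prior. A self-contained sketch of the three measures, with plain vectors standing in for the torch tensors and per-sample weights (function names are illustrative):

```cpp
#include <cmath>
#include <map>
#include <utility>
#include <vector>

// Weighted entropy H(X): bin the values, weight each sample, normalise.
double entropy(const std::vector<int>& x, const std::vector<double>& w)
{
    std::map<int, double> counts;
    double total = 0;
    for (size_t i = 0; i < x.size(); ++i) {
        counts[x[i]] += w[i];
        total += w[i];
    }
    double h = 0;
    for (const auto& [value, weight] : counts) {
        double p = weight / total;
        if (p > 0) h -= p * std::log(p);
    }
    return h;
}

// H(Y|X) = sum_{x in X} p(x) H(Y|X=x): split y by the value of x and
// average the per-slice entropies, weighted by p(x).
double conditionalEntropy(const std::vector<int>& y, const std::vector<int>& x,
                          const std::vector<double>& w)
{
    std::map<int, std::pair<std::vector<int>, std::vector<double>>> slices;
    double total = 0;
    for (size_t i = 0; i < x.size(); ++i) {
        slices[x[i]].first.push_back(y[i]);
        slices[x[i]].second.push_back(w[i]);
        total += w[i];
    }
    double h = 0;
    for (const auto& [value, slice] : slices) {
        double px = 0;
        for (double wi : slice.second) px += wi;
        h += (px / total) * entropy(slice.first, slice.second);
    }
    return h;
}

// I(X;Y) = H(Y) - H(Y|X)
double mutualInformation(const std::vector<int>& y, const std::vector<int>& x,
                         const std::vector<double>& w)
{
    return entropy(y, w) - conditionalEntropy(y, x, w);
}
```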


@@ -8,25 +8,21 @@ namespace bayesnet {
using namespace torch; using namespace torch;
class Metrics { class Metrics {
private: private:
Tensor samples; // nxm tensor used to fit the model Tensor samples;
vector<string> features; vector<string> features;
string className; string className;
int classNumStates = 0; int classNumStates;
vector<double> scoresKBest;
vector<int> featuresKBest; // sorted indices of the features
double entropy(const Tensor& feature, const Tensor& weights);
double conditionalEntropy(const Tensor& firstFeature, const Tensor& secondFeature, const Tensor& weights);
vector<pair<string, string>> doCombinations(const vector<string>&);
public: public:
Metrics() = default; Metrics() = default;
Metrics(const torch::Tensor& samples, const vector<string>& features, const string& className, const int classNumStates); Metrics(Tensor&, vector<string>&, string&, int);
Metrics(const vector<vector<int>>& vsamples, const vector<int>& labels, const vector<string>& features, const string& className, const int classNumStates); Metrics(const vector<vector<int>>&, const vector<int>&, const vector<string>&, const string&, const int);
vector<int> SelectKBestWeighted(const torch::Tensor& weights, bool ascending=false, unsigned k = 0); double entropy(Tensor&);
vector<double> getScoresKBest() const; double conditionalEntropy(Tensor&, Tensor&);
double mutualInformation(const Tensor& firstFeature, const Tensor& secondFeature, const Tensor& weights); double mutualInformation(Tensor&, Tensor&);
vector<float> conditionalEdgeWeights(vector<float>& weights); // To use in Python vector<float> conditionalEdgeWeights();
Tensor conditionalEdge(const torch::Tensor& weights); Tensor conditionalEdge();
vector<pair<int, int>> maximumSpanningTree(const vector<string>& features, const Tensor& weights, const int root); vector<pair<string, string>> doCombinations(const vector<string>&);
vector<pair<int, int>> maximumSpanningTree(vector<string> features, Tensor& weights, int root);
}; };
} }
#endif #endif
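`SelectKBestWeighted` in the header ranks every feature by its weighted mutual information with the label and keeps the k best-scoring ones (all of them when k = 0), returned in ascending or descending score order. A sketch of just the ranking step, assuming the scores have already been computed:

```cpp
#include <algorithm>
#include <numeric>
#include <vector>

// Return the indices of the k best-scoring features. Both branches keep
// the k largest scores; `ascending` only controls the order they come
// back in, matching the ascending=false default above.
std::vector<int> selectKBest(const std::vector<double>& scores, unsigned k,
                             bool ascending = false)
{
    std::vector<int> idx(scores.size());
    std::iota(idx.begin(), idx.end(), 0);
    if (k == 0 || k > idx.size()) k = idx.size();
    if (ascending) {
        std::sort(idx.begin(), idx.end(),
                  [&](int i, int j) { return scores[i] < scores[j]; });
        idx.erase(idx.begin(), idx.end() - k); // keep the k largest, ascending
    } else {
        std::sort(idx.begin(), idx.end(),
                  [&](int i, int j) { return scores[i] > scores[j]; });
        idx.resize(k);                         // keep the k largest, descending
    }
    return idx;
}
```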


@@ -1,90 +0,0 @@
#include "BoostAODE.h"
#include <set>
#include "BayesMetrics.h"
namespace bayesnet {
BoostAODE::BoostAODE() : Ensemble() {}
void BoostAODE::buildModel(const torch::Tensor& weights)
{
// Models shall be built in trainModel
}
void BoostAODE::setHyperparameters(nlohmann::json& hyperparameters)
{
// Check if hyperparameters are valid
const vector<string> validKeys = { "repeatSparent", "maxModels", "ascending" };
checkHyperparameters(validKeys, hyperparameters);
if (hyperparameters.contains("repeatSparent")) {
repeatSparent = hyperparameters["repeatSparent"];
}
if (hyperparameters.contains("maxModels")) {
maxModels = hyperparameters["maxModels"];
}
if (hyperparameters.contains("ascending")) {
ascending = hyperparameters["ascending"];
}
}
void BoostAODE::trainModel(const torch::Tensor& weights)
{
models.clear();
n_models = 0;
if (maxModels == 0)
maxModels = .1 * n > 10 ? .1 * n : n;
Tensor weights_ = torch::full({ m }, 1.0 / m, torch::kFloat64);
auto X_ = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), "..." });
auto y_ = dataset.index({ -1, "..." });
bool exitCondition = false;
unordered_set<int> featuresUsed;
// Step 0: Set the finish condition
// if not repeatSparent a finish condition is run out of features
// n_models == maxModels
while (!exitCondition) {
// Step 1: Build ranking with mutual information
auto featureSelection = metrics.SelectKBestWeighted(weights_, ascending, n); // Get all the features sorted
unique_ptr<Classifier> model;
auto feature = featureSelection[0];
if (!repeatSparent || featuresUsed.size() < featureSelection.size()) {
bool found = false;
for (auto feat : featureSelection) {
if (find(featuresUsed.begin(), featuresUsed.end(), feat) != featuresUsed.end()) {
continue;
}
found = true;
feature = feat;
break;
}
if (!found) {
exitCondition = true;
continue;
}
}
featuresUsed.insert(feature);
model = std::make_unique<SPODE>(feature);
n_models++;
model->fit(dataset, features, className, states, weights_);
auto ypred = model->predict(X_);
// Step 3.1: Compute the classifier amount of say
auto mask_wrong = ypred != y_;
auto masked_weights = weights_ * mask_wrong.to(weights_.dtype());
double wrongWeights = masked_weights.sum().item<double>();
double significance = wrongWeights == 0 ? 1 : 0.5 * log((1 - wrongWeights) / wrongWeights);
// Step 3.2: Update weights for next classifier
// Step 3.2.1: Update weights of wrong samples
weights_ += mask_wrong.to(weights_.dtype()) * exp(significance) * weights_;
// Step 3.3: Normalise the weights
double totalWeights = torch::sum(weights_).item<double>();
weights_ = weights_ / totalWeights;
// Step 3.4: Store classifier and its accuracy to weigh its future vote
models.push_back(std::move(model));
significanceModels.push_back(significance);
exitCondition = n_models == maxModels && repeatSparent;
}
if (featuresUsed.size() != features.size()) {
cout << "Warning: BoostAODE did not use all the features" << endl;
}
weights.copy_(weights_);
}
vector<string> BoostAODE::graph(const string& title) const
{
return Ensemble::graph(title);
}
}
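The boosting loop above follows the AdaBoost pattern: fit a SPODE on the current weights, measure its weighted error ε, derive its amount of say α = ½·ln((1−ε)/ε) (step 3.1), up-weight the misclassified samples (step 3.2), and renormalise (step 3.3). A numeric sketch of one such round, mirroring the exact update in the listing with plain vectors in place of tensors:

```cpp
#include <cmath>
#include <vector>

// One boosting round: update sample weights in place and return the
// model's significance (its vote weight in the ensemble).
double boostingRound(std::vector<double>& w, const std::vector<bool>& wrong)
{
    double eps = 0; // weighted error: total weight of misclassified samples
    for (size_t i = 0; i < w.size(); ++i) {
        if (wrong[i]) eps += w[i];
    }
    double alpha = eps == 0 ? 1 : 0.5 * std::log((1 - eps) / eps);
    double total = 0;
    for (size_t i = 0; i < w.size(); ++i) {
        if (wrong[i]) w[i] += std::exp(alpha) * w[i]; // up-weight mistakes
        total += w[i];
    }
    for (auto& wi : w) {
        wi /= total; // renormalise so the weights stay a distribution
    }
    return alpha;
}
```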


@@ -1,21 +0,0 @@
#ifndef BOOSTAODE_H
#define BOOSTAODE_H
#include "Ensemble.h"
#include "SPODE.h"
namespace bayesnet {
class BoostAODE : public Ensemble {
public:
BoostAODE();
virtual ~BoostAODE() {};
vector<string> graph(const string& title = "BoostAODE") const override;
void setHyperparameters(nlohmann::json& hyperparameters) override;
protected:
void buildModel(const torch::Tensor& weights) override;
void trainModel(const torch::Tensor& weights) override;
private:
bool repeatSparent=false;
int maxModels=0;
bool ascending=false; //Process KBest features ascending or descending order
};
}
#endif


@@ -1,9 +1,2 @@
include_directories(${BayesNet_SOURCE_DIR}/lib/mdlp) add_library(BayesNet bayesnetUtils.cc Network.cc Node.cc BayesMetrics.cc Classifier.cc KDB.cc TAN.cc SPODE.cc Ensemble.cc AODE.cc Mst.cc)
include_directories(${BayesNet_SOURCE_DIR}/lib/Files) target_link_libraries(BayesNet "${TORCH_LIBRARIES}")
include_directories(${BayesNet_SOURCE_DIR}/lib/json/include)
include_directories(${BayesNet_SOURCE_DIR}/src/BayesNet)
include_directories(${BayesNet_SOURCE_DIR}/src/Platform)
add_library(BayesNet bayesnetUtils.cc Network.cc Node.cc BayesMetrics.cc Classifier.cc
KDB.cc TAN.cc SPODE.cc Ensemble.cc AODE.cc TANLd.cc KDBLd.cc SPODELd.cc AODELd.cc BoostAODE.cc
Mst.cc Proposal.cc ${BayesNet_SOURCE_DIR}/src/Platform/Models.cc)
target_link_libraries(BayesNet mdlp "${TORCH_LIBRARIES}")


@@ -4,78 +4,57 @@
namespace bayesnet { namespace bayesnet {
using namespace torch; using namespace torch;
Classifier::Classifier(Network model) : model(model), m(0), n(0), metrics(Metrics()), fitted(false) {} Classifier::Classifier(const Network& model) : model(model), m(0), n(0), metrics(Metrics()), fitted(false) {}
Classifier& Classifier::build(const vector<string>& features, const string& className, map<string, vector<int>>& states, const torch::Tensor& weights) Classifier& Classifier::build(vector<string>& features, string className, map<string, vector<int>>& states)
{ {
dataset = torch::cat({ X, y.view({y.size(0), 1}) }, 1);
this->features = features; this->features = features;
this->className = className; this->className = className;
this->states = states; this->states = states;
m = dataset.size(1);
n = dataset.size(0) - 1;
checkFitParameters(); checkFitParameters();
auto n_classes = states.at(className).size(); auto n_classes = states[className].size();
metrics = Metrics(dataset, features, className, n_classes); metrics = Metrics(dataset, features, className, n_classes);
model.initialize(); train();
buildModel(weights); if (Xv == vector<vector<int>>()) {
trainModel(weights); // fit with tensors
model.fit(X, y, features, className);
} else {
// fit with vectors
model.fit(Xv, yv, features, className);
}
fitted = true; fitted = true;
return *this; return *this;
} }
void Classifier::buildDataset(Tensor& ytmp) Classifier& Classifier::fit(torch::Tensor& X, torch::Tensor& y, vector<string>& features, string className, map<string, vector<int>>& states)
{ {
try { this->X = torch::transpose(X, 0, 1);
auto yresized = torch::transpose(ytmp.view({ ytmp.size(0), 1 }), 0, 1); this->y = y;
dataset = torch::cat({ dataset, yresized }, 0); Xv = vector<vector<int>>();
} yv = vector<int>(y.data_ptr<int>(), y.data_ptr<int>() + y.size(0));
catch (const std::exception& e) { return build(features, className, states);
std::cerr << e.what() << '\n';
cout << "X dimensions: " << dataset.sizes() << "\n";
cout << "y dimensions: " << ytmp.sizes() << "\n";
exit(1);
}
} }
void Classifier::trainModel(const torch::Tensor& weights)
Classifier& Classifier::fit(vector<vector<int>>& X, vector<int>& y, vector<string>& features, string className, map<string, vector<int>>& states)
{ {
model.fit(dataset, weights, features, className, states); this->X = torch::zeros({ static_cast<int>(X[0].size()), static_cast<int>(X.size()) }, kInt32);
} Xv = X;
// X is nxm where n is the number of features and m the number of samples
Classifier& Classifier::fit(torch::Tensor& X, torch::Tensor& y, const vector<string>& features, const string& className, map<string, vector<int>>& states)
{
dataset = X;
buildDataset(y);
const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);
return build(features, className, states, weights);
}
// X is nxm where n is the number of features and m the number of samples
Classifier& Classifier::fit(vector<vector<int>>& X, vector<int>& y, const vector<string>& features, const string& className, map<string, vector<int>>& states)
{
dataset = torch::zeros({ static_cast<int>(X.size()), static_cast<int>(X[0].size()) }, kInt32);
for (int i = 0; i < X.size(); ++i) { for (int i = 0; i < X.size(); ++i) {
dataset.index_put_({ i, "..." }, torch::tensor(X[i], kInt32)); this->X.index_put_({ "...", i }, torch::tensor(X[i], kInt32));
} }
auto ytmp = torch::tensor(y, kInt32); this->y = torch::tensor(y, kInt32);
buildDataset(ytmp); yv = y;
const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble); return build(features, className, states);
return build(features, className, states, weights);
}
Classifier& Classifier::fit(torch::Tensor& dataset, const vector<string>& features, const string& className, map<string, vector<int>>& states)
{
this->dataset = dataset;
const torch::Tensor weights = torch::full({ dataset.size(1) }, 1.0 / dataset.size(1), torch::kDouble);
return build(features, className, states, weights);
}
Classifier& Classifier::fit(torch::Tensor& dataset, const vector<string>& features, const string& className, map<string, vector<int>>& states, const torch::Tensor& weights)
{
this->dataset = dataset;
return build(features, className, states, weights);
} }
void Classifier::checkFitParameters() void Classifier::checkFitParameters()
{ {
if (torch::is_floating_point(dataset)) { auto sizes = X.sizes();
throw invalid_argument("dataset (X, y) must be of type Integer"); m = sizes[0];
n = sizes[1];
if (m != y.size(0)) {
throw invalid_argument("X and y must have the same number of samples");
} }
if (n != features.size()) { if (n != features.size()) {
throw invalid_argument("X " + to_string(n) + " and features " + to_string(features.size()) + " must have the same number of features"); throw invalid_argument("X and features must have the same number of features");
} }
if (states.find(className) == states.end()) { if (states.find(className) == states.end()) {
throw invalid_argument("className not found in states"); throw invalid_argument("className not found in states");
@@ -86,12 +65,23 @@ namespace bayesnet {
} }
} }
} }
Tensor Classifier::predict(Tensor& X) Tensor Classifier::predict(Tensor& X)
{ {
if (!fitted) { if (!fitted) {
throw logic_error("Classifier has not been fitted"); throw logic_error("Classifier has not been fitted");
} }
return model.predict(X); auto m_ = X.size(0);
auto n_ = X.size(1);
//auto Xt = torch::transpose(X, 0, 1);
vector<vector<int>> Xd(n_, vector<int>(m_, 0));
for (auto i = 0; i < n_; i++) {
auto temp = X.index({ "...", i });
Xd[i] = vector<int>(temp.data_ptr<int>(), temp.data_ptr<int>() + temp.numel());
}
auto yp = model.predict(Xd);
auto ypred = torch::tensor(yp, torch::kInt32);
return ypred;
} }
vector<int> Classifier::predict(vector<vector<int>>& X) vector<int> Classifier::predict(vector<vector<int>>& X)
{ {
@@ -112,7 +102,8 @@ namespace bayesnet {
if (!fitted) { if (!fitted) {
throw logic_error("Classifier has not been fitted"); throw logic_error("Classifier has not been fitted");
} }
Tensor y_pred = predict(X); auto Xt = torch::transpose(X, 0, 1);
Tensor y_pred = predict(Xt);
return (y_pred == y).sum().item<float>() / y.size(0); return (y_pred == y).sum().item<float>() / y.size(0);
} }
float Classifier::score(vector<vector<int>>& X, vector<int>& y) float Classifier::score(vector<vector<int>>& X, vector<int>& y)
@@ -120,53 +111,37 @@ namespace bayesnet {
if (!fitted) { if (!fitted) {
throw logic_error("Classifier has not been fitted"); throw logic_error("Classifier has not been fitted");
} }
return model.score(X, y); auto m_ = X[0].size();
auto n_ = X.size();
vector<vector<int>> Xd(n_, vector<int>(m_, 0));
for (auto i = 0; i < n_; i++) {
Xd[i] = vector<int>(X[i].begin(), X[i].end());
}
return model.score(Xd, y);
} }
vector<string> Classifier::show() const vector<string> Classifier::show()
{ {
return model.show(); return model.show();
} }
void Classifier::addNodes() void Classifier::addNodes()
{ {
// Add all nodes to the network // Add all nodes to the network
for (const auto& feature : features) { for (auto feature : features) {
model.addNode(feature); model.addNode(feature, states[feature].size());
} }
model.addNode(className); model.addNode(className, states[className].size());
} }
int Classifier::getNumberOfNodes() const int Classifier::getNumberOfNodes()
{ {
// Features does not include class // Features does not include class
return fitted ? model.getFeatures().size() + 1 : 0; return fitted ? model.getFeatures().size() + 1 : 0;
} }
int Classifier::getNumberOfEdges() const int Classifier::getNumberOfEdges()
{ {
return fitted ? model.getNumEdges() : 0; return fitted ? model.getEdges().size() : 0;
} }
int Classifier::getNumberOfStates() const int Classifier::getNumberOfStates()
{ {
return fitted ? model.getStates() : 0; return fitted ? model.getStates() : 0;
} }
vector<string> Classifier::topological_order()
{
return model.topological_sort();
}
void Classifier::dump_cpt() const
{
model.dump_cpt();
}
void Classifier::checkHyperparameters(const vector<string>& validKeys, nlohmann::json& hyperparameters)
{
for (const auto& item : hyperparameters.items()) {
if (find(validKeys.begin(), validKeys.end(), item.key()) == validKeys.end()) {
throw invalid_argument("Hyperparameter " + item.key() + " is not valid");
}
}
}
void Classifier::setHyperparameters(nlohmann::json& hyperparameters)
{
// Check if hyperparameters are valid, default is no hyperparameters
const vector<string> validKeys = { };
checkHyperparameters(validKeys, hyperparameters);
}
} }


@@ -10,40 +10,36 @@ using namespace torch;
namespace bayesnet { namespace bayesnet {
class Classifier : public BaseClassifier { class Classifier : public BaseClassifier {
private: private:
void buildDataset(torch::Tensor& y);
Classifier& build(const vector<string>& features, const string& className, map<string, vector<int>>& states, const torch::Tensor& weights);
protected:
bool fitted; bool fitted;
int m, n; // m: number of samples, n: number of features Classifier& build(vector<string>& features, string className, map<string, vector<int>>& states);
protected:
Network model; Network model;
int m, n; // m: number of samples, n: number of features
Tensor X;
vector<vector<int>> Xv;
Tensor y;
vector<int> yv;
Tensor dataset;
Metrics metrics; Metrics metrics;
vector<string> features; vector<string> features;
string className; string className;
map<string, vector<int>> states; map<string, vector<int>> states;
Tensor dataset; // (n+1)xm tensor
void checkFitParameters(); void checkFitParameters();
virtual void buildModel(const torch::Tensor& weights) = 0; virtual void train() = 0;
void trainModel(const torch::Tensor& weights) override;
void checkHyperparameters(const vector<string>& validKeys, nlohmann::json& hyperparameters);
public: public:
Classifier(Network model); Classifier(const Network& model);
virtual ~Classifier() = default; virtual ~Classifier() = default;
Classifier& fit(vector<vector<int>>& X, vector<int>& y, const vector<string>& features, const string& className, map<string, vector<int>>& states) override; Classifier& fit(vector<vector<int>>& X, vector<int>& y, vector<string>& features, string className, map<string, vector<int>>& states) override;
Classifier& fit(torch::Tensor& X, torch::Tensor& y, const vector<string>& features, const string& className, map<string, vector<int>>& states) override; Classifier& fit(torch::Tensor& X, torch::Tensor& y, vector<string>& features, string className, map<string, vector<int>>& states) override;
Classifier& fit(torch::Tensor& dataset, const vector<string>& features, const string& className, map<string, vector<int>>& states) override;
Classifier& fit(torch::Tensor& dataset, const vector<string>& features, const string& className, map<string, vector<int>>& states, const torch::Tensor& weights) override;
void addNodes(); void addNodes();
int getNumberOfNodes() const override; int getNumberOfNodes() override;
int getNumberOfEdges() const override; int getNumberOfEdges() override;
int getNumberOfStates() const override; int getNumberOfStates() override;
Tensor predict(Tensor& X) override; Tensor predict(Tensor& X);
vector<int> predict(vector<vector<int>>& X) override; vector<int> predict(vector<vector<int>>& X) override;
float score(Tensor& X, Tensor& y) override; float score(Tensor& X, Tensor& y) override;
float score(vector<vector<int>>& X, vector<int>& y) override; float score(vector<vector<int>>& X, vector<int>& y) override;
vector<string> show() const override; vector<string> show() override;
vector<string> topological_order() override;
void dump_cpt() const override;
void setHyperparameters(nlohmann::json& hyperparameters) override;
}; };
} }
#endif #endif
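One side of the Classifier diff replaces the separate `X`/`Xv`/`y`/`yv` members with a single (n+1)×m `dataset` tensor: n feature rows, m sample columns, and the labels appended as the last row, which is why the Metrics code reads the class with `samples.index({ -1, "..." })`. A minimal libtorch sketch of that layout, assuming only that X arrives feature-major:

```cpp
#include <torch/torch.h>

// Stack the m labels under the n x m feature matrix, giving the
// (n+1) x m dataset the dataset-based Classifier::build works on.
torch::Tensor buildDataset(const torch::Tensor& X, const torch::Tensor& y)
{
    auto yRow = y.view({ 1, y.size(0) }); // 1 x m label row
    return torch::cat({ X, yRow }, 0);    // (n+1) x m
}
```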


@@ -3,55 +3,69 @@
namespace bayesnet { namespace bayesnet {
using namespace torch; using namespace torch;
Ensemble::Ensemble() : Classifier(Network()), n_models(0) {} Ensemble::Ensemble() : m(0), n(0), n_models(0), metrics(Metrics()), fitted(false) {}
Ensemble& Ensemble::build(vector<string>& features, string className, map<string, vector<int>>& states)
void Ensemble::trainModel(const torch::Tensor& weights)
{ {
dataset = cat({ X, y.view({y.size(0), 1}) }, 1);
this->features = features;
this->className = className;
this->states = states;
auto n_classes = states[className].size();
metrics = Metrics(dataset, features, className, n_classes);
// Build models
train();
// Train models
n_models = models.size(); n_models = models.size();
for (auto i = 0; i < n_models; ++i) { for (auto i = 0; i < n_models; ++i) {
// fit with vectors models[i]->fit(Xv, yv, features, className, states);
models[i]->fit(dataset, features, className, states);
} }
fitted = true;
return *this;
} }
vector<int> Ensemble::voting(Tensor& y_pred) Ensemble& Ensemble::fit(torch::Tensor& X, torch::Tensor& y, vector<string>& features, string className, map<string, vector<int>>& states)
{ {
auto y_pred_ = y_pred.accessor<int, 2>(); this->X = X;
vector<int> y_pred_final; this->y = y;
int numClasses = states.at(className).size(); Xv = vector<vector<int>>();
// y_pred is m x n_models with the prediction of every model for each sample yv = vector<int>(y.data_ptr<int>(), y.data_ptr<int>() + y.size(0));
for (int i = 0; i < y_pred.size(0); ++i) { return build(features, className, states);
// votes store in each index (value of class) the significance added by each model }
// i.e. votes[0] contains how much value has the value 0 of class. That value is generated by the models predictions Ensemble& Ensemble::fit(vector<vector<int>>& X, vector<int>& y, vector<string>& features, string className, map<string, vector<int>>& states)
vector<double> votes(numClasses, 0.0); {
for (int j = 0; j < n_models; ++j) { this->X = torch::zeros({ static_cast<int>(X[0].size()), static_cast<int>(X.size()) }, kInt32);
votes[y_pred_[i][j]] += significanceModels[j]; Xv = X;
} for (int i = 0; i < X.size(); ++i) {
// argsort in descending order this->X.index_put_({ "...", i }, torch::tensor(X[i], kInt32));
auto indices = argsort(votes);
y_pred_final.push_back(indices[0]);
} }
return y_pred_final; this->y = torch::tensor(y, kInt32);
yv = y;
return build(features, className, states);
} }
Tensor Ensemble::predict(Tensor& X) Tensor Ensemble::predict(Tensor& X)
{ {
if (!fitted) { if (!fitted) {
throw logic_error("Ensemble has not been fitted"); throw logic_error("Ensemble has not been fitted");
} }
Tensor y_pred = torch::zeros({ X.size(1), n_models }, kInt32); Tensor y_pred = torch::zeros({ X.size(0), n_models }, kInt32);
auto threads{ vector<thread>() };
mutex mtx;
for (auto i = 0; i < n_models; ++i) { for (auto i = 0; i < n_models; ++i) {
threads.push_back(thread([&, i]() { y_pred.index_put_({ "...", i }, models[i]->predict(X));
auto ypredict = models[i]->predict(X);
lock_guard<mutex> lock(mtx);
y_pred.index_put_({ "...", i }, ypredict);
}));
}
for (auto& thread : threads) {
thread.join();
} }
return torch::tensor(voting(y_pred)); return torch::tensor(voting(y_pred));
} }
vector<int> Ensemble::voting(Tensor& y_pred)
{
auto y_pred_ = y_pred.accessor<int, 2>();
vector<int> y_pred_final;
for (int i = 0; i < y_pred.size(0); ++i) {
vector<float> votes(states[className].size(), 0);
for (int j = 0; j < y_pred.size(1); ++j) {
votes[y_pred_[i][j]] += 1;
}
auto indices = argsort(votes);
y_pred_final.push_back(indices[0]);
}
return y_pred_final;
}
vector<int> Ensemble::predict(vector<vector<int>>& X) vector<int> Ensemble::predict(vector<vector<int>>& X)
{ {
if (!fitted) { if (!fitted) {
@@ -96,8 +110,9 @@ namespace bayesnet {
} }
} }
return (double)correct / y_pred.size(); return (double)correct / y_pred.size();
} }
vector<string> Ensemble::show() const vector<string> Ensemble::show()
{ {
auto result = vector<string>(); auto result = vector<string>();
for (auto i = 0; i < n_models; ++i) { for (auto i = 0; i < n_models; ++i) {
@@ -106,7 +121,7 @@ namespace bayesnet {
} }
return result; return result;
} }
vector<string> Ensemble::graph(const string& title) const vector<string> Ensemble::graph(string title)
{ {
auto result = vector<string>(); auto result = vector<string>();
for (auto i = 0; i < n_models; ++i) { for (auto i = 0; i < n_models; ++i) {
@@ -115,7 +130,7 @@ namespace bayesnet {
} }
return result; return result;
} }
int Ensemble::getNumberOfNodes() const int Ensemble::getNumberOfNodes()
{ {
int nodes = 0; int nodes = 0;
for (auto i = 0; i < n_models; ++i) { for (auto i = 0; i < n_models; ++i) {
@@ -123,7 +138,7 @@ namespace bayesnet {
} }
return nodes; return nodes;
} }
int Ensemble::getNumberOfEdges() const int Ensemble::getNumberOfEdges()
{ {
int edges = 0; int edges = 0;
for (auto i = 0; i < n_models; ++i) { for (auto i = 0; i < n_models; ++i) {
@@ -131,7 +146,7 @@ namespace bayesnet {
} }
return edges; return edges;
} }
int Ensemble::getNumberOfStates() const int Ensemble::getNumberOfStates()
{ {
int nstates = 0; int nstates = 0;
for (auto i = 0; i < n_models; ++i) { for (auto i = 0; i < n_models; ++i) {

View File

@@ -8,34 +8,39 @@ using namespace std;
using namespace torch; using namespace torch;
namespace bayesnet { namespace bayesnet {
class Ensemble : public Classifier { class Ensemble : public BaseClassifier {
private: private:
bool fitted;
long n_models;
Ensemble& build(vector<string>& features, string className, map<string, vector<int>>& states); Ensemble& build(vector<string>& features, string className, map<string, vector<int>>& states);
protected: protected:
unsigned n_models;
vector<unique_ptr<Classifier>> models; vector<unique_ptr<Classifier>> models;
vector<double> significanceModels; int m, n; // m: number of samples, n: number of features
void trainModel(const torch::Tensor& weights) override; Tensor X;
vector<vector<int>> Xv;
Tensor y;
vector<int> yv;
Tensor dataset;
Metrics metrics;
vector<string> features;
string className;
map<string, vector<int>> states;
void virtual train() = 0;
vector<int> voting(Tensor& y_pred); vector<int> voting(Tensor& y_pred);
public: public:
Ensemble(); Ensemble();
virtual ~Ensemble() = default; virtual ~Ensemble() = default;
Tensor predict(Tensor& X) override; Ensemble& fit(vector<vector<int>>& X, vector<int>& y, vector<string>& features, string className, map<string, vector<int>>& states) override;
Ensemble& fit(torch::Tensor& X, torch::Tensor& y, vector<string>& features, string className, map<string, vector<int>>& states) override;
Tensor predict(Tensor& X);
vector<int> predict(vector<vector<int>>& X) override; vector<int> predict(vector<vector<int>>& X) override;
float score(Tensor& X, Tensor& y) override; float score(Tensor& X, Tensor& y) override;
float score(vector<vector<int>>& X, vector<int>& y) override; float score(vector<vector<int>>& X, vector<int>& y) override;
int getNumberOfNodes() const override; int getNumberOfNodes() override;
int getNumberOfEdges() const override; int getNumberOfEdges() override;
int getNumberOfStates() const override; int getNumberOfStates() override;
vector<string> show() const override; vector<string> show() override;
vector<string> graph(const string& title) const override; vector<string> graph(string title) override;
vector<string> topological_order() override
{
return vector<string>();
}
void dump_cpt() const override
{
}
}; };
} }
#endif #endif

View File

@@ -4,19 +4,7 @@ namespace bayesnet {
using namespace torch; using namespace torch;
KDB::KDB(int k, float theta) : Classifier(Network()), k(k), theta(theta) {} KDB::KDB(int k, float theta) : Classifier(Network()), k(k), theta(theta) {}
void KDB::setHyperparameters(nlohmann::json& hyperparameters) void KDB::train()
{
// Check if hyperparameters are valid
const vector<string> validKeys = { "k", "theta" };
checkHyperparameters(validKeys, hyperparameters);
if (hyperparameters.contains("k")) {
k = hyperparameters["k"];
}
if (hyperparameters.contains("theta")) {
theta = hyperparameters["theta"];
}
}
void KDB::buildModel(const torch::Tensor& weights)
{ {
/* /*
1. For each feature Xi, compute mutual information, I(X;C), 1. For each feature Xi, compute mutual information, I(X;C),
@@ -39,25 +27,25 @@ namespace bayesnet {
*/ */
// 1. For each feature Xi, compute mutual information, I(X;C), // 1. For each feature Xi, compute mutual information, I(X;C),
// where C is the class. // where C is the class.
addNodes(); vector <float> mi;
const Tensor& y = dataset.index({ -1, "..." });
vector<double> mi;
for (auto i = 0; i < features.size(); i++) { for (auto i = 0; i < features.size(); i++) {
Tensor firstFeature = dataset.index({ i, "..." }); Tensor firstFeature = X.index({ "...", i });
mi.push_back(metrics.mutualInformation(firstFeature, y, weights)); mi.push_back(metrics.mutualInformation(firstFeature, y));
} }
// 2. Compute class conditional mutual information I(Xi;Xj|C), for each // 2. Compute class conditional mutual information I(Xi;Xj|C), for each
auto conditionalEdgeWeights = metrics.conditionalEdge(weights); auto conditionalEdgeWeights = metrics.conditionalEdge();
// 3. Let the used variable list, S, be empty. // 3. Let the used variable list, S, be empty.
vector<int> S; vector<int> S;
// 4. Let the DAG network being constructed, BN, begin with a single // 4. Let the DAG network being constructed, BN, begin with a single
// class node, C. // class node, C.
model.addNode(className, states[className].size());
// 5. Repeat until S includes all domain features // 5. Repeat until S includes all domain features
// 5.1. Select feature Xmax which is not in S and has the largest value // 5.1. Select feature Xmax which is not in S and has the largest value
// I(Xmax;C). // I(Xmax;C).
auto order = argsort(mi); auto order = argsort(mi);
for (auto idx : order) { for (auto idx : order) {
// 5.2. Add a node to BN representing Xmax. // 5.2. Add a node to BN representing Xmax.
model.addNode(features[idx], states[features[idx]].size());
// 5.3. Add an arc from C to Xmax in BN. // 5.3. Add an arc from C to Xmax in BN.
model.addEdge(className, features[idx]); model.addEdge(className, features[idx]);
// 5.4. Add m = min(|S|, k) arcs from m distinct features Xj in S with // 5.4. Add m = min(|S|, k) arcs from m distinct features Xj in S with
@@ -91,12 +79,11 @@ namespace bayesnet {
exit_cond = num == n_edges || candidates.size(0) == 0; exit_cond = num == n_edges || candidates.size(0) == 0;
} }
} }
vector<string> KDB::graph(const string& title) const vector<string> KDB::graph(string title)
{ {
string header{ title };
if (title == "KDB") { if (title == "KDB") {
header += " (k=" + to_string(k) + ", theta=" + to_string(theta) + ")"; title += " (k=" + to_string(k) + ", theta=" + to_string(theta) + ")";
} }
return model.graph(header); return model.graph(title);
} }
} }
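Step 1 of KDB ranks the features by their mutual information with the class. A self-contained sketch of the discrete estimator such a ranking needs, computed from raw value counts (independent of the library's Metrics class, and ignoring the sample weights the new signature carries):

#include <cmath>
#include <map>
#include <utility>
#include <vector>

// I(X;C) = sum over (x,c) of p(x,c) * log( p(x,c) / (p(x) * p(c)) ),
// with all probabilities estimated by relative frequencies.
double mutualInformation(const std::vector<int>& x, const std::vector<int>& c)
{
    std::map<int, double> px, pc;
    std::map<std::pair<int, int>, double> pxc;
    const double n = static_cast<double>(x.size());
    for (size_t i = 0; i < x.size(); ++i) {
        px[x[i]] += 1.0 / n;
        pc[c[i]] += 1.0 / n;
        pxc[{ x[i], c[i] }] += 1.0 / n;
    }
    double mi = 0.0;
    for (const auto& [xc, p] : pxc) {
        mi += p * std::log(p / (px[xc.first] * pc[xc.second]));
    }
    return mi;
}

Features are then visited via argsort(mi) in descending order, so the strongest predictors enter the structure first and are preferred as parents of the features added later.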

View File

@@ -1,6 +1,5 @@
#ifndef KDB_H #ifndef KDB_H
#define KDB_H #define KDB_H
#include <torch/torch.h>
#include "Classifier.h" #include "Classifier.h"
#include "bayesnetUtils.h" #include "bayesnetUtils.h"
namespace bayesnet { namespace bayesnet {
@@ -12,12 +11,10 @@ namespace bayesnet {
float theta; float theta;
void add_m_edges(int idx, vector<int>& S, Tensor& weights); void add_m_edges(int idx, vector<int>& S, Tensor& weights);
protected: protected:
void buildModel(const torch::Tensor& weights) override; void train() override;
public: public:
explicit KDB(int k, float theta = 0.03); explicit KDB(int k, float theta = 0.03);
virtual ~KDB() {}; vector<string> graph(string name = "KDB") override;
void setHyperparameters(nlohmann::json& hyperparameters) override;
vector<string> graph(const string& name = "KDB") const override;
}; };
} }
#endif #endif

View File

@@ -1,30 +0,0 @@
#include "KDBLd.h"
namespace bayesnet {
using namespace std;
KDBLd::KDBLd(int k) : KDB(k), Proposal(dataset, features, className) {}
KDBLd& KDBLd::fit(torch::Tensor& X_, torch::Tensor& y_, const vector<string>& features_, const string& className_, map<string, vector<int>>& states_)
{
checkInput(X_, y_);
features = features_;
className = className_;
Xf = X_;
y = y_;
// Fills vectors Xv & yv with the data from tensors X_ (discretized) & y
states = fit_local_discretization(y);
// We have discretized the input data
// 1st we need to fit the model to build the normal KDB structure, KDB::fit initializes the base Bayesian network
KDB::fit(dataset, features, className, states);
states = localDiscretizationProposal(states, model);
return *this;
}
Tensor KDBLd::predict(Tensor& X)
{
auto Xt = prepareX(X);
return KDB::predict(Xt);
}
vector<string> KDBLd::graph(const string& name) const
{
return KDB::graph(name);
}
}

View File

@@ -1,19 +0,0 @@
#ifndef KDBLD_H
#define KDBLD_H
#include "KDB.h"
#include "Proposal.h"
namespace bayesnet {
using namespace std;
class KDBLd : public KDB, public Proposal {
private:
public:
explicit KDBLd(int k);
virtual ~KDBLd() = default;
KDBLd& fit(torch::Tensor& X, torch::Tensor& y, const vector<string>& features, const string& className, map<string, vector<int>>& states) override;
vector<string> graph(const string& name = "KDB") const override;
Tensor predict(Tensor& X) override;
static inline string version() { return "0.0.1"; };
};
}
#endif // !KDBLD_H

View File

@@ -7,7 +7,7 @@
namespace bayesnet { namespace bayesnet {
using namespace std; using namespace std;
Graph::Graph(int V) : V(V), parent(vector<int>(V)) Graph::Graph(int V) : V(V), parent{ vector<int>(V) }
{ {
for (int i = 0; i < V; i++) for (int i = 0; i < V; i++)
parent[i] = i; parent[i] = i;
@@ -33,9 +33,10 @@ namespace bayesnet {
} }
void Graph::kruskal_algorithm() void Graph::kruskal_algorithm()
{ {
int i;
// sort the edges in decreasing weight order // sort the edges in decreasing weight order
sort(G.begin(), G.end(), [](const auto& left, const auto& right) {return left.first > right.first;}); sort(G.begin(), G.end(), [](const auto& left, const auto& right) {return left.first > right.first;});
for (int i = 0; i < G.size(); i++) { for (i = 0; i < G.size(); i++) {
int uSt, vEd; int uSt, vEd;
uSt = find_set(G[i].second.first); uSt = find_set(G[i].second.first);
vEd = find_set(G[i].second.second); vEd = find_set(G[i].second.second);
@@ -94,7 +95,7 @@ namespace bayesnet {
return result; return result;
} }
MST::MST(const vector<string>& features, const Tensor& weights, const int root) : features(features), weights(weights), root(root) {} MST::MST(vector<string>& features, Tensor& weights, int root) : features(features), weights(weights), root(root) {}
vector<pair<int, int>> MST::maximumSpanningTree() vector<pair<int, int>> MST::maximumSpanningTree()
{ {
auto num_features = features.size(); auto num_features = features.size();
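Graph implements Kruskal's algorithm for the maximum spanning tree: edges are sorted by decreasing weight and an edge is accepted only when find_set returns different roots for its endpoints. A compact sketch of that union-find bookkeeping (path compression is added here for clarity and may differ from the library's parent-chain walk):

#include <numeric>
#include <vector>

// Minimal disjoint-set as used by Kruskal: every vertex starts as its own
// parent; an edge (u, v) is accepted only when find(u) != find(v).
struct DisjointSet {
    std::vector<int> parent;
    explicit DisjointSet(int n) : parent(n) { std::iota(parent.begin(), parent.end(), 0); }
    int find(int x) { return parent[x] == x ? x : parent[x] = find(parent[x]); } // path compression
    void unite(int x, int y) { parent[find(x)] = find(y); }
};

Sorting by decreasing instead of increasing weight is the only change needed to turn Kruskal's minimum spanning tree into the maximum spanning tree used for TAN.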

View File

@@ -10,10 +10,9 @@ namespace bayesnet {
private: private:
Tensor weights; Tensor weights;
vector<string> features; vector<string> features;
int root = 0; int root;
public: public:
MST() = default; MST(vector<string>& features, Tensor& weights, int root);
MST(const vector<string>& features, const Tensor& weights, const int root);
vector<pair<int, int>> maximumSpanningTree(); vector<pair<int, int>> maximumSpanningTree();
}; };
class Graph { class Graph {

View File

@@ -3,24 +3,15 @@
#include "Network.h" #include "Network.h"
#include "bayesnetUtils.h" #include "bayesnetUtils.h"
namespace bayesnet { namespace bayesnet {
Network::Network() : features(vector<string>()), className(""), classNumStates(0), fitted(false), laplaceSmoothing(0) {} Network::Network() : laplaceSmoothing(1), features(vector<string>()), className(""), classNumStates(0), maxThreads(0.8), fitted(false) {}
Network::Network(float maxT) : features(vector<string>()), className(""), classNumStates(0), maxThreads(maxT), fitted(false), laplaceSmoothing(0) {} Network::Network(const float maxT) : laplaceSmoothing(1), features(vector<string>()), className(""), classNumStates(0), maxThreads(maxT), fitted(false) {}
Network::Network(Network& other) : laplaceSmoothing(other.laplaceSmoothing), features(other.features), className(other.className), classNumStates(other.getClassNumStates()), maxThreads(other. Network::Network(const float maxT, const int smoothing) : laplaceSmoothing(smoothing), features(vector<string>()), className(""), classNumStates(0), maxThreads(maxT), fitted(false) {}
getmaxThreads()), fitted(other.fitted) Network::Network(const Network& other) : laplaceSmoothing(other.laplaceSmoothing), features(other.features), className(other.className), classNumStates(other.classNumStates), maxThreads(other.maxThreads), fitted(other.fitted)
{ {
for (const auto& pair : other.nodes) { for (const auto& pair : other.nodes) {
nodes[pair.first] = std::make_unique<Node>(*pair.second); nodes[pair.first] = std::make_unique<Node>(*pair.second);
} }
} }
void Network::initialize()
{
features = vector<string>();
className = "";
classNumStates = 0;
fitted = false;
nodes.clear();
samples = torch::Tensor();
}
float Network::getmaxThreads() float Network::getmaxThreads()
{ {
return maxThreads; return maxThreads;
@@ -29,28 +20,28 @@ namespace bayesnet {
{ {
return samples; return samples;
} }
void Network::addNode(const string& name) void Network::addNode(const string& name, const int numStates)
{ {
if (name == "") {
throw invalid_argument("Node name cannot be empty");
}
if (nodes.find(name) != nodes.end()) {
return;
}
if (find(features.begin(), features.end(), name) == features.end()) { if (find(features.begin(), features.end(), name) == features.end()) {
features.push_back(name); features.push_back(name);
} }
nodes[name] = std::make_unique<Node>(name); if (nodes.find(name) != nodes.end()) {
// if the node exists, update its number of states and clear its parents, children and CPT
nodes[name]->clear();
nodes[name]->setNumStates(numStates);
return;
}
nodes[name] = std::make_unique<Node>(name, numStates);
} }
vector<string> Network::getFeatures() const vector<string> Network::getFeatures()
{ {
return features; return features;
} }
int Network::getClassNumStates() const const int Network::getClassNumStates()
{ {
return classNumStates; return classNumStates;
} }
int Network::getStates() const const int Network::getStates()
{ {
int result = 0; int result = 0;
for (auto& node : nodes) { for (auto& node : nodes) {
@@ -58,7 +49,7 @@ namespace bayesnet {
} }
return result; return result;
} }
string Network::getClassName() const const string Network::getClassName()
{ {
return className; return className;
} }
@@ -103,115 +94,86 @@ namespace bayesnet {
{ {
return nodes; return nodes;
} }
void Network::checkFitData(int n_samples, int n_features, int n_samples_y, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states, const torch::Tensor& weights) void Network::fit(torch::Tensor& X, torch::Tensor& y, const vector<string>& featureNames, const string& className)
{ {
if (weights.size(0) != n_samples) { features = featureNames;
throw invalid_argument("Weights (" + to_string(weights.size(0)) + ") must have the same number of elements as samples (" + to_string(n_samples) + ") in Network::fit");
}
if (n_samples != n_samples_y) {
throw invalid_argument("X and y must have the same number of samples in Network::fit (" + to_string(n_samples) + " != " + to_string(n_samples_y) + ")");
}
if (n_features != featureNames.size()) {
throw invalid_argument("X and features must have the same number of features in Network::fit (" + to_string(n_features) + " != " + to_string(featureNames.size()) + ")");
}
if (n_features != features.size() - 1) {
throw invalid_argument("X and local features must have the same number of features in Network::fit (" + to_string(n_features) + " != " + to_string(features.size() - 1) + ")");
}
if (find(features.begin(), features.end(), className) == features.end()) {
throw invalid_argument("className not found in Network::features");
}
for (auto& feature : featureNames) {
if (find(features.begin(), features.end(), feature) == features.end()) {
throw invalid_argument("Feature " + feature + " not found in Network::features");
}
if (states.find(feature) == states.end()) {
throw invalid_argument("Feature " + feature + " not found in states");
}
}
}
void Network::setStates(const map<string, vector<int>>& states)
{
// Set states to every Node in the network
for (int i = 0; i < features.size(); ++i) {
nodes[features[i]]->setNumStates(states.at(features[i]).size());
}
classNumStates = nodes[className]->getNumStates();
}
// X comes in nxm, where n is the number of features and m the number of samples
void Network::fit(const torch::Tensor& X, const torch::Tensor& y, const torch::Tensor& weights, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states)
{
checkFitData(X.size(1), X.size(0), y.size(0), featureNames, className, states, weights);
this->className = className; this->className = className;
Tensor ytmp = torch::transpose(y.view({ y.size(0), 1 }), 0, 1); dataset.clear();
samples = torch::cat({ X , ytmp }, 0); // Specific part
classNumStates = torch::max(y).item<int>() + 1;
samples = torch::cat({ X, y.view({ y.size(0), 1 }) }, 1);
for (int i = 0; i < featureNames.size(); ++i) { for (int i = 0; i < featureNames.size(); ++i) {
auto row_feature = X.index({ i, "..." }); auto column = torch::flatten(X.index({ "...", i }));
auto k = vector<int>();
for (auto z = 0; z < X.size(0); ++z) {
k.push_back(column[z].item<int>());
}
dataset[featureNames[i]] = k;
} }
completeFit(states, weights); dataset[className] = vector<int>(y.data_ptr<int>(), y.data_ptr<int>() + y.size(0));
completeFit();
} }
void Network::fit(const torch::Tensor& samples, const torch::Tensor& weights, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states) void Network::fit(const vector<vector<int>>& input_data, const vector<int>& labels, const vector<string>& featureNames, const string& className)
{ {
checkFitData(samples.size(1), samples.size(0) - 1, samples.size(1), featureNames, className, states, weights); features = featureNames;
this->className = className; this->className = className;
this->samples = samples; dataset.clear();
completeFit(states, weights); // Specific part
} classNumStates = *max_element(labels.begin(), labels.end()) + 1;
// input_data comes in nxm, where n is the number of features and m the number of samples // Build dataset & tensor of samples
void Network::fit(const vector<vector<int>>& input_data, const vector<int>& labels, const vector<float>& weights_, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states) samples = torch::zeros({ static_cast<int>(input_data[0].size()), static_cast<int>(input_data.size() + 1) }, torch::kInt32);
{
const torch::Tensor weights = torch::tensor(weights_, torch::kFloat64);
checkFitData(input_data[0].size(), input_data.size(), labels.size(), featureNames, className, states, weights);
this->className = className;
// Build tensor of samples (nxm) (n+1 because of the class)
samples = torch::zeros({ static_cast<int>(input_data.size() + 1), static_cast<int>(input_data[0].size()) }, torch::kInt32);
for (int i = 0; i < featureNames.size(); ++i) { for (int i = 0; i < featureNames.size(); ++i) {
samples.index_put_({ i, "..." }, torch::tensor(input_data[i], torch::kInt32)); dataset[featureNames[i]] = input_data[i];
samples.index_put_({ "...", i }, torch::tensor(input_data[i], torch::kInt32));
} }
samples.index_put_({ -1, "..." }, torch::tensor(labels, torch::kInt32)); dataset[className] = labels;
completeFit(states, weights); samples.index_put_({ "...", -1 }, torch::tensor(labels, torch::kInt32));
completeFit();
} }
void Network::completeFit(const map<string, vector<int>>& states, const torch::Tensor& weights) void Network::completeFit()
{ {
setStates(states);
laplaceSmoothing = 1.0 / samples.size(1); // To use in CPT computation int maxThreadsRunning = static_cast<int>(std::thread::hardware_concurrency() * maxThreads);
for (auto& node : nodes) { if (maxThreadsRunning < 1) {
node.second->computeCPT(samples, features, laplaceSmoothing, weights); maxThreadsRunning = 1;
fitted = true;
} }
} vector<thread> threads;
torch::Tensor Network::predict_tensor(const torch::Tensor& samples, const bool proba) mutex mtx;
{ condition_variable cv;
if (!fitted) { int activeThreads = 0;
throw logic_error("You must call fit() before calling predict()"); int nextNodeIndex = 0;
while (nextNodeIndex < nodes.size()) {
unique_lock<mutex> lock(mtx);
cv.wait(lock, [&activeThreads, &maxThreadsRunning]() { return activeThreads < maxThreadsRunning; });
if (nextNodeIndex >= nodes.size()) {
break; // No more work remaining
}
threads.emplace_back([this, &nextNodeIndex, &mtx, &cv, &activeThreads]() {
while (true) {
unique_lock<mutex> lock(mtx);
if (nextNodeIndex >= nodes.size()) {
break; // No more work remaining
}
auto& pair = *std::next(nodes.begin(), nextNodeIndex);
++nextNodeIndex;
lock.unlock();
pair.second->computeCPT(dataset, laplaceSmoothing);
lock.lock();
nodes[pair.first] = std::move(pair.second);
lock.unlock();
}
lock_guard<mutex> lock(mtx);
--activeThreads;
cv.notify_one();
});
++activeThreads;
} }
torch::Tensor result; for (auto& thread : threads) {
result = torch::zeros({ samples.size(1), classNumStates }, torch::kFloat64); thread.join();
for (int i = 0; i < samples.size(1); ++i) {
const Tensor sample = samples.index({ "...", i });
auto psample = predict_sample(sample);
auto temp = torch::tensor(psample, torch::kFloat64);
// result.index_put_({ i, "..." }, torch::tensor(predict_sample(sample), torch::kFloat64));
result.index_put_({ i, "..." }, temp);
} }
if (proba) fitted = true;
return result;
else
return result.argmax(1);
}
// Return mxn tensor of probabilities
Tensor Network::predict_proba(const Tensor& samples)
{
return predict_tensor(samples, true);
} }
// Return mx1 tensor of predictions
Tensor Network::predict(const Tensor& samples)
{
return predict_tensor(samples, false);
}
// Return mx1 vector of predictions
// tsamples is nxm vector of samples
vector<int> Network::predict(const vector<vector<int>>& tsamples) vector<int> Network::predict(const vector<vector<int>>& tsamples)
{ {
if (!fitted) { if (!fitted) {
@@ -232,7 +194,6 @@ namespace bayesnet {
} }
return predictions; return predictions;
} }
// Return mxn vector of probabilities
vector<vector<double>> Network::predict_proba(const vector<vector<int>>& tsamples) vector<vector<double>> Network::predict_proba(const vector<vector<int>>& tsamples)
{ {
if (!fitted) { if (!fitted) {
@@ -260,13 +221,12 @@ namespace bayesnet {
} }
return (double)correct / y_pred.size(); return (double)correct / y_pred.size();
} }
// Return 1xn vector of probabilities
vector<double> Network::predict_sample(const vector<int>& sample) vector<double> Network::predict_sample(const vector<int>& sample)
{ {
// Ensure the sample size is equal to the number of features // Ensure the sample size is equal to the number of features
if (sample.size() != features.size() - 1) { if (sample.size() != features.size()) {
throw invalid_argument("Sample size (" + to_string(sample.size()) + throw invalid_argument("Sample size (" + to_string(sample.size()) +
") does not match the number of features (" + to_string(features.size() - 1) + ")"); ") does not match the number of features (" + to_string(features.size()) + ")");
} }
map<string, int> evidence; map<string, int> evidence;
for (int i = 0; i < sample.size(); ++i) { for (int i = 0; i < sample.size(); ++i) {
@@ -274,20 +234,6 @@ namespace bayesnet {
} }
return exactInference(evidence); return exactInference(evidence);
} }
// Return 1xn vector of probabilities
vector<double> Network::predict_sample(const Tensor& sample)
{
// Ensure the sample size is equal to the number of features
if (sample.size(0) != features.size() - 1) {
throw invalid_argument("Sample size (" + to_string(sample.size(0)) +
") does not match the number of features (" + to_string(features.size() - 1) + ")");
}
map<string, int> evidence;
for (int i = 0; i < sample.size(0); ++i) {
evidence[features[i]] = sample[i].item<int>();
}
return exactInference(evidence);
}
double Network::computeFactor(map<string, int>& completeEvidence) double Network::computeFactor(map<string, int>& completeEvidence)
{ {
double result = 1.0; double result = 1.0;
@@ -313,12 +259,13 @@ namespace bayesnet {
for (auto& thread : threads) { for (auto& thread : threads) {
thread.join(); thread.join();
} }
// Normalize result // Normalize result
double sum = accumulate(result.begin(), result.end(), 0.0); double sum = accumulate(result.begin(), result.end(), 0.0);
transform(result.begin(), result.end(), result.begin(), [sum](const double& value) { return value / sum; }); transform(result.begin(), result.end(), result.begin(), [sum](double x) { return x / sum; });
return result; return result;
} }
vector<string> Network::show() const vector<string> Network::show()
{ {
vector<string> result; vector<string> result;
// Draw the network // Draw the network
@@ -331,7 +278,7 @@ namespace bayesnet {
} }
return result; return result;
} }
vector<string> Network::graph(const string& title) const vector<string> Network::graph(const string& title)
{ {
auto output = vector<string>(); auto output = vector<string>();
auto prefix = "digraph BayesNet {\nlabel=<BayesNet "; auto prefix = "digraph BayesNet {\nlabel=<BayesNet ";
@@ -345,7 +292,7 @@ namespace bayesnet {
output.push_back("}\n"); output.push_back("}\n");
return output; return output;
} }
vector<pair<string, string>> Network::getEdges() const vector<pair<string, string>> Network::getEdges()
{ {
auto edges = vector<pair<string, string>>(); auto edges = vector<pair<string, string>>();
for (const auto& node : nodes) { for (const auto& node : nodes) {
@@ -357,52 +304,4 @@ namespace bayesnet {
} }
return edges; return edges;
} }
int Network::getNumEdges() const
{
return getEdges().size();
}
vector<string> Network::topological_sort()
{
/* Check that all the parents of every node appear before the node */
auto result = features;
result.erase(remove(result.begin(), result.end(), className), result.end());
bool ending{ false };
while (!ending) {
ending = true;
for (auto feature : features) {
auto fathers = nodes[feature]->getParents();
for (const auto& father : fathers) {
auto fatherName = father->getName();
if (fatherName == className) {
continue;
}
// Check whether the parent is placed before the current feature
auto it = find(result.begin(), result.end(), fatherName);
if (it != result.end()) {
auto it2 = find(result.begin(), result.end(), feature);
if (it2 != result.end()) {
if (distance(it, it2) < 0) {
// if it is not, insert it before the feature
result.erase(remove(result.begin(), result.end(), fatherName), result.end());
result.insert(it2, fatherName);
ending = false;
}
} else {
throw logic_error("Error in topological sort because of node " + feature + " is not in result");
}
} else {
throw logic_error("Error in topological sort because of node father " + fatherName + " is not in result");
}
}
}
}
return result;
}
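The removed topological_sort repairs the feature order in place, moving each parent ahead of its children until a full pass makes no change. A hedged alternative sketch using Kahn's algorithm over an index-labelled adjacency list (not the library's node-name API):

#include <queue>
#include <stdexcept>
#include <vector>

// Kahn's algorithm: repeatedly emit a node with no remaining unprocessed
// parents; children[u] lists the nodes that have u as a parent.
std::vector<int> topologicalSort(const std::vector<std::vector<int>>& children)
{
    const int n = static_cast<int>(children.size());
    std::vector<int> indegree(n, 0);
    for (const auto& adj : children)
        for (int v : adj) indegree[v]++;
    std::queue<int> ready;
    for (int u = 0; u < n; ++u)
        if (indegree[u] == 0) ready.push(u);
    std::vector<int> order;
    while (!ready.empty()) {
        int u = ready.front(); ready.pop();
        order.push_back(u);
        for (int v : children[u])
            if (--indegree[v] == 0) ready.push(v);
    }
    if (static_cast<int>(order.size()) != n)
        throw std::logic_error("graph has a cycle");
    return order;
}

Kahn's algorithm also detects cycles for free: if the output is shorter than the node count, the graph was not a DAG.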
void Network::dump_cpt() const
{
for (auto& node : nodes) {
cout << "* " << node.first << ": (" << node.second->getNumStates() << ") : " << node.second->getCPT().sizes() << endl;
cout << node.second->getCPT() << endl;
}
}
} }

View File

@@ -8,52 +8,48 @@ namespace bayesnet {
class Network { class Network {
private: private:
map<string, unique_ptr<Node>> nodes; map<string, unique_ptr<Node>> nodes;
map<string, vector<int>> dataset;
bool fitted; bool fitted;
float maxThreads = 0.95; float maxThreads;
int classNumStates; int classNumStates;
vector<string> features; // Including classname vector<string> features;
string className; string className;
double laplaceSmoothing; int laplaceSmoothing;
torch::Tensor samples; // nxm tensor used to fit the model torch::Tensor samples;
bool isCyclic(const std::string&, std::unordered_set<std::string>&, std::unordered_set<std::string>&); bool isCyclic(const std::string&, std::unordered_set<std::string>&, std::unordered_set<std::string>&);
vector<double> predict_sample(const vector<int>&); vector<double> predict_sample(const vector<int>&);
vector<double> predict_sample(const torch::Tensor&);
vector<double> exactInference(map<string, int>&); vector<double> exactInference(map<string, int>&);
double computeFactor(map<string, int>&); double computeFactor(map<string, int>&);
void completeFit(const map<string, vector<int>>& states, const torch::Tensor& weights); double mutual_info(torch::Tensor&, torch::Tensor&);
void checkFitData(int n_features, int n_samples, int n_samples_y, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states, const torch::Tensor& weights); double entropy(torch::Tensor&);
void setStates(const map<string, vector<int>>&); double conditionalEntropy(torch::Tensor&, torch::Tensor&);
double mutualInformation(torch::Tensor&, torch::Tensor&);
void completeFit();
public: public:
Network(); Network();
explicit Network(float); explicit Network(const float, const int);
explicit Network(Network&); explicit Network(const float);
~Network() = default; explicit Network(const Network&);
torch::Tensor& getSamples(); torch::Tensor& getSamples();
float getmaxThreads(); float getmaxThreads();
void addNode(const string&); void addNode(const string&, const int);
void addEdge(const string&, const string&); void addEdge(const string&, const string&);
map<string, std::unique_ptr<Node>>& getNodes(); map<string, std::unique_ptr<Node>>& getNodes();
vector<string> getFeatures() const; vector<string> getFeatures();
int getStates() const; const int getStates();
vector<pair<string, string>> getEdges() const; vector<pair<string, string>> getEdges();
int getNumEdges() const; const int getClassNumStates();
int getClassNumStates() const; const string getClassName();
string getClassName() const; void fit(const vector<vector<int>>&, const vector<int>&, const vector<string>&, const string&);
void fit(const vector<vector<int>>& input_data, const vector<int>& labels, const vector<float>& weights, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states); void fit(torch::Tensor&, torch::Tensor&, const vector<string>&, const string&);
void fit(const torch::Tensor& X, const torch::Tensor& y, const torch::Tensor& weights, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states); vector<int> predict(const vector<vector<int>>&);
void fit(const torch::Tensor& samples, const torch::Tensor& weights, const vector<string>& featureNames, const string& className, const map<string, vector<int>>& states); //Computes the conditional edge weight of variable index u and v conditioned on class_node
vector<int> predict(const vector<vector<int>>&); // Return mx1 vector of predictions torch::Tensor conditionalEdgeWeight();
torch::Tensor predict(const torch::Tensor&); // Return mx1 tensor of predictions vector<vector<double>> predict_proba(const vector<vector<int>>&);
torch::Tensor predict_tensor(const torch::Tensor& samples, const bool proba);
vector<vector<double>> predict_proba(const vector<vector<int>>&); // Return mxn vector of probabilities
torch::Tensor predict_proba(const torch::Tensor&); // Return mxn tensor of probabilities
double score(const vector<vector<int>>&, const vector<int>&); double score(const vector<vector<int>>&, const vector<int>&);
vector<string> topological_sort(); vector<string> show();
vector<string> show() const; vector<string> graph(const string& title); // Returns a vector of strings representing the graph in graphviz format
vector<string> graph(const string& title) const; // Returns a vector of strings representing the graph in graphviz format inline string version() { return "0.1.0"; }
void initialize();
void dump_cpt() const;
inline string version() { return "0.2.0"; }
}; };
} }
#endif #endif

View File

@@ -2,8 +2,8 @@
namespace bayesnet { namespace bayesnet {
Node::Node(const std::string& name) Node::Node(const std::string& name, int numStates)
: name(name), numStates(0), cpTable(torch::Tensor()), parents(vector<Node*>()), children(vector<Node*>()) : name(name), numStates(numStates), cpTable(torch::Tensor()), parents(vector<Node*>()), children(vector<Node*>())
{ {
} }
void Node::clear() void Node::clear()
@@ -84,44 +84,30 @@ namespace bayesnet {
} }
return result; return result;
} }
void Node::computeCPT(const torch::Tensor& dataset, const vector<string>& features, const double laplaceSmoothing, const torch::Tensor& weights) void Node::computeCPT(map<string, vector<int>>& dataset, const int laplaceSmoothing)
{ {
dimensions.clear();
// Get dimensions of the CPT // Get dimensions of the CPT
dimensions.push_back(numStates); dimensions.push_back(numStates);
transform(parents.begin(), parents.end(), back_inserter(dimensions), [](const auto& parent) { return parent->getNumStates(); }); transform(parents.begin(), parents.end(), back_inserter(dimensions), [](Node* parent) { return parent->getNumStates(); });
// Create a tensor of zeros with the dimensions of the CPT // Create a tensor of zeros with the dimensions of the CPT
cpTable = torch::zeros(dimensions, torch::kFloat) + laplaceSmoothing; cpTable = torch::zeros(dimensions, torch::kFloat) + laplaceSmoothing;
// Fill table with counts // Fill table with counts
auto pos = find(features.begin(), features.end(), name); for (int n_sample = 0; n_sample < dataset[name].size(); ++n_sample) {
if (pos == features.end()) { torch::List<c10::optional<torch::Tensor>> coordinates;
throw logic_error("Feature " + name + " not found in dataset"); coordinates.push_back(torch::tensor(dataset[name][n_sample]));
} transform(parents.begin(), parents.end(), back_inserter(coordinates), [&dataset, &n_sample](Node* parent) { return torch::tensor(dataset[parent->getName()][n_sample]); });
int name_index = pos - features.begin();
for (int n_sample = 0; n_sample < dataset.size(1); ++n_sample) {
c10::List<c10::optional<at::Tensor>> coordinates;
coordinates.push_back(dataset.index({ name_index, n_sample }));
for (auto parent : parents) {
pos = find(features.begin(), features.end(), parent->getName());
if (pos == features.end()) {
throw logic_error("Feature parent " + parent->getName() + " not found in dataset");
}
int parent_index = pos - features.begin();
coordinates.push_back(dataset.index({ parent_index, n_sample }));
}
// Increment the count of the corresponding coordinate // Increment the count of the corresponding coordinate
cpTable.index_put_({ coordinates }, cpTable.index({ coordinates }) + weights.index({ n_sample }).item<double>()); cpTable.index_put_({ coordinates }, cpTable.index({ coordinates }) + 1);
} }
// Normalize the counts // Normalize the counts
cpTable = cpTable / cpTable.sum(0); cpTable = cpTable / cpTable.sum(0);
} }
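The new computeCPT seeds every cell of the table with laplaceSmoothing (now 1/m rather than the old integer constant 1), accumulates the per-sample weights instead of raw counts, and normalizes over dimension 0 so each column becomes P(node | parents). A scalar sketch of that smoothing for a single parent configuration, with alpha as the smoothing constant:

#include <vector>

// Laplace-smoothed estimate of P(X = x | parents fixed): seed every cell with
// alpha, add the (possibly weighted) counts, then normalize over the child axis.
std::vector<double> smoothedColumn(const std::vector<double>& weightedCounts, double alpha)
{
    std::vector<double> p(weightedCounts.size());
    double total = 0.0;
    for (size_t i = 0; i < weightedCounts.size(); ++i) {
        p[i] = weightedCounts[i] + alpha;
        total += p[i];
    }
    for (auto& v : p) v /= total;
    return p;
}

With alpha > 0 no cell can be exactly zero, so exactInference never multiplies in a zero factor for an unseen configuration.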
float Node::getFactorValue(map<string, int>& evidence) float Node::getFactorValue(map<string, int>& evidence)
{ {
c10::List<c10::optional<at::Tensor>> coordinates; torch::List<c10::optional<torch::Tensor>> coordinates;
// following predetermined order of indices in the cpTable (see Node.h) // following predetermined order of indices in the cpTable (see Node.h)
coordinates.push_back(at::tensor(evidence[name])); coordinates.push_back(torch::tensor(evidence[name]));
transform(parents.begin(), parents.end(), back_inserter(coordinates), [&evidence](const auto& parent) { return at::tensor(evidence[parent->getName()]); }); transform(parents.begin(), parents.end(), back_inserter(coordinates), [&evidence](Node* parent) { return torch::tensor(evidence[parent->getName()]); });
return cpTable.index({ coordinates }).item<float>(); return cpTable.index({ coordinates }).item<float>();
} }
vector<string> Node::graph(const string& className) vector<string> Node::graph(const string& className)
@@ -129,7 +115,7 @@ namespace bayesnet {
auto output = vector<string>(); auto output = vector<string>();
auto suffix = name == className ? ", fontcolor=red, fillcolor=lightblue, style=filled " : ""; auto suffix = name == className ? ", fontcolor=red, fillcolor=lightblue, style=filled " : "";
output.push_back(name + " [shape=circle" + suffix + "] \n"); output.push_back(name + " [shape=circle" + suffix + "] \n");
transform(children.begin(), children.end(), back_inserter(output), [this](const auto& child) { return name + " -> " + child->getName(); }); transform(children.begin(), children.end(), back_inserter(output), [this](Node* child) { return name + " -> " + child->getName() + "\n"; });
return output; return output;
} }
} }

View File

@@ -16,7 +16,7 @@ namespace bayesnet {
vector<int64_t> dimensions; // dimensions of the cpTable vector<int64_t> dimensions; // dimensions of the cpTable
public: public:
vector<pair<string, string>> combinations(const vector<string>&); vector<pair<string, string>> combinations(const vector<string>&);
explicit Node(const string&); Node(const std::string&, int);
void clear(); void clear();
void addParent(Node*); void addParent(Node*);
void addChild(Node*); void addChild(Node*);
@@ -26,7 +26,7 @@ namespace bayesnet {
vector<Node*>& getParents(); vector<Node*>& getParents();
vector<Node*>& getChildren(); vector<Node*>& getChildren();
torch::Tensor& getCPT(); torch::Tensor& getCPT();
void computeCPT(const torch::Tensor& dataset, const vector<string>& features, const double laplaceSmoothing, const torch::Tensor& weights); void computeCPT(map<string, vector<int>>&, const int);
int getNumStates() const; int getNumStates() const;
void setNumStates(int); void setNumStates(int);
unsigned minFill(); unsigned minFill();

View File

@@ -1,110 +0,0 @@
#include "Proposal.h"
#include "ArffFiles.h"
namespace bayesnet {
Proposal::Proposal(torch::Tensor& dataset_, vector<string>& features_, string& className_) : pDataset(dataset_), pFeatures(features_), pClassName(className_) {}
Proposal::~Proposal()
{
for (auto& [key, value] : discretizers) {
delete value;
}
}
void Proposal::checkInput(const torch::Tensor& X, const torch::Tensor& y)
{
if (!torch::is_floating_point(X)) {
throw std::invalid_argument("X must be a floating point tensor");
}
if (torch::is_floating_point(y)) {
throw std::invalid_argument("y must be an integer tensor");
}
}
map<string, vector<int>> Proposal::localDiscretizationProposal(const map<string, vector<int>>& oldStates, Network& model)
{
// The order of local discretization matters; simply iterating features 0, 1, 2... is not good enough,
// even though we re-discretize the affected features after each feature's local discretization
auto order = model.topological_sort();
auto& nodes = model.getNodes();
map<string, vector<int>> states = oldStates;
vector<int> indicesToReDiscretize;
bool upgrade = false; // Flag to check if we need to upgrade the model
for (auto feature : order) {
auto nodeParents = nodes[feature]->getParents();
if (nodeParents.size() < 2) continue; // Only has class as parent
upgrade = true;
int index = find(pFeatures.begin(), pFeatures.end(), feature) - pFeatures.begin();
indicesToReDiscretize.push_back(index); // We need to re-discretize this feature
vector<string> parents;
transform(nodeParents.begin(), nodeParents.end(), back_inserter(parents), [](const auto& p) { return p->getName(); });
// Remove class as parent as it will be added later
parents.erase(remove(parents.begin(), parents.end(), pClassName), parents.end());
// Get the indices of the parents
vector<int> indices;
indices.push_back(-1); // Add class index
transform(parents.begin(), parents.end(), back_inserter(indices), [&](const auto& p) {return find(pFeatures.begin(), pFeatures.end(), p) - pFeatures.begin(); });
// Now we fit the discretizer of the feature, conditioned on its parents and the class i.e. discretizer.fit(X[index], X[indices] + y)
vector<string> yJoinParents(Xf.size(1));
for (auto idx : indices) {
for (int i = 0; i < Xf.size(1); ++i) {
yJoinParents[i] += to_string(pDataset.index({ idx, i }).item<int>());
}
}
auto arff = ArffFiles();
auto yxv = arff.factorize(yJoinParents);
auto xvf_ptr = Xf.index({ index }).data_ptr<float>();
auto xvf = vector<mdlp::precision_t>(xvf_ptr, xvf_ptr + Xf.size(1));
discretizers[feature]->fit(xvf, yxv);
}
if (upgrade) {
// Discretize again X (only the affected indices) with the new fitted discretizers
for (auto index : indicesToReDiscretize) {
auto Xt_ptr = Xf.index({ index }).data_ptr<float>();
auto Xt = vector<float>(Xt_ptr, Xt_ptr + Xf.size(1));
pDataset.index_put_({ index, "..." }, torch::tensor(discretizers[pFeatures[index]]->transform(Xt)));
auto xStates = vector<int>(discretizers[pFeatures[index]]->getCutPoints().size() + 1);
iota(xStates.begin(), xStates.end(), 0);
//Update new states of the feature/node
states[pFeatures[index]] = xStates;
}
const torch::Tensor weights = torch::full({ pDataset.size(1) }, 1.0 / pDataset.size(1), torch::kDouble);
model.fit(pDataset, weights, pFeatures, pClassName, states);
}
return states;
}
map<string, vector<int>> Proposal::fit_local_discretization(const torch::Tensor& y)
{
// Discretize the continuous input data and build pDataset (Classifier::dataset)
int m = Xf.size(1);
int n = Xf.size(0);
map<string, vector<int>> states;
pDataset = torch::zeros({ n + 1, m }, kInt32);
auto yv = vector<int>(y.data_ptr<int>(), y.data_ptr<int>() + y.size(0));
// discretize the input data feature by feature (each row is one feature)
for (auto i = 0; i < pFeatures.size(); ++i) {
auto* discretizer = new mdlp::CPPFImdlp();
auto Xt_ptr = Xf.index({ i }).data_ptr<float>();
auto Xt = vector<float>(Xt_ptr, Xt_ptr + Xf.size(1));
discretizer->fit(Xt, yv);
pDataset.index_put_({ i, "..." }, torch::tensor(discretizer->transform(Xt)));
auto xStates = vector<int>(discretizer->getCutPoints().size() + 1);
iota(xStates.begin(), xStates.end(), 0);
states[pFeatures[i]] = xStates;
discretizers[pFeatures[i]] = discretizer;
}
int n_classes = torch::max(y).item<int>() + 1;
auto yStates = vector<int>(n_classes);
iota(yStates.begin(), yStates.end(), 0);
states[pClassName] = yStates;
pDataset.index_put_({ n, "..." }, y);
return states;
}
torch::Tensor Proposal::prepareX(torch::Tensor& X)
{
auto Xtd = torch::zeros_like(X, torch::kInt32);
for (int i = 0; i < X.size(0); ++i) {
auto Xt = vector<float>(X[i].data_ptr<float>(), X[i].data_ptr<float>() + X.size(1));
auto Xd = discretizers[pFeatures[i]]->transform(Xt);
Xtd.index_put_({ i }, torch::tensor(Xd, torch::kInt32));
}
return Xtd;
}
}
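localDiscretizationProposal re-fits a feature's MDLP discretizer against a pseudo-class built by concatenating, per sample, the class value and the values of the feature's parents, then factorizing the resulting strings. A sketch of that join-and-factorize step (a separator is inserted here to avoid digit collisions such as (1,23) vs (12,3); the removed code concatenates the digits directly):

#include <map>
#include <string>
#include <vector>

// columns holds one vector per variable (class first, then parents), all of
// length m; the result codes each distinct joined string as an integer.
std::vector<int> factorizeJoint(const std::vector<std::vector<int>>& columns)
{
    const size_t m = columns.front().size();
    std::vector<std::string> joined(m);
    for (const auto& col : columns)
        for (size_t i = 0; i < m; ++i)
            joined[i] += std::to_string(col[i]) + "-";
    std::map<std::string, int> codes;
    std::vector<int> result(m);
    for (size_t i = 0; i < m; ++i) {
        auto it = codes.try_emplace(joined[i], static_cast<int>(codes.size())).first;
        result[i] = it->second;
    }
    return result;
}

The feature is then re-discretized against these codes, which is what lets each node's cut points depend on its parents in the final structure.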

View File

@@ -1,30 +0,0 @@
#ifndef PROPOSAL_H
#define PROPOSAL_H
#include <string>
#include <map>
#include <torch/torch.h>
#include "Network.h"
#include "CPPFImdlp.h"
#include "Classifier.h"
namespace bayesnet {
class Proposal {
public:
Proposal(torch::Tensor& pDataset, vector<string>& features_, string& className_);
virtual ~Proposal();
protected:
void checkInput(const torch::Tensor& X, const torch::Tensor& y);
torch::Tensor prepareX(torch::Tensor& X);
map<string, vector<int>> localDiscretizationProposal(const map<string, vector<int>>& states, Network& model);
map<string, vector<int>> fit_local_discretization(const torch::Tensor& y);
torch::Tensor Xf; // X continuous nxm tensor
torch::Tensor y; // y discrete nx1 tensor
map<string, mdlp::CPPFImdlp*> discretizers;
private:
torch::Tensor& pDataset; // (n+1)xm tensor
vector<string>& pFeatures;
string& pClassName;
};
}
#endif

View File

@@ -4,7 +4,7 @@ namespace bayesnet {
SPODE::SPODE(int root) : Classifier(Network()), root(root) {} SPODE::SPODE(int root) : Classifier(Network()), root(root) {}
void SPODE::buildModel(const torch::Tensor& weights) void SPODE::train()
{ {
// 0. Add all nodes to the model // 0. Add all nodes to the model
addNodes(); addNodes();
@@ -17,7 +17,7 @@ namespace bayesnet {
} }
} }
} }
vector<string> SPODE::graph(const string& name) const vector<string> SPODE::graph(string name )
{ {
return model.graph(name); return model.graph(name);
} }

View File

@@ -1,17 +1,15 @@
#ifndef SPODE_H #ifndef SPODE_H
#define SPODE_H #define SPODE_H
#include "Classifier.h" #include "Classifier.h"
namespace bayesnet { namespace bayesnet {
class SPODE : public Classifier { class SPODE : public Classifier {
private: private:
int root; int root;
protected: protected:
void buildModel(const torch::Tensor& weights) override; void train() override;
public: public:
explicit SPODE(int root); explicit SPODE(int root);
virtual ~SPODE() {}; vector<string> graph(string name = "SPODE") override;
vector<string> graph(const string& name = "SPODE") const override;
}; };
} }
#endif #endif

View File

@@ -1,48 +0,0 @@
#include "SPODELd.h"
namespace bayesnet {
using namespace std;
SPODELd::SPODELd(int root) : SPODE(root), Proposal(dataset, features, className) {}
SPODELd& SPODELd::fit(torch::Tensor& X_, torch::Tensor& y_, const vector<string>& features_, const string& className_, map<string, vector<int>>& states_)
{
checkInput(X_, y_);
features = features_;
className = className_;
Xf = X_;
y = y_;
// Fills vectors Xv & yv with the data from tensors X_ (discretized) & y
states = fit_local_discretization(y);
// We have discretized the input data
// 1st we need to fit the model to build the normal SPODE structure, SPODE::fit initializes the base Bayesian network
SPODE::fit(dataset, features, className, states);
states = localDiscretizationProposal(states, model);
return *this;
}
SPODELd& SPODELd::fit(torch::Tensor& dataset, const vector<string>& features_, const string& className_, map<string, vector<int>>& states_)
{
if (!torch::is_floating_point(dataset)) {
throw std::runtime_error("Dataset must be a floating point tensor");
}
Xf = dataset.index({ torch::indexing::Slice(0, dataset.size(0) - 1), "..." }).clone();
y = dataset.index({ -1, "..." }).clone();
features = features_;
className = className_;
// Fills vectors Xv & yv with the data from tensors X_ (discretized) & y
states = fit_local_discretization(y);
// We have discretized the input data
// 1st we need to fit the model to build the normal SPODE structure, SPODE::fit initializes the base Bayesian network
SPODE::fit(dataset, features, className, states);
states = localDiscretizationProposal(states, model);
return *this;
}
Tensor SPODELd::predict(Tensor& X)
{
auto Xt = prepareX(X);
return SPODE::predict(Xt);
}
vector<string> SPODELd::graph(const string& name) const
{
return SPODE::graph(name);
}
}

View File

@@ -1,19 +0,0 @@
#ifndef SPODELD_H
#define SPODELD_H
#include "SPODE.h"
#include "Proposal.h"
namespace bayesnet {
using namespace std;
class SPODELd : public SPODE, public Proposal {
public:
explicit SPODELd(int root);
virtual ~SPODELd() = default;
SPODELd& fit(torch::Tensor& X, torch::Tensor& y, const vector<string>& features, const string& className, map<string, vector<int>>& states) override;
SPODELd& fit(torch::Tensor& dataset, const vector<string>& features, const string& className, map<string, vector<int>>& states) override;
vector<string> graph(const string& name = "SPODE") const override;
Tensor predict(Tensor& X) override;
static inline string version() { return "0.0.1"; };
};
}
#endif // !SPODELD_H

View File

@@ -5,25 +5,25 @@ namespace bayesnet {
TAN::TAN() : Classifier(Network()) {} TAN::TAN() : Classifier(Network()) {}
void TAN::buildModel(const torch::Tensor& weights) void TAN::train()
{ {
// 0. Add all nodes to the model // 0. Add all nodes to the model
addNodes(); addNodes();
// 1. Compute mutual information between each feature and the class and set the root node // 1. Compute mutual information between each feature and the class and set the root node
// as the highest mutual information with the class // as the highest mutual information with the class
auto mi = vector <pair<int, float >>(); auto mi = vector <pair<int, float >>();
Tensor class_dataset = dataset.index({ -1, "..." }); Tensor class_dataset = dataset.index({ "...", -1 });
for (int i = 0; i < static_cast<int>(features.size()); ++i) { for (int i = 0; i < static_cast<int>(features.size()); ++i) {
Tensor feature_dataset = dataset.index({ i, "..." }); Tensor feature_dataset = dataset.index({ "...", i });
auto mi_value = metrics.mutualInformation(class_dataset, feature_dataset, weights); auto mi_value = metrics.mutualInformation(class_dataset, feature_dataset);
mi.push_back({ i, mi_value }); mi.push_back({ i, mi_value });
} }
sort(mi.begin(), mi.end(), [](const auto& left, const auto& right) {return left.second < right.second;}); sort(mi.begin(), mi.end(), [](const auto& left, const auto& right) {return left.second < right.second;});
auto root = mi[mi.size() - 1].first; auto root = mi[mi.size() - 1].first;
// 2. Compute mutual information between each feature and the class // 2. Compute mutual information between each feature and the class
auto weights_matrix = metrics.conditionalEdge(weights); auto weights = metrics.conditionalEdge();
// 3. Compute the maximum spanning tree // 3. Compute the maximum spanning tree
auto mst = metrics.maximumSpanningTree(features, weights_matrix, root); auto mst = metrics.maximumSpanningTree(features, weights, root);
// 4. Add edges from the maximum spanning tree to the model // 4. Add edges from the maximum spanning tree to the model
for (auto i = 0; i < mst.size(); ++i) { for (auto i = 0; i < mst.size(); ++i) {
auto [from, to] = mst[i]; auto [from, to] = mst[i];
@@ -34,7 +34,7 @@ namespace bayesnet {
model.addEdge(className, feature); model.addEdge(className, feature);
} }
} }
vector<string> TAN::graph(const string& title) const vector<string> TAN::graph(string title)
{ {
return model.graph(title); return model.graph(title);
} }
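TAN's step 1 only needs the arg-max of the per-feature mutual information; the diff gets it by sorting the whole list and taking the last element. A minimal equivalent sketch (mi holds (feature index, I(Xi;C)) pairs, as in the code above):

#include <algorithm>
#include <utility>
#include <vector>

// The root of the TAN tree is the feature with the largest I(Xi;C).
int selectRoot(const std::vector<std::pair<int, float>>& mi)
{
    auto best = std::max_element(mi.begin(), mi.end(),
        [](const auto& a, const auto& b) { return a.second < b.second; });
    return best->first;
}

max_element finds the same feature in one O(n) pass as the sort-then-take-last idiom, which costs O(n log n).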

View File

@@ -3,14 +3,14 @@
#include "Classifier.h" #include "Classifier.h"
namespace bayesnet { namespace bayesnet {
using namespace std; using namespace std;
using namespace torch;
class TAN : public Classifier { class TAN : public Classifier {
private: private:
protected: protected:
void buildModel(const torch::Tensor& weights) override; void train() override;
public: public:
TAN(); TAN();
virtual ~TAN() {}; vector<string> graph(string name = "TAN") override;
vector<string> graph(const string& name = "TAN") const override;
}; };
} }
#endif #endif

View File

@@ -1,31 +0,0 @@
#include "TANLd.h"
namespace bayesnet {
using namespace std;
TANLd::TANLd() : TAN(), Proposal(dataset, features, className) {}
TANLd& TANLd::fit(torch::Tensor& X_, torch::Tensor& y_, const vector<string>& features_, const string& className_, map<string, vector<int>>& states_)
{
checkInput(X_, y_);
features = features_;
className = className_;
Xf = X_;
y = y_;
// Fills vectors Xv & yv with the data from tensors X_ (discretized) & y
states = fit_local_discretization(y);
// We have discretized the input data
// 1st we need to fit the model to build the normal TAN structure, TAN::fit initializes the base Bayesian network
TAN::fit(dataset, features, className, states);
states = localDiscretizationProposal(states, model);
return *this;
}
Tensor TANLd::predict(Tensor& X)
{
auto Xt = prepareX(X);
return TAN::predict(Xt);
}
vector<string> TANLd::graph(const string& name) const
{
return TAN::graph(name);
}
}

View File

@@ -1,19 +0,0 @@
#ifndef TANLD_H
#define TANLD_H
#include "TAN.h"
#include "Proposal.h"
namespace bayesnet {
using namespace std;
class TANLd : public TAN, public Proposal {
private:
public:
TANLd();
virtual ~TANLd() = default;
TANLd& fit(torch::Tensor& X, torch::Tensor& y, const vector<string>& features, const string& className, map<string, vector<int>>& states) override;
vector<string> graph(const string& name = "TAN") const override;
Tensor predict(Tensor& X) override;
static inline string version() { return "0.0.1"; };
};
}
#endif // !TANLD_H

View File

@@ -3,8 +3,7 @@
namespace bayesnet { namespace bayesnet {
using namespace std; using namespace std;
using namespace torch; using namespace torch;
// Return the indices in descending order vector<int> argsort(vector<float>& nums)
vector<int> argsort(vector<double>& nums)
{ {
int n = nums.size(); int n = nums.size();
vector<int> indices(n); vector<int> indices(n);

View File

@@ -5,7 +5,7 @@
namespace bayesnet { namespace bayesnet {
using namespace std; using namespace std;
using namespace torch; using namespace torch;
vector<int> argsort(vector<double>& nums); vector<int> argsort(vector<float>& nums);
vector<vector<int>> tensorToVector(Tensor& tensor); vector<vector<int>> tensorToVector(Tensor& tensor);
} }
#endif //BAYESNET_UTILS_H #endif //BAYESNET_UTILS_H

View File

@@ -1,10 +0,0 @@
#ifndef BESTRESULT_H
#define BESTRESULT_H
#include <string>
class BestResult {
public:
static std::string title() { return "STree_default (linear-ovo)"; }
static double score() { return 22.109799; }
static std::string scoreName() { return "accuracy"; }
};
#endif

View File

@@ -4,9 +4,5 @@ include_directories(${BayesNet_SOURCE_DIR}/lib/Files)
include_directories(${BayesNet_SOURCE_DIR}/lib/mdlp) include_directories(${BayesNet_SOURCE_DIR}/lib/mdlp)
include_directories(${BayesNet_SOURCE_DIR}/lib/argparse/include) include_directories(${BayesNet_SOURCE_DIR}/lib/argparse/include)
include_directories(${BayesNet_SOURCE_DIR}/lib/json/include) include_directories(${BayesNet_SOURCE_DIR}/lib/json/include)
add_executable(main main.cc Folding.cc platformUtils.cc Experiment.cc Datasets.cc Models.cc ReportConsole.cc ReportBase.cc) add_executable(main main.cc Folding.cc platformUtils.cc Experiment.cc Datasets.cc CrossValidation.cc Models.cc)
add_executable(manage manage.cc Results.cc ReportConsole.cc ReportExcel.cc ReportBase.cc) target_link_libraries(main BayesNet ArffFiles mdlp "${TORCH_LIBRARIES}")
add_executable(list list.cc platformUtils Datasets.cc)
target_link_libraries(main BayesNet ArffFiles mdlp "${TORCH_LIBRARIES}")
target_link_libraries(manage "${TORCH_LIBRARIES}" OpenXLSX::OpenXLSX)
target_link_libraries(list ArffFiles mdlp "${TORCH_LIBRARIES}")

View File

@@ -1,14 +0,0 @@
#ifndef COLORS_H
#define COLORS_H
class Colors {
public:
static std::string MAGENTA() { return "\033[1;35m"; }
static std::string BLUE() { return "\033[1;34m"; }
static std::string CYAN() { return "\033[1;36m"; }
static std::string GREEN() { return "\033[1;32m"; }
static std::string YELLOW() { return "\033[1;33m"; }
static std::string RED() { return "\033[1;31m"; }
static std::string WHITE() { return "\033[1;37m"; }
static std::string RESET() { return "\033[0m"; }
};
#endif // COLORS_H
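For reference, a one-line usage sketch of the ANSI escape helpers this removed header provided:

#include <iostream>
#include "Colors.h"

// Prints "ok" in green, then restores the terminal's default color.
int main()
{
    std::cout << Colors::GREEN() << "ok" << Colors::RESET() << std::endl;
}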

View File

@@ -0,0 +1,79 @@
#include "CrossValidation.h"
#include "Models.h"
namespace platform {
using json = nlohmann::json;
using namespace std::chrono;
CrossValidation::CrossValidation(const string& modelName, bool stratified, int nfolds, const vector<int>& randomSeeds, platform::Datasets& datasets) : modelName(modelName), stratified(stratified), nfolds(nfolds), randomSeeds(randomSeeds), datasets(datasets)
{
}
Result CrossValidation::crossValidate(const string& fileName)
{
auto [Xt, y] = datasets.getTensors(fileName);
auto states = datasets.getStates(fileName);
auto className = datasets.getClassName(fileName);
auto features = datasets.getFeatures(fileName);
auto samples = datasets.getNSamples(fileName);
auto result = Result();
auto [values, counts] = at::_unique(y);
result.setSamples(Xt.size(1)).setFeatures(Xt.size(0)).setClasses(values.size(0));
int nSeeds = static_cast<int>(randomSeeds.size());
auto accuracy_test = torch::zeros({ nfolds * nSeeds }, torch::kFloat64);
auto accuracy_train = torch::zeros({ nfolds * nSeeds }, torch::kFloat64);
auto train_time = torch::zeros({ nfolds * nSeeds }, torch::kFloat64);
auto test_time = torch::zeros({ nfolds * nSeeds }, torch::kFloat64);
auto nodes = torch::zeros({ nfolds * nSeeds }, torch::kFloat64);
auto edges = torch::zeros({ nfolds * nSeeds }, torch::kFloat64);
auto num_states = torch::zeros({ nfolds * nSeeds }, torch::kFloat64);
Timer train_timer, test_timer;
int item = 0;
for (auto seed : randomSeeds) {
cout << "(" << seed << ") " << flush;
Fold* fold;
if (stratified)
fold = new StratifiedKFold(nfolds, y, seed);
else
fold = new KFold(nfolds, samples, seed);
cout << "Fold: " << flush;
for (int nfold = 0; nfold < nfolds; nfold++) {
bayesnet::BaseClassifier* model = Models::get(modelName);
result.setModelVersion(model->getVersion());
train_timer.start();
auto [train, test] = fold->getFold(nfold);
auto train_t = torch::tensor(train);
auto test_t = torch::tensor(test);
auto X_train = Xt.index({ "...", train_t });
auto y_train = y.index({ train_t });
auto X_test = Xt.index({ "...", test_t });
auto y_test = y.index({ test_t });
cout << nfold + 1 << ", " << flush;
model->fit(X_train, y_train, features, className, states);
nodes[item] = model->getNumberOfNodes();
edges[item] = model->getNumberOfEdges();
num_states[item] = model->getNumberOfStates();
train_time[item] = train_timer.getDuration();
auto accuracy_train_value = model->score(X_train, y_train);
test_timer.start();
auto accuracy_test_value = model->score(X_test, y_test);
test_time[item] = test_timer.getDuration();
accuracy_train[item] = accuracy_train_value;
accuracy_test[item] = accuracy_test_value;
// Store results and times in vector
result.addScoreTrain(accuracy_train_value);
result.addScoreTest(accuracy_test_value);
result.addTimeTrain(train_time[item].item<double>());
result.addTimeTest(test_time[item].item<double>());
item++;
}
delete fold;
}
cout << "end." << endl;
result.setScoreTest(torch::mean(accuracy_test).item<double>()).setScoreTrain(torch::mean(accuracy_train).item<double>());
result.setScoreTestStd(torch::std(accuracy_test).item<double>()).setScoreTrainStd(torch::std(accuracy_train).item<double>());
result.setTrainTime(torch::mean(train_time).item<double>()).setTestTime(torch::mean(test_time).item<double>());
result.setNodes(torch::mean(nodes).item<double>()).setLeaves(torch::mean(edges).item<double>()).setDepth(torch::mean(num_states).item<double>());
return result;
}
} // namespace platform
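
The loop above fills one slot per (seed, fold) pair and then reduces with torch::mean and torch::std before storing the Result. A minimal, self-contained sketch of that aggregation step, with made-up accuracies for two seeds and five folds:

#include <torch/torch.h>
#include <iostream>

int main()
{
    // Hypothetical contents of accuracy_test after the loop: 2 seeds x 5 folds.
    auto acc = torch::tensor({ 0.93, 0.95, 0.94, 0.92, 0.96,
                               0.94, 0.93, 0.95, 0.95, 0.94 }, torch::kFloat64);
    // Same reduction the code above applies when building the Result.
    std::cout << "score_test: " << torch::mean(acc).item<double>()
              << " +/- " << torch::std(acc).item<double>() << std::endl;
    return 0;
}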

View File

@@ -0,0 +1,25 @@
#ifndef CROSSVALIDATION_H
#define CROSSVALIDATION_H
#include <torch/torch.h>
#include <nlohmann/json.hpp>
#include <string>
#include <chrono>
#include "Folding.h"
#include "Datasets.h"
#include "Experiment.h"
namespace platform {
class CrossValidation {
private:
bool stratified;
int nfolds;
string modelName;
vector<int> randomSeeds;
platform::Datasets& datasets;
public:
CrossValidation(const string& modelName, bool stratified, int nfolds, const vector<int>& randomSeeds, platform::Datasets& datasets);
~CrossValidation() = default;
Result crossValidate(const string& fileName);
};
}
#endif // !CROSSVALIDATION_H
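
A minimal usage sketch of the class declared above. The main function, the datasets/ path, the "iris" dataset and the "TAN" model name are assumptions for illustration:

#include "CrossValidation.h"
#include <iostream>
#include <vector>

int main()
{
    auto datasets = platform::Datasets("datasets/", /*discretize=*/true, platform::ARFF);
    std::vector<int> seeds = { 271, 314, 171 };
    platform::CrossValidation cv("TAN", /*stratified=*/true, /*nfolds=*/5, seeds, datasets);
    auto result = cv.crossValidate("iris");
    std::cout << "test score: " << result.get_score_test() << std::endl;
    return 0;
}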

View File

@@ -1,8 +1,17 @@
#include "Datasets.h" #include "Datasets.h"
#include "platformUtils.h" #include "platformUtils.h"
#include "ArffFiles.h" #include "ArffFiles.h"
#include <fstream>
namespace platform { namespace platform {
vector<string> split(const string& text, char delimiter)
{
vector<string> result;
stringstream ss(text);
string token;
while (getline(ss, token, delimiter)) {
result.push_back(token);
}
return result;
}
void Datasets::load() void Datasets::load()
{ {
ifstream catalog(path + "/all.txt"); ifstream catalog(path + "/all.txt");
@@ -25,69 +34,34 @@ namespace platform {
transform(datasets.begin(), datasets.end(), back_inserter(result), [](const auto& d) { return d.first; }); transform(datasets.begin(), datasets.end(), back_inserter(result), [](const auto& d) { return d.first; });
return result; return result;
} }
vector<string> Datasets::getFeatures(const string& name) const vector<string> Datasets::getFeatures(const string& name)
{ {
if (datasets.at(name)->isLoaded()) { if (datasets[name]->isLoaded()) {
return datasets.at(name)->getFeatures(); return datasets[name]->getFeatures();
} else { } else {
throw invalid_argument("Dataset not loaded."); throw invalid_argument("Dataset not loaded.");
} }
} }
map<string, vector<int>> Datasets::getStates(const string& name) const map<string, vector<int>> Datasets::getStates(const string& name)
{ {
if (datasets.at(name)->isLoaded()) { if (datasets[name]->isLoaded()) {
return datasets.at(name)->getStates(); return datasets[name]->getStates();
} else { } else {
throw invalid_argument("Dataset not loaded."); throw invalid_argument("Dataset not loaded.");
} }
} }
void Datasets::loadDataset(const string& name) const string Datasets::getClassName(const string& name)
{ {
if (datasets.at(name)->isLoaded()) { if (datasets[name]->isLoaded()) {
return; return datasets[name]->getClassName();
} else {
datasets.at(name)->load();
}
}
string Datasets::getClassName(const string& name) const
{
if (datasets.at(name)->isLoaded()) {
return datasets.at(name)->getClassName();
} else { } else {
throw invalid_argument("Dataset not loaded."); throw invalid_argument("Dataset not loaded.");
} }
} }
int Datasets::getNSamples(const string& name) const int Datasets::getNSamples(const string& name)
{ {
if (datasets.at(name)->isLoaded()) { if (datasets[name]->isLoaded()) {
return datasets.at(name)->getNSamples(); return datasets[name]->getNSamples();
} else {
throw invalid_argument("Dataset not loaded.");
}
}
int Datasets::getNClasses(const string& name)
{
if (datasets.at(name)->isLoaded()) {
auto className = datasets.at(name)->getClassName();
if (discretize) {
auto states = getStates(name);
return states.at(className).size();
}
auto [Xv, yv] = getVectors(name);
return *max_element(yv.begin(), yv.end()) + 1;
} else {
throw invalid_argument("Dataset not loaded.");
}
}
vector<int> Datasets::getClassesCounts(const string& name) const
{
if (datasets.at(name)->isLoaded()) {
auto [Xv, yv] = datasets.at(name)->getVectors();
vector<int> counts(*max_element(yv.begin(), yv.end()) + 1);
for (auto y : yv) {
counts[y]++;
}
return counts;
} else { } else {
throw invalid_argument("Dataset not loaded."); throw invalid_argument("Dataset not loaded.");
} }
@@ -113,22 +87,20 @@ namespace platform {
} }
return datasets[name]->getTensors(); return datasets[name]->getTensors();
} }
bool Datasets::isDataset(const string& name) const bool Datasets::isDataset(const string& name)
{ {
return datasets.find(name) != datasets.end(); return datasets.find(name) != datasets.end();
} }
Dataset::Dataset(const Dataset& dataset) : path(dataset.path), name(dataset.name), className(dataset.className), n_samples(dataset.n_samples), n_features(dataset.n_features), features(dataset.features), states(dataset.states), loaded(dataset.loaded), discretize(dataset.discretize), X(dataset.X), y(dataset.y), Xv(dataset.Xv), Xd(dataset.Xd), yv(dataset.yv), fileType(dataset.fileType) Dataset::Dataset(Dataset& dataset) : path(dataset.path), name(dataset.name), className(dataset.className), n_samples(dataset.n_samples), n_features(dataset.n_features), features(dataset.features), states(dataset.states), loaded(dataset.loaded), discretize(dataset.discretize), X(dataset.X), y(dataset.y), Xv(dataset.Xv), Xd(dataset.Xd), yv(dataset.yv), fileType(dataset.fileType) {}
{ string Dataset::getName()
}
string Dataset::getName() const
{ {
return name; return name;
} }
string Dataset::getClassName() const string Dataset::getClassName()
{ {
return className; return className;
} }
vector<string> Dataset::getFeatures() const vector<string> Dataset::getFeatures()
{ {
if (loaded) { if (loaded) {
return features; return features;
@@ -136,7 +108,7 @@ namespace platform {
throw invalid_argument("Dataset not loaded."); throw invalid_argument("Dataset not loaded.");
} }
} }
int Dataset::getNFeatures() const int Dataset::getNFeatures()
{ {
if (loaded) { if (loaded) {
return n_features; return n_features;
@@ -144,7 +116,7 @@ namespace platform {
throw invalid_argument("Dataset not loaded."); throw invalid_argument("Dataset not loaded.");
} }
} }
int Dataset::getNSamples() const int Dataset::getNSamples()
{ {
if (loaded) { if (loaded) {
return n_samples; return n_samples;
@@ -152,7 +124,7 @@ namespace platform {
throw invalid_argument("Dataset not loaded."); throw invalid_argument("Dataset not loaded.");
} }
} }
map<string, vector<int>> Dataset::getStates() const map<string, vector<int>> Dataset::getStates()
{ {
if (loaded) { if (loaded) {
return states; return states;
@@ -213,11 +185,10 @@ namespace platform {
{ {
for (int i = 0; i < features.size(); ++i) { for (int i = 0; i < features.size(); ++i) {
states[features[i]] = vector<int>(*max_element(Xd[i].begin(), Xd[i].end()) + 1); states[features[i]] = vector<int>(*max_element(Xd[i].begin(), Xd[i].end()) + 1);
auto item = states.at(features[i]); iota(begin(states[features[i]]), end(states[features[i]]), 0);
iota(begin(item), end(item), 0);
} }
states[className] = vector<int>(*max_element(yv.begin(), yv.end()) + 1); states[className] = vector<int>(*max_element(yv.begin(), yv.end()) + 1);
iota(begin(states.at(className)), end(states.at(className)), 0); iota(begin(states[className]), end(states[className]), 0);
} }
void Dataset::load_arff() void Dataset::load_arff()
{ {
@@ -229,7 +200,7 @@ namespace platform {
// Get className & Features // Get className & Features
className = arff.getClassName(); className = arff.getClassName();
auto attributes = arff.getAttributes(); auto attributes = arff.getAttributes();
transform(attributes.begin(), attributes.end(), back_inserter(features), [](const auto& attribute) { return attribute.first; }); transform(attributes.begin(), attributes.end(), back_inserter(features), [](const auto& f) { return f.first; });
} }
void Dataset::load() void Dataset::load()
{ {
@@ -244,9 +215,9 @@ namespace platform {
if (discretize) { if (discretize) {
Xd = discretizeDataset(Xv, yv); Xd = discretizeDataset(Xv, yv);
computeStates(); computeStates();
n_samples = Xd[0].size();
n_features = Xd.size();
} }
n_samples = Xv[0].size();
n_features = Xv.size();
loaded = true; loaded = true;
} }
void Dataset::buildTensors() void Dataset::buildTensors()

View File

@@ -13,7 +13,7 @@ namespace platform {
string name; string name;
fileType_t fileType; fileType_t fileType;
string className; string className;
int n_samples{ 0 }, n_features{ 0 }; int n_samples, n_features;
vector<string> features; vector<string> features;
map<string, vector<int>> states; map<string, vector<int>> states;
bool loaded; bool loaded;
@@ -27,17 +27,17 @@ namespace platform {
void load_arff(); void load_arff();
void computeStates(); void computeStates();
public: public:
Dataset(const string& path, const string& name, const string& className, bool discretize, fileType_t fileType) : path(path), name(name), className(className), discretize(discretize), loaded(false), fileType(fileType) {}; Dataset(const string& path, const string& name, const string& className, bool discretize, fileType_t fileType) : path(path), name(name), className(className), discretize(discretize), loaded(false), fileType(fileType), n_samples(0), n_features(0) {};
explicit Dataset(const Dataset&); explicit Dataset(Dataset&);
string getName() const; string getName();
string getClassName() const; string getClassName();
vector<string> getFeatures() const; vector<string> getFeatures();
map<string, vector<int>> getStates() const; map<string, vector<int>> getStates();
pair<vector<vector<float>>&, vector<int>&> getVectors(); pair<vector<vector<float>>&, vector<int>&> getVectors();
pair<vector<vector<int>>&, vector<int>&> getVectorsDiscretized(); pair<vector<vector<int>>&, vector<int>&> getVectorsDiscretized();
pair<torch::Tensor&, torch::Tensor&> getTensors(); pair<torch::Tensor&, torch::Tensor&> getTensors();
int getNFeatures() const; int getNFeatures();
int getNSamples() const; int getNSamples();
void load(); void load();
const bool inline isLoaded() const { return loaded; }; const bool inline isLoaded() const { return loaded; };
}; };
@@ -51,18 +51,16 @@ namespace platform {
public: public:
explicit Datasets(const string& path, bool discretize = false, fileType_t fileType = ARFF) : path(path), discretize(discretize), fileType(fileType) { load(); }; explicit Datasets(const string& path, bool discretize = false, fileType_t fileType = ARFF) : path(path), discretize(discretize), fileType(fileType) { load(); };
vector<string> getNames(); vector<string> getNames();
vector<string> getFeatures(const string& name) const; vector<string> getFeatures(const string& name);
int getNSamples(const string& name) const; int getNSamples(const string& name);
string getClassName(const string& name) const; string getClassName(const string& name);
int getNClasses(const string& name); map<string, vector<int>> getStates(const string& name);
vector<int> getClassesCounts(const string& name) const;
map<string, vector<int>> getStates(const string& name) const;
pair<vector<vector<float>>&, vector<int>&> getVectors(const string& name); pair<vector<vector<float>>&, vector<int>&> getVectors(const string& name);
pair<vector<vector<int>>&, vector<int>&> getVectorsDiscretized(const string& name); pair<vector<vector<int>>&, vector<int>&> getVectorsDiscretized(const string& name);
pair<torch::Tensor&, torch::Tensor&> getTensors(const string& name); pair<torch::Tensor&, torch::Tensor&> getTensors(const string& name);
bool isDataset(const string& name) const; bool isDataset(const string& name);
void loadDataset(const string& name) const;
}; };
vector<string> split(const string&, char);
}; };
#endif #endif
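
A short sketch of the catalog API declared above; the datasets/ path and the "iris" name are assumptions. Datasets loads each dataset lazily on first access through the getters:

#include "Datasets.h"
#include <torch/torch.h>
#include <iostream>

int main()
{
    auto datasets = platform::Datasets("datasets/", /*discretize=*/true);
    for (const auto& name : datasets.getNames()) {
        std::cout << name << std::endl;
    }
    if (datasets.isDataset("iris")) {
        auto [X, y] = datasets.getTensors("iris"); // lazy-loads the ARFF file
        std::cout << "iris: X = " << X.sizes() << ", y = " << y.sizes()
                  << ", class = " << datasets.getClassName("iris") << std::endl;
    }
    return 0;
}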

View File

@@ -4,59 +4,64 @@
#include <map> #include <map>
#include <fstream> #include <fstream>
#include <sstream> #include <sstream>
#include "platformUtils.h" std::vector<std::string> split(std::string text, char delimiter)
namespace platform { {
class DotEnv { std::vector<std::string> result;
private: std::stringstream ss(text);
std::map<std::string, std::string> env; std::string token;
std::string trim(const std::string& str) while (getline(ss, token, delimiter)) {
{ result.push_back(token);
std::string result = str; }
result.erase(result.begin(), std::find_if(result.begin(), result.end(), [](int ch) { return result;
return !std::isspace(ch);
}));
result.erase(std::find_if(result.rbegin(), result.rend(), [](int ch) {
return !std::isspace(ch);
}).base(), result.end());
return result;
}
public:
DotEnv()
{
std::ifstream file(".env");
if (!file.is_open()) {
std::cerr << "File .env not found" << std::endl;
exit(1);
}
std::string line;
while (std::getline(file, line)) {
line = trim(line);
if (line.empty() || line[0] == '#') {
continue;
}
std::istringstream iss(line);
std::string key, value;
if (std::getline(iss, key, '=') && std::getline(iss, value)) {
env[key] = value;
}
}
}
std::string get(const std::string& key)
{
return env[key];
}
std::vector<int> getSeeds()
{
auto seeds = std::vector<int>();
auto seeds_str = env["seeds"];
seeds_str = trim(seeds_str);
seeds_str = seeds_str.substr(1, seeds_str.size() - 2);
auto seeds_str_split = split(seeds_str, ',');
transform(seeds_str_split.begin(), seeds_str_split.end(), back_inserter(seeds), [](const std::string& str) {
return stoi(str);
});
return seeds;
}
};
} }
class DotEnv {
private:
std::map<std::string, std::string> env;
std::string trim(const std::string& str)
{
std::string result = str;
result.erase(result.begin(), std::find_if(result.begin(), result.end(), [](int ch) {
return !std::isspace(ch);
}));
result.erase(std::find_if(result.rbegin(), result.rend(), [](int ch) {
return !std::isspace(ch);
}).base(), result.end());
return result;
}
public:
DotEnv()
{
std::ifstream file(".env");
if (!file.is_open()) {
std::cerr << "File .env not found" << std::endl;
exit(1);
}
std::string line;
while (std::getline(file, line)) {
line = trim(line);
if (line.empty() || line[0] == '#') {
continue;
}
std::istringstream iss(line);
std::string key, value;
if (std::getline(iss, key, '=') && std::getline(iss, value)) {
env[key] = value;
}
}
}
std::string get(const std::string& key)
{
return env[key];
}
std::vector<int> getSeeds()
{
auto seeds = std::vector<int>();
auto seeds_str = env["seeds"];
seeds_str = trim(seeds_str);
seeds_str = seeds_str.substr(1, seeds_str.size() - 2);
auto seeds_str_split = split(seeds_str, ',');
transform(seeds_str_split.begin(), seeds_str_split.end(), back_inserter(seeds), [](const auto& s) { return stoi(s); });
return seeds;
}
};
#endif #endif
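
For reference, a .env file this parser accepts could look like the following (hypothetical keys; getSeeds only reads "seeds" and expects a bracketed, comma-separated list):

# .env -- blank lines and '#' comments are skipped
model=TAN
seeds=[271,314,171]

And a usage sketch against it. Note that one side of this diff defines DotEnv inside namespace platform and the other at global scope:

#include "DotEnv.h"
#include <iostream>

int main()
{
    platform::DotEnv env;            // drop platform:: on the un-namespaced side; exits if .env is missing
    std::cout << env.get("model") << std::endl;   // "TAN"
    for (auto seed : env.getSeeds())              // "[271,314,171]" -> {271, 314, 171}
        std::cout << seed << " ";
    std::cout << std::endl;
    return 0;
}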

View File

@@ -1,8 +1,5 @@
#include "Experiment.h" #include "Experiment.h"
#include "Datasets.h"
#include "Models.h"
#include "ReportConsole.h"
#include <fstream>
namespace platform { namespace platform {
using json = nlohmann::json; using json = nlohmann::json;
string get_date() string get_date()
@@ -25,7 +22,6 @@ namespace platform {
oss << std::put_time(timeinfo, "%H:%M:%S"); oss << std::put_time(timeinfo, "%H:%M:%S");
return oss.str(); return oss.str();
} }
Experiment::Experiment() : hyperparameters(json::parse("{}")) {}
string Experiment::get_file_name() string Experiment::get_file_name()
{ {
string result = "results_" + score_name + "_" + model + "_" + platform + "_" + get_date() + "_" + get_time() + "_" + (stratified ? "1" : "0") + ".json"; string result = "results_" + score_name + "_" + model + "_" + platform + "_" + get_date() + "_" + get_time() + "_" + (stratified ? "1" : "0") + ".json";
@@ -47,7 +43,7 @@ namespace platform {
result["discretized"] = discretized; result["discretized"] = discretized;
result["stratified"] = stratified; result["stratified"] = stratified;
result["folds"] = nfolds; result["folds"] = nfolds;
result["seeds"] = randomSeeds; result["seeds"] = random_seeds;
result["duration"] = duration; result["duration"] = duration;
result["results"] = json::array(); result["results"] = json::array();
for (const auto& r : results) { for (const auto& r : results) {
@@ -87,109 +83,10 @@ namespace platform {
file << data; file << data;
file.close(); file.close();
} }
void Experiment::report()
{
json data = build_json();
ReportConsole report(data);
report.show();
}
void Experiment::show() void Experiment::show()
{ {
json data = build_json(); json data = build_json();
cout << data.dump(4) << endl; cout << data.dump(4) << endl;
} }
void Experiment::go(vector<string> filesToProcess, const string& path)
{
cout << "*** Starting experiment: " << title << " ***" << endl;
for (auto fileName : filesToProcess) {
cout << "- " << setw(20) << left << fileName << " " << right << flush;
cross_validation(path, fileName);
cout << endl;
}
}
void Experiment::cross_validation(const string& path, const string& fileName)
{
auto datasets = platform::Datasets(path, discretized, platform::ARFF);
// Get dataset
auto [X, y] = datasets.getTensors(fileName);
auto states = datasets.getStates(fileName);
auto features = datasets.getFeatures(fileName);
auto samples = datasets.getNSamples(fileName);
auto className = datasets.getClassName(fileName);
cout << " (" << setw(5) << samples << "," << setw(3) << features.size() << ") " << flush;
// Prepare Result
auto result = Result();
auto [values, counts] = at::_unique(y);
result.setSamples(X.size(1)).setFeatures(X.size(0)).setClasses(values.size(0));
result.setHyperparameters(hyperparameters);
// Initialize results vectors
int nResults = nfolds * static_cast<int>(randomSeeds.size());
auto accuracy_test = torch::zeros({ nResults }, torch::kFloat64);
auto accuracy_train = torch::zeros({ nResults }, torch::kFloat64);
auto train_time = torch::zeros({ nResults }, torch::kFloat64);
auto test_time = torch::zeros({ nResults }, torch::kFloat64);
auto nodes = torch::zeros({ nResults }, torch::kFloat64);
auto edges = torch::zeros({ nResults }, torch::kFloat64);
auto num_states = torch::zeros({ nResults }, torch::kFloat64);
Timer train_timer, test_timer;
int item = 0;
for (auto seed : randomSeeds) {
cout << "(" << seed << ") doing Fold: " << flush;
Fold* fold;
if (stratified)
fold = new StratifiedKFold(nfolds, y, seed);
else
fold = new KFold(nfolds, y.size(0), seed);
for (int nfold = 0; nfold < nfolds; nfold++) {
auto clf = Models::instance()->create(model);
setModelVersion(clf->getVersion());
if (hyperparameters.size() != 0) {
clf->setHyperparameters(hyperparameters);
}
// Split train - test dataset
train_timer.start();
auto [train, test] = fold->getFold(nfold);
auto train_t = torch::tensor(train);
auto test_t = torch::tensor(test);
auto X_train = X.index({ "...", train_t });
auto y_train = y.index({ train_t });
auto X_test = X.index({ "...", test_t });
auto y_test = y.index({ test_t });
cout << nfold + 1 << ", " << flush;
// Train model
clf->fit(X_train, y_train, features, className, states);
nodes[item] = clf->getNumberOfNodes();
edges[item] = clf->getNumberOfEdges();
num_states[item] = clf->getNumberOfStates();
train_time[item] = train_timer.getDuration();
auto accuracy_train_value = clf->score(X_train, y_train);
// Test model
test_timer.start();
auto accuracy_test_value = clf->score(X_test, y_test);
test_time[item] = test_timer.getDuration();
accuracy_train[item] = accuracy_train_value;
accuracy_test[item] = accuracy_test_value;
// Store results and times in vector
result.addScoreTrain(accuracy_train_value);
result.addScoreTest(accuracy_test_value);
result.addTimeTrain(train_time[item].item<double>());
result.addTimeTest(test_time[item].item<double>());
item++;
clf.reset();
}
cout << "end. " << flush;
delete fold;
}
result.setScoreTest(torch::mean(accuracy_test).item<double>()).setScoreTrain(torch::mean(accuracy_train).item<double>());
result.setScoreTestStd(torch::std(accuracy_test).item<double>()).setScoreTrainStd(torch::std(accuracy_train).item<double>());
result.setTrainTime(torch::mean(train_time).item<double>()).setTestTime(torch::mean(test_time).item<double>());
result.setTestTimeStd(torch::std(test_time).item<double>()).setTrainTimeStd(torch::std(train_time).item<double>());
result.setNodes(torch::mean(nodes).item<double>()).setLeaves(torch::mean(edges).item<double>()).setDepth(torch::mean(num_states).item<double>());
result.setDataset(fileName);
addResult(result);
}
} }

View File

@@ -4,11 +4,6 @@
#include <nlohmann/json.hpp> #include <nlohmann/json.hpp>
#include <string> #include <string>
#include <chrono> #include <chrono>
#include "Folding.h"
#include "BaseClassifier.h"
#include "TAN.h"
#include "KDB.h"
#include "AODE.h"
using namespace std; using namespace std;
namespace platform { namespace platform {
@@ -29,38 +24,38 @@ namespace platform {
}; };
class Result { class Result {
private: private:
string dataset, model_version; string dataset = "", hyperparameters = "", model_version = "";
json hyperparameters;
int samples{ 0 }, features{ 0 }, classes{ 0 }; int samples{ 0 }, features{ 0 }, classes{ 0 };
double score_train{ 0 }, score_test{ 0 }, score_train_std{ 0 }, score_test_std{ 0 }, train_time{ 0 }, train_time_std{ 0 }, test_time{ 0 }, test_time_std{ 0 }; double score_train{ 0 }, score_test = 0, score_train_std = 0, score_test_std = 0, train_time = 0, train_time_std = 0, test_time = 0, test_time_std = 0;
vector<double> scores_train{}, scores_test{}, times_train{}, times_test{};
float nodes{ 0 }, leaves{ 0 }, depth{ 0 }; float nodes{ 0 }, leaves{ 0 }, depth{ 0 };
vector<double> scores_train, scores_test, times_train, times_test;
public: public:
Result() = default; Result() = default;
Result& setDataset(const string& dataset) { this->dataset = dataset; return *this; } Result& setDataset(const string& dataset) { this->dataset = dataset; return *this; }
Result& setHyperparameters(const json& hyperparameters) { this->hyperparameters = hyperparameters; return *this; } Result& setHyperparameters(const string& hyperparameters) { this->hyperparameters = hyperparameters; return *this; }
Result& setSamples(int samples) { this->samples = samples; return *this; } Result& setSamples(const int samples) { this->samples = samples; return *this; }
Result& setFeatures(int features) { this->features = features; return *this; } Result& setFeatures(const int features) { this->features = features; return *this; }
Result& setClasses(int classes) { this->classes = classes; return *this; } Result& setClasses(const int classes) { this->classes = classes; return *this; }
Result& setScoreTrain(double score) { this->score_train = score; return *this; } Result& setScoreTrain(const double score) { this->score_train = score; return *this; }
Result& setScoreTest(double score) { this->score_test = score; return *this; } Result& setScoreTest(const double score) { this->score_test = score; return *this; }
Result& setScoreTrainStd(double score_std) { this->score_train_std = score_std; return *this; } Result& setScoreTrainStd(const double score_std) { this->score_train_std = score_std; return *this; }
Result& setScoreTestStd(double score_std) { this->score_test_std = score_std; return *this; } Result& setScoreTestStd(const double score_std) { this->score_test_std = score_std; return *this; }
Result& setTrainTime(double train_time) { this->train_time = train_time; return *this; } Result& setTrainTime(const double train_time) { this->train_time = train_time; return *this; }
Result& setTrainTimeStd(double train_time_std) { this->train_time_std = train_time_std; return *this; } Result& setTrainTimeStd(const double train_time_std) { this->train_time_std = train_time_std; return *this; }
Result& setTestTime(double test_time) { this->test_time = test_time; return *this; } Result& setTestTime(const double test_time) { this->test_time = test_time; return *this; }
Result& setTestTimeStd(double test_time_std) { this->test_time_std = test_time_std; return *this; } Result& setTestTimeStd(const double test_time_std) { this->test_time_std = test_time_std; return *this; }
Result& setNodes(float nodes) { this->nodes = nodes; return *this; } Result& setNodes(const float nodes) { this->nodes = nodes; return *this; }
Result& setLeaves(float leaves) { this->leaves = leaves; return *this; } Result& setLeaves(const float leaves) { this->leaves = leaves; return *this; }
Result& setDepth(float depth) { this->depth = depth; return *this; } Result& setDepth(const float depth) { this->depth = depth; return *this; }
Result& addScoreTrain(double score) { scores_train.push_back(score); return *this; } Result& setModelVersion(const string& model_version) { this->model_version = model_version; return *this; }
Result& addScoreTest(double score) { scores_test.push_back(score); return *this; } Result& addScoreTrain(const double score) { scores_train.push_back(score); return *this; }
Result& addTimeTrain(double time) { times_train.push_back(time); return *this; } Result& addScoreTest(const double score) { scores_test.push_back(score); return *this; }
Result& addTimeTest(double time) { times_test.push_back(time); return *this; } Result& addTimeTrain(const double time) { times_train.push_back(time); return *this; }
Result& addTimeTest(const double time) { times_test.push_back(time); return *this; }
const float get_score_train() const { return score_train; } const float get_score_train() const { return score_train; }
float get_score_test() { return score_test; } float get_score_test() { return score_test; }
const string& getDataset() const { return dataset; } const string& getDataset() const { return dataset; }
const json& getHyperparameters() const { return hyperparameters; } const string& getHyperparameters() const { return hyperparameters; }
const int getSamples() const { return samples; } const int getSamples() const { return samples; }
const int getFeatures() const { return features; } const int getFeatures() const { return features; }
const int getClasses() const { return classes; } const int getClasses() const { return classes; }
@@ -79,19 +74,19 @@ namespace platform {
const vector<double>& getScoresTest() const { return scores_test; } const vector<double>& getScoresTest() const { return scores_test; }
const vector<double>& getTimesTrain() const { return times_train; } const vector<double>& getTimesTrain() const { return times_train; }
const vector<double>& getTimesTest() const { return times_test; } const vector<double>& getTimesTest() const { return times_test; }
const string& getModelVersion() const { return model_version; }
}; };
class Experiment { class Experiment {
private: private:
string title, model, platform, score_name, model_version, language_version, language; string title{""}, model{""}, platform{""}, score_name{""}, model_version{""}, language_version{""}, language{""};
bool discretized{ false }, stratified{ false }; bool discretized{false}, stratified{false};
vector<Result> results; vector<Result> results;
vector<int> randomSeeds; vector<int> random_seeds;
json hyperparameters = "{}"; int nfolds{0};
int nfolds{ 0 }; float duration{0};
float duration{ 0 };
json build_json(); json build_json();
public: public:
Experiment(); Experiment() = default;
Experiment& setTitle(const string& title) { this->title = title; return *this; } Experiment& setTitle(const string& title) { this->title = title; return *this; }
Experiment& setModel(const string& model) { this->model = model; return *this; } Experiment& setModel(const string& model) { this->model = model; return *this; }
Experiment& setPlatform(const string& platform) { this->platform = platform; return *this; } Experiment& setPlatform(const string& platform) { this->platform = platform; return *this; }
@@ -99,19 +94,15 @@ namespace platform {
Experiment& setModelVersion(const string& model_version) { this->model_version = model_version; return *this; } Experiment& setModelVersion(const string& model_version) { this->model_version = model_version; return *this; }
Experiment& setLanguage(const string& language) { this->language = language; return *this; } Experiment& setLanguage(const string& language) { this->language = language; return *this; }
Experiment& setLanguageVersion(const string& language_version) { this->language_version = language_version; return *this; } Experiment& setLanguageVersion(const string& language_version) { this->language_version = language_version; return *this; }
Experiment& setDiscretized(bool discretized) { this->discretized = discretized; return *this; } Experiment& setDiscretized(const bool discretized) { this->discretized = discretized; return *this; }
Experiment& setStratified(bool stratified) { this->stratified = stratified; return *this; } Experiment& setStratified(const bool stratified) { this->stratified = stratified; return *this; }
Experiment& setNFolds(int nfolds) { this->nfolds = nfolds; return *this; } Experiment& setNFolds(const int nfolds) { this->nfolds = nfolds; return *this; }
Experiment& addResult(Result result) { results.push_back(result); return *this; } Experiment& addResult(Result result) { results.push_back(result); return *this; }
Experiment& addRandomSeed(int randomSeed) { randomSeeds.push_back(randomSeed); return *this; } Experiment& addRandomSeed(const int random_seed) { random_seeds.push_back(random_seed); return *this; }
Experiment& setDuration(float duration) { this->duration = duration; return *this; } Experiment& setDuration(const float duration) { this->duration = duration; return *this; }
Experiment& setHyperparameters(const json& hyperparameters) { this->hyperparameters = hyperparameters; return *this; }
string get_file_name(); string get_file_name();
void save(const string& path); void save(const string& path);
void cross_validation(const string& path, const string& fileName);
void go(vector<string> filesToProcess, const string& path);
void show(); void show();
void report();
}; };
} }
#endif #endif
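
The setters above return *this, so an experiment is assembled fluently. A minimal sketch with illustrative values (the results/ directory is an assumption and must exist before save is called):

#include "Experiment.h"

int main()
{
    platform::Experiment experiment;
    experiment.setTitle("Debug run").setModel("TAN").setPlatform("linux")
        .setDiscretized(true).setStratified(true).setNFolds(5)
        .addRandomSeed(271).setDuration(12.3f);
    experiment.show();           // dumps the experiment as pretty-printed JSON
    experiment.save("results/"); // writes results_<score>_<model>_<date>_<time>_<stratified>.json
    return 0;
}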

View File

@@ -1,97 +1,95 @@
#include "Folding.h" #include "Folding.h"
#include <algorithm> #include <algorithm>
#include <map> #include <map>
namespace platform { Fold::Fold(int k, int n, int seed) : k(k), n(n), seed(seed)
Fold::Fold(int k, int n, int seed) : k(k), n(n), seed(seed) {
{ random_device rd;
random_device rd; random_seed = default_random_engine(seed == -1 ? rd() : seed);
random_seed = default_random_engine(seed == -1 ? rd() : seed); srand(seed == -1 ? time(0) : seed);
srand(seed == -1 ? time(0) : seed); }
KFold::KFold(int k, int n, int seed) : Fold(k, n, seed), indices(vector<int>())
{
iota(begin(indices), end(indices), 0); // fill with 0, 1, ..., n - 1
shuffle(indices.begin(), indices.end(), random_seed);
}
pair<vector<int>, vector<int>> KFold::getFold(int nFold)
{
if (nFold >= k || nFold < 0) {
throw out_of_range("nFold (" + to_string(nFold) + ") must be less than k (" + to_string(k) + ")");
} }
KFold::KFold(int k, int n, int seed) : Fold(k, n, seed), indices(vector<int>(n)) int nTest = n / k;
{ auto train = vector<int>();
iota(begin(indices), end(indices), 0); // fill with 0, 1, ..., n - 1 auto test = vector<int>();
for (int i = 0; i < n; i++) {
if (i >= nTest * nFold && i < nTest * (nFold + 1)) {
test.push_back(indices[i]);
} else {
train.push_back(indices[i]);
}
}
return { train, test };
}
StratifiedKFold::StratifiedKFold(int k, torch::Tensor& y, int seed) : Fold(k, y.numel(), seed)
{
n = y.numel();
this->y = vector<int>(y.data_ptr<int>(), y.data_ptr<int>() + n);
build();
}
StratifiedKFold::StratifiedKFold(int k, const vector<int>& y, int seed)
: Fold(k, y.size(), seed)
{
this->y = y;
n = y.size();
build();
}
void StratifiedKFold::build()
{
stratified_indices = vector<vector<int>>(k);
int fold_size = n / k;
// Compute class counts and indices
auto class_indices = map<int, vector<int>>();
vector<int> class_counts(*max_element(y.begin(), y.end()) + 1, 0);
for (auto i = 0; i < n; ++i) {
class_counts[y[i]]++;
class_indices[y[i]].push_back(i);
}
// Shuffle class indices
for (auto& [cls, indices] : class_indices) {
shuffle(indices.begin(), indices.end(), random_seed); shuffle(indices.begin(), indices.end(), random_seed);
} }
pair<vector<int>, vector<int>> KFold::getFold(int nFold) // Assign indices to folds
{ for (auto label = 0; label < class_counts.size(); ++label) {
if (nFold >= k || nFold < 0) { auto num_samples_to_take = class_counts[label] / k;
throw out_of_range("nFold (" + to_string(nFold) + ") must be less than k (" + to_string(k) + ")"); if (num_samples_to_take == 0)
continue;
auto remainder_samples_to_take = class_counts[label] % k;
for (auto fold = 0; fold < k; ++fold) {
auto it = next(class_indices[label].begin(), num_samples_to_take);
move(class_indices[label].begin(), it, back_inserter(stratified_indices[fold])); // move this class's share into the fold
class_indices[label].erase(class_indices[label].begin(), it);
} }
int nTest = n / k; while (remainder_samples_to_take > 0) {
auto train = vector<int>(); int fold = (rand() % static_cast<int>(k));
auto test = vector<int>(); if (stratified_indices[fold].size() == fold_size + 1) {
for (int i = 0; i < n; i++) {
if (i >= nTest * nFold && i < nTest * (nFold + 1)) {
test.push_back(indices[i]);
} else {
train.push_back(indices[i]);
}
}
return { train, test };
}
StratifiedKFold::StratifiedKFold(int k, torch::Tensor& y, int seed) : Fold(k, y.numel(), seed)
{
n = y.numel();
this->y = vector<int>(y.data_ptr<int>(), y.data_ptr<int>() + n);
build();
}
StratifiedKFold::StratifiedKFold(int k, const vector<int>& y, int seed)
: Fold(k, y.size(), seed)
{
this->y = y;
n = y.size();
build();
}
void StratifiedKFold::build()
{
stratified_indices = vector<vector<int>>(k);
int fold_size = n / k;
// Compute class counts and indices
auto class_indices = map<int, vector<int>>();
vector<int> class_counts(*max_element(y.begin(), y.end()) + 1, 0);
for (auto i = 0; i < n; ++i) {
class_counts[y[i]]++;
class_indices[y[i]].push_back(i);
}
// Shuffle class indices
for (auto& [cls, indices] : class_indices) {
shuffle(indices.begin(), indices.end(), random_seed);
}
// Assign indices to folds
for (auto label = 0; label < class_counts.size(); ++label) {
auto num_samples_to_take = class_counts[label] / k;
if (num_samples_to_take == 0)
continue; continue;
auto remainder_samples_to_take = class_counts[label] % k;
for (auto fold = 0; fold < k; ++fold) {
auto it = next(class_indices[label].begin(), num_samples_to_take);
move(class_indices[label].begin(), it, back_inserter(stratified_indices[fold])); // move this class's share into the fold
class_indices[label].erase(class_indices[label].begin(), it);
}
while (remainder_samples_to_take > 0) {
int fold = (rand() % static_cast<int>(k));
if (stratified_indices[fold].size() == fold_size + 1) {
continue;
}
auto it = next(class_indices[label].begin(), 1);
stratified_indices[fold].push_back(*class_indices[label].begin());
class_indices[label].erase(class_indices[label].begin(), it);
remainder_samples_to_take--;
} }
auto it = next(class_indices[label].begin(), 1);
stratified_indices[fold].push_back(*class_indices[label].begin());
class_indices[label].erase(class_indices[label].begin(), it);
remainder_samples_to_take--;
} }
} }
pair<vector<int>, vector<int>> StratifiedKFold::getFold(int nFold) }
{ pair<vector<int>, vector<int>> StratifiedKFold::getFold(int nFold)
if (nFold >= k || nFold < 0) { {
throw out_of_range("nFold (" + to_string(nFold) + ") must be less than k (" + to_string(k) + ")"); if (nFold >= k || nFold < 0) {
} throw out_of_range("nFold (" + to_string(nFold) + ") must be less than k (" + to_string(k) + ")");
vector<int> test_indices = stratified_indices[nFold];
vector<int> train_indices;
for (int i = 0; i < k; ++i) {
if (i == nFold) continue;
train_indices.insert(train_indices.end(), stratified_indices[i].begin(), stratified_indices[i].end());
}
return { train_indices, test_indices };
} }
vector<int> test_indices = stratified_indices[nFold];
vector<int> train_indices;
for (int i = 0; i < k; ++i) {
if (i == nFold) continue;
train_indices.insert(train_indices.end(), stratified_indices[i].begin(), stratified_indices[i].end());
}
return { train_indices, test_indices };
} }

View File

@@ -4,35 +4,34 @@
#include <vector> #include <vector>
#include <random> #include <random>
using namespace std; using namespace std;
namespace platform {
class Fold { class Fold {
protected: protected:
int k; int k;
int n; int n;
int seed; int seed;
default_random_engine random_seed; default_random_engine random_seed;
public: public:
Fold(int k, int n, int seed = -1); Fold(int k, int n, int seed = -1);
virtual pair<vector<int>, vector<int>> getFold(int nFold) = 0; virtual pair<vector<int>, vector<int>> getFold(int nFold) = 0;
virtual ~Fold() = default; virtual ~Fold() = default;
int getNumberOfFolds() { return k; } int getNumberOfFolds() { return k; }
}; };
class KFold : public Fold { class KFold : public Fold {
private: private:
vector<int> indices; vector<int> indices;
public: public:
KFold(int k, int n, int seed = -1); KFold(int k, int n, int seed = -1);
pair<vector<int>, vector<int>> getFold(int nFold) override; pair<vector<int>, vector<int>> getFold(int nFold) override;
}; };
class StratifiedKFold : public Fold { class StratifiedKFold : public Fold {
private: private:
vector<int> y; vector<int> y;
vector<vector<int>> stratified_indices; vector<vector<int>> stratified_indices;
void build(); void build();
public: public:
StratifiedKFold(int k, const vector<int>& y, int seed = -1); StratifiedKFold(int k, const vector<int>& y, int seed = -1);
StratifiedKFold(int k, torch::Tensor& y, int seed = -1); StratifiedKFold(int k, torch::Tensor& y, int seed = -1);
pair<vector<int>, vector<int>> getFold(int nFold) override; pair<vector<int>, vector<int>> getFold(int nFold) override;
}; };
}
#endif #endif
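
A quick sketch of the stratified splitter declared above, with hypothetical labels. With ten samples, two balanced classes and k=5, every test fold receives exactly one sample of each class:

#include "Folding.h"
#include <iostream>
#include <vector>

int main()
{
    std::vector<int> y = { 0, 0, 0, 0, 0, 1, 1, 1, 1, 1 };
    platform::StratifiedKFold fold(5, y, 271);   // drop platform:: on the un-namespaced side
    for (int i = 0; i < fold.getNumberOfFolds(); ++i) {
        auto [train, test] = fold.getFold(i);    // 8 train / 2 test, class balance preserved
        std::cout << "fold " << i << ": " << train.size() << " train, "
                  << test.size() << " test" << std::endl;
    }
    return 0;
}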

View File

@@ -1,54 +1,8 @@
#include "Models.h" #include "Models.h"
namespace platform { namespace platform {
using namespace std; using namespace std;
// Idea from: https://www.codeproject.com/Articles/567242/AplusC-2b-2bplusObjectplusFactory map<string, bayesnet::BaseClassifier*> Models::classifiers = map<string, bayesnet::BaseClassifier*>({
Models* Models::factory = nullptr; { "AODE", new bayesnet::AODE() }, { "KDB", new bayesnet::KDB(2) },
Models* Models::instance() { "SPODE", new bayesnet::SPODE(2) }, { "TAN", new bayesnet::TAN() }
{ });
//manages singleton }
if (factory == nullptr)
factory = new Models();
return factory;
}
void Models::registerFactoryFunction(const string& name,
function<bayesnet::BaseClassifier* (void)> classFactoryFunction)
{
// register the class factory function
functionRegistry[name] = classFactoryFunction;
}
shared_ptr<bayesnet::BaseClassifier> Models::create(const string& name)
{
bayesnet::BaseClassifier* instance = nullptr;
// find name in the registry and call factory method.
auto it = functionRegistry.find(name);
if (it != functionRegistry.end())
instance = it->second();
// wrap instance in a shared ptr and return
if (instance != nullptr)
return unique_ptr<bayesnet::BaseClassifier>(instance);
else
return nullptr;
}
vector<string> Models::getNames()
{
vector<string> names;
transform(functionRegistry.begin(), functionRegistry.end(), back_inserter(names),
[](const pair<string, function<bayesnet::BaseClassifier* (void)>>& pair) { return pair.first; });
return names;
}
string Models::toString()
{
string result = "";
for (const auto& pair : functionRegistry) {
result += pair.first + ", ";
}
return "{" + result.substr(0, result.size() - 2) + "}";
}
Registrar::Registrar(const string& name, function<bayesnet::BaseClassifier* (void)> classFactoryFunction)
{
// register the class factory function
Models::instance()->registerFactoryFunction(name, classFactoryFunction);
}
}

View File

@@ -6,32 +6,28 @@
#include "TAN.h" #include "TAN.h"
#include "KDB.h" #include "KDB.h"
#include "SPODE.h" #include "SPODE.h"
#include "TANLd.h"
#include "KDBLd.h"
#include "SPODELd.h"
#include "AODELd.h"
#include "BoostAODE.h"
namespace platform { namespace platform {
class Models { class Models {
private: private:
map<string, function<bayesnet::BaseClassifier* (void)>> functionRegistry; static map<string, bayesnet::BaseClassifier*> classifiers;
static Models* factory; //singleton
Models() {};
public: public:
Models(Models&) = delete; static bayesnet::BaseClassifier* get(string name) { return classifiers[name]; }
void operator=(const Models&) = delete; static vector<string> getNames()
// Idea from: https://www.codeproject.com/Articles/567242/AplusC-2b-2bplusObjectplusFactory {
static Models* instance(); vector<string> names;
shared_ptr<bayesnet::BaseClassifier> create(const string& name); for (auto& [name, classifier] : classifiers) {
void registerFactoryFunction(const string& name, names.push_back(name);
function<bayesnet::BaseClassifier* (void)> classFactoryFunction); }
vector<string> getNames(); return names;
string toString(); }
static string toString()
}; {
class Registrar { string names = "";
public: for (auto& [name, classifier] : classifiers) {
Registrar(const string& className, function<bayesnet::BaseClassifier* (void)> classFactoryFunction); names += name + ", ";
}
return "{" + names.substr(0, names.size() - 2) + "}";
}
}; };
} }
#endif #endif
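
A usage sketch against the static-map side of the diff above; the factory side would instead call platform::Models::instance()->create("TAN") and receive a shared_ptr. getVersion comes from bayesnet::BaseClassifier, as used in CrossValidation.cc earlier in this diff:

#include "Models.h"
#include <iostream>

int main()
{
    std::cout << "registered: " << platform::Models::toString() << std::endl; // {AODE, KDB, SPODE, TAN}
    bayesnet::BaseClassifier* clf = platform::Models::get("TAN");
    // The static map owns the instance, so the caller must not delete clf.
    std::cout << "TAN version: " << clf->getVersion() << std::endl;
    return 0;
}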

View File

@@ -1,12 +0,0 @@
#ifndef PATHS_H
#define PATHS_H
#include <string>
namespace platform {
class Paths {
public:
static std::string datasets() { return "datasets/"; }
static std::string results() { return "results/"; }
static std::string excel() { return "excel/"; }
};
}
#endif

View File

@@ -1,37 +0,0 @@
#include <sstream>
#include <locale>
#include "ReportBase.h"
#include "BestResult.h"
namespace platform {
string ReportBase::fromVector(const string& key)
{
stringstream oss;
string sep = "";
oss << "[";
for (auto& item : data[key]) {
oss << sep << item.get<double>();
sep = ", ";
}
oss << "]";
return oss.str();
}
string ReportBase::fVector(const string& title, const json& data, const int width, const int precision)
{
stringstream oss;
string sep = "";
oss << title << "[";
for (const auto& item : data) {
oss << sep << fixed << setw(width) << setprecision(precision) << item.get<double>();
sep = ", ";
}
oss << "]";
return oss.str();
}
void ReportBase::show()
{
header();
body();
}
}

View File

@@ -1,23 +0,0 @@
#ifndef REPORTBASE_H
#define REPORTBASE_H
#include <string>
#include <iostream>
#include <nlohmann/json.hpp>
using json = nlohmann::json;
namespace platform {
using namespace std;
class ReportBase {
public:
explicit ReportBase(json data_) { data = data_; };
virtual ~ReportBase() = default;
void show();
protected:
json data;
string fromVector(const string& key);
string fVector(const string& title, const json& data, const int width, const int precision);
virtual void header() = 0;
virtual void body() = 0;
};
};
#endif

View File

@@ -1,88 +0,0 @@
#include <sstream>
#include <locale>
#include "ReportConsole.h"
#include "BestResult.h"
namespace platform {
struct separated : numpunct<char> {
char do_decimal_point() const { return ','; }
char do_thousands_sep() const { return '.'; }
string do_grouping() const { return "\03"; }
};
string ReportConsole::headerLine(const string& text)
{
int n = MAXL - text.length() - 3;
n = n < 0 ? 0 : n;
return "* " + text + string(n, ' ') + "*\n";
}
void ReportConsole::header()
{
locale mylocale(cout.getloc(), new separated);
locale::global(mylocale);
cout.imbue(mylocale);
stringstream oss;
cout << Colors::MAGENTA() << string(MAXL, '*') << endl;
cout << headerLine("Report " + data["model"].get<string>() + " ver. " + data["version"].get<string>() + " with " + to_string(data["folds"].get<int>()) + " Folds cross validation and " + to_string(data["seeds"].size()) + " random seeds. " + data["date"].get<string>() + " " + data["time"].get<string>());
cout << headerLine(data["title"].get<string>());
cout << headerLine("Random seeds: " + fromVector("seeds") + " Stratified: " + (data["stratified"].get<bool>() ? "True" : "False"));
oss << "Execution took " << setprecision(2) << fixed << data["duration"].get<float>() << " seconds, " << data["duration"].get<float>() / 3600 << " hours, on " << data["platform"].get<string>();
cout << headerLine(oss.str());
cout << headerLine("Score is " + data["score_name"].get<string>());
cout << string(MAXL, '*') << endl;
cout << endl;
}
void ReportConsole::body()
{
cout << Colors::GREEN() << "Dataset Sampl. Feat. Cls Nodes Edges States Score Time Hyperparameters" << endl;
cout << "============================== ====== ===== === ========= ========= ========= =============== ================== ===============" << endl;
json lastResult;
double totalScore = 0.0;
bool odd = true;
for (const auto& r : data["results"]) {
auto color = odd ? Colors::CYAN() : Colors::BLUE();
cout << color << setw(30) << left << r["dataset"].get<string>() << " ";
cout << setw(6) << right << r["samples"].get<int>() << " ";
cout << setw(5) << right << r["features"].get<int>() << " ";
cout << setw(3) << right << r["classes"].get<int>() << " ";
cout << setw(9) << setprecision(2) << fixed << r["nodes"].get<float>() << " ";
cout << setw(9) << setprecision(2) << fixed << r["leaves"].get<float>() << " ";
cout << setw(9) << setprecision(2) << fixed << r["depth"].get<float>() << " ";
cout << setw(8) << right << setprecision(6) << fixed << r["score"].get<double>() << "±" << setw(6) << setprecision(4) << fixed << r["score_std"].get<double>() << " ";
cout << setw(11) << right << setprecision(6) << fixed << r["time"].get<double>() << "±" << setw(6) << setprecision(4) << fixed << r["time_std"].get<double>() << " ";
try {
cout << r["hyperparameters"].get<string>();
}
catch (const exception& err) {
cout << r["hyperparameters"];
}
cout << endl;
lastResult = r;
totalScore += r["score"].get<double>();
odd = !odd;
}
if (data["results"].size() == 1) {
cout << string(MAXL, '*') << endl;
cout << headerLine(fVector("Train scores: ", lastResult["scores_train"], 14, 12));
cout << headerLine(fVector("Test scores: ", lastResult["scores_test"], 14, 12));
cout << headerLine(fVector("Train times: ", lastResult["times_train"], 10, 3));
cout << headerLine(fVector("Test times: ", lastResult["times_test"], 10, 3));
cout << string(MAXL, '*') << endl;
} else {
footer(totalScore);
}
}
void ReportConsole::footer(double totalScore)
{
cout << Colors::MAGENTA() << string(MAXL, '*') << endl;
auto score = data["score_name"].get<string>();
if (score == BestResult::scoreName()) {
stringstream oss;
oss << score << " compared to " << BestResult::title() << " .: " << totalScore / BestResult::score();
cout << headerLine(oss.str());
}
cout << string(MAXL, '*') << endl << Colors::RESET();
}
}

View File

@@ -1,22 +0,0 @@
#ifndef REPORTCONSOLE_H
#define REPORTCONSOLE_H
#include <string>
#include <iostream>
#include "ReportBase.h"
#include "Colors.h"
namespace platform {
using namespace std;
const int MAXL = 128;
class ReportConsole : public ReportBase{
public:
explicit ReportConsole(json data_) : ReportBase(data_) {};
virtual ~ReportConsole() = default;
private:
string headerLine(const string& text);
void header() override;
void body() override;
void footer(double totalScore);
};
};
#endif

View File

@@ -1,109 +0,0 @@
#include <sstream>
#include <locale>
#include "ReportExcel.h"
#include "BestResult.h"
namespace platform {
struct separated : numpunct<char> {
char do_decimal_point() const { return ','; }
char do_thousands_sep() const { return '.'; }
string do_grouping() const { return "\03"; }
};
void ReportExcel::createFile()
{
doc.create(Paths::excel() + "some_results.xlsx");
wks = doc.workbook().worksheet("Sheet1");
wks.setName(data["model"].get<string>());
}
void ReportExcel::closeFile()
{
doc.save();
doc.close();
}
void ReportExcel::header()
{
locale mylocale(cout.getloc(), new separated);
locale::global(mylocale);
cout.imbue(mylocale);
stringstream oss;
wks.cell("A1").value().set(
"Report " + data["model"].get<string>() + " ver. " + data["version"].get<string>() + " with " +
to_string(data["folds"].get<int>()) + " Folds cross validation and " + to_string(data["seeds"].size()) +
" random seeds. " + data["date"].get<string>() + " " + data["time"].get<string>());
wks.cell("A2").value() = data["title"].get<string>();
wks.cell("A3").value() = "Random seeds: " + fromVector("seeds") + " Stratified: " +
(data["stratified"].get<bool>() ? "True" : "False");
oss << "Execution took " << setprecision(2) << fixed << data["duration"].get<float>() << " seconds, "
<< data["duration"].get<float>() / 3600 << " hours, on " << data["platform"].get<string>();
wks.cell("A4").value() = oss.str();
wks.cell("A5").value() = "Score is " + data["score_name"].get<string>();
}
void ReportExcel::body()
{
auto head = vector<string>(
{ "Dataset", "Samples", "Features", "Classes", "Nodes", "Edges", "States", "Score", "Score Std.", "Time",
"Time Std.", "Hyperparameters" });
int col = 1;
for (const auto& item : head) {
wks.cell(8, col++).value() = item;
}
int row = 9;
col = 1;
json lastResult;
double totalScore = 0.0;
string hyperparameters;
for (const auto& r : data["results"]) {
wks.cell(row, col).value() = r["dataset"].get<string>();
wks.cell(row, col + 1).value() = r["samples"].get<int>();
wks.cell(row, col + 2).value() = r["features"].get<int>();
wks.cell(row, col + 3).value() = r["classes"].get<int>();
wks.cell(row, col + 4).value() = r["nodes"].get<float>();
wks.cell(row, col + 5).value() = r["leaves"].get<float>();
wks.cell(row, col + 6).value() = r["depth"].get<float>();
wks.cell(row, col + 7).value() = r["score"].get<double>();
wks.cell(row, col + 8).value() = r["score_std"].get<double>();
wks.cell(row, col + 9).value() = r["time"].get<double>();
wks.cell(row, col + 10).value() = r["time_std"].get<double>();
try {
hyperparameters = r["hyperparameters"].get<string>();
}
catch (const exception& err) {
stringstream oss;
oss << r["hyperparameters"];
hyperparameters = oss.str();
}
wks.cell(row, col + 11).value() = hyperparameters;
lastResult = r;
totalScore += r["score"].get<double>();
row++;
}
if (data["results"].size() == 1) {
for (const string& group : { "scores_train", "scores_test", "times_train", "times_test" }) {
row++;
col = 1;
wks.cell(row, col).value() = group;
for (double item : lastResult[group]) {
wks.cell(row, ++col).value() = item;
}
}
} else {
footer(totalScore, row);
}
}
void ReportExcel::footer(double totalScore, int row)
{
auto score = data["score_name"].get<string>();
if (score == BestResult::scoreName()) {
wks.cell(row + 2, 1).value() = score + " compared to " + BestResult::title() + " .: ";
wks.cell(row + 2, 5).value() = totalScore / BestResult::score();
}
}
}

View File

@@ -1,25 +0,0 @@
#ifndef REPORTEXCEL_H
#define REPORTEXCEL_H
#include <OpenXLSX.hpp>
#include "ReportBase.h"
#include "Paths.h"
#include "Colors.h"
namespace platform {
using namespace std;
using namespace OpenXLSX;
const int MAXLL = 128;
class ReportExcel : public ReportBase{
public:
explicit ReportExcel(json data_) : ReportBase(data_) {createFile();};
virtual ~ReportExcel() {closeFile();};
private:
void createFile();
void closeFile();
XLDocument doc;
XLWorksheet wks;
void header() override;
void body() override;
void footer(double totalScore, int row);
};
};
#endif // !REPORTEXCEL_H

View File

@@ -1,254 +0,0 @@
#include <filesystem>
#include "platformUtils.h"
#include "Results.h"
#include "ReportConsole.h"
#include "ReportExcel.h"
#include "BestResult.h"
#include "Colors.h"
namespace platform {
Result::Result(const string& path, const string& filename)
: path(path)
, filename(filename)
{
auto data = load();
date = data["date"];
score = 0;
for (const auto& result : data["results"]) {
score += result["score"].get<double>();
}
scoreName = data["score_name"];
if (scoreName == BestResult::scoreName()) {
score /= BestResult::score();
}
title = data["title"];
duration = data["duration"];
model = data["model"];
}
json Result::load() const
{
ifstream resultData(path + "/" + filename);
if (resultData.is_open()) {
json data = json::parse(resultData);
return data;
}
throw invalid_argument("Unable to open result file. [" + path + "/" + filename + "]");
}
void Results::load()
{
using std::filesystem::directory_iterator;
for (const auto& file : directory_iterator(path)) {
auto filename = file.path().filename().string();
if (filename.find(".json") != string::npos && filename.find("results_") == 0) {
auto result = Result(path, filename);
bool addResult = true;
if (model != "any" && result.getModel() != model || scoreName != "any" && scoreName != result.getScoreName())
addResult = false;
if (addResult)
files.push_back(result);
}
}
}
string Result::to_string() const
{
stringstream oss;
oss << date << " ";
oss << setw(12) << left << model << " ";
oss << setw(11) << left << scoreName << " ";
oss << right << setw(11) << setprecision(7) << fixed << score << " ";
oss << setw(9) << setprecision(3) << fixed << duration << " ";
oss << setw(50) << left << title << " ";
return oss.str();
}
void Results::show() const
{
cout << Colors::GREEN() << "Results found: " << files.size() << endl;
cout << "-------------------" << endl;
auto i = 0;
cout << " # Date Model Score Name Score Duration Title" << endl;
cout << "=== ========== ============ =========== =========== ========= =============================================================" << endl;
bool odd = true;
for (const auto& result : files) {
auto color = odd ? Colors::BLUE() : Colors::CYAN();
cout << color << setw(3) << fixed << right << i++ << " ";
cout << result.to_string() << endl;
if (i == max && max != 0) {
break;
}
odd = !odd;
}
}
int Results::getIndex(const string& intent) const
{
string color;
if (intent == "delete") {
color = Colors::RED();
} else {
color = Colors::YELLOW();
}
cout << color << "Choose result to " << intent << " (cancel=-1): ";
string line;
getline(cin, line);
int index = stoi(line);
if (index >= -1 && index < static_cast<int>(files.size())) {
return index;
}
cout << "Invalid index" << endl;
return -1;
}
void Results::report(const int index, const bool excelReport) const
{
cout << Colors::YELLOW() << "Reporting " << files.at(index).getFilename() << endl;
auto data = files.at(index).load();
if (excelReport) {
ReportExcel reporter(data);
reporter.show();
} else {
ReportConsole reporter(data);
reporter.show();
}
}
void Results::menu()
{
char option;
int index;
bool finished = false;
string filename, line, options = "qldhsre";
while (!finished) {
cout << Colors::RESET() << "Choose option (quit='q', list='l', delete='d', hide='h', sort='s', report='r', excel='e'): ";
getline(cin, line);
if (line.size() == 0)
continue;
if (options.find(line[0]) != string::npos) {
if (line.size() > 1) {
cout << "Invalid option" << endl;
continue;
}
option = line[0];
} else {
if (all_of(line.begin(), line.end(), ::isdigit)) {
index = stoi(line);
if (index >= 0 && index < files.size()) {
report(index, false);
continue;
}
}
cout << "Invalid option" << endl;
continue;
}
switch (option) {
case 'q':
finished = true;
break;
case 'l':
show();
break;
case 'd':
index = getIndex("delete");
if (index == -1)
break;
filename = files[index].getFilename();
cout << "Deleting " << filename << endl;
remove((path + "/" + filename).c_str());
files.erase(files.begin() + index);
cout << "File: " + filename + " deleted!" << endl;
show();
break;
case 'h':
index = getIndex("hide");
if (index == -1)
break;
filename = files[index].getFilename();
cout << "Hiding " << filename << endl;
rename((path + "/" + filename).c_str(), (path + "/." + filename).c_str());
files.erase(files.begin() + index);
show();
menu();
break;
case 's':
sortList();
show();
break;
case 'r':
index = getIndex("report");
if (index == -1)
break;
report(index, false);
break;
case 'e':
index = getIndex("excel");
if (index == -1)
break;
report(index, true);
break;
default:
cout << "Invalid option" << endl;
}
}
}
void Results::sortList()
{
cout << Colors::YELLOW() << "Choose sorting field (date='d', score='s', duration='u', model='m'): ";
string line;
char option;
getline(cin, line);
if (line.size() == 0)
return;
if (line.size() > 1) {
cout << "Invalid option" << endl;
return;
}
option = line[0];
switch (option) {
case 'd':
sortDate();
break;
case 's':
sortScore();
break;
case 'u':
sortDuration();
break;
case 'm':
sortModel();
break;
default:
cout << "Invalid option" << endl;
}
}
void Results::sortDate()
{
sort(files.begin(), files.end(), [](const Result& a, const Result& b) {
return a.getDate() > b.getDate();
});
}
void Results::sortModel()
{
sort(files.begin(), files.end(), [](const Result& a, const Result& b) {
return a.getModel() > b.getModel();
});
}
void Results::sortDuration()
{
sort(files.begin(), files.end(), [](const Result& a, const Result& b) {
return a.getDuration() > b.getDuration();
});
}
void Results::sortScore()
{
sort(files.begin(), files.end(), [](const Result& a, const Result& b) {
return a.getScore() > b.getScore();
});
}
void Results::manage()
{
if (files.size() == 0) {
cout << "No results found!" << endl;
exit(0);
}
sortDate();
show();
menu();
cout << "Done!" << endl;
}
}

View File

@@ -1,56 +0,0 @@
#ifndef RESULTS_H
#define RESULTS_H
#include <map>
#include <vector>
#include <string>
#include <nlohmann/json.hpp>
namespace platform {
using namespace std;
using json = nlohmann::json;
class Result {
public:
Result(const string& path, const string& filename);
json load() const;
string to_string() const;
string getFilename() const { return filename; };
string getDate() const { return date; };
double getScore() const { return score; };
string getTitle() const { return title; };
double getDuration() const { return duration; };
string getModel() const { return model; };
string getScoreName() const { return scoreName; };
private:
string path;
string filename;
string date;
double score;
string title;
double duration;
string model;
string scoreName;
};
class Results {
public:
Results(const string& path, const int max, const string& model, const string& score) : path(path), max(max), model(model), scoreName(score) { load(); };
void manage();
private:
string path;
int max;
string model;
string scoreName;
vector<Result> files;
void load(); // Loads the list of results
void show() const;
void report(const int index, const bool excelReport) const;
int getIndex(const string& intent) const;
void menu();
void sortList();
void sortDate();
void sortScore();
void sortModel();
void sortDuration();
};
};
#endif
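
For orientation, a minimal driver for this header, mirroring how the manage tool further below constructs it (the results path and filter values are illustrative; "any" and 0 are the tool's defaults):

#include "Results.h"
int main()
{
    // 0 = no limit on the number of results listed; "any" disables the
    // model and score-name filters.
    auto results = platform::Results("results", 0, "any", "any");
    results.manage(); // sorts by date, shows the list and enters the interactive menu
    return 0;
}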

View File

@@ -1,57 +0,0 @@
#include <iostream>
#include <iomanip>
#include <locale>
#include <sstream>
#include "Paths.h"
#include "Colors.h"
#include "Datasets.h"
using namespace std;
const int BALANCE_LENGTH = 75;
struct separated : numpunct<char> {
char do_decimal_point() const { return ','; }
char do_thousands_sep() const { return '.'; }
string do_grouping() const { return "\03"; }
};
void outputBalance(const string& balance)
{
auto temp = string(balance);
while (temp.size() > BALANCE_LENGTH - 1) {
auto part = temp.substr(0, BALANCE_LENGTH);
cout << part << endl;
cout << setw(48) << " ";
temp = temp.substr(BALANCE_LENGTH);
}
cout << temp << endl;
}
int main(int argc, char** argv)
{
auto data = platform::Datasets(platform::Paths().datasets(), false);
locale mylocale(cout.getloc(), new separated);
locale::global(mylocale);
cout.imbue(mylocale);
cout << Colors::GREEN() << "Dataset Sampl. Feat. Cls. Balance" << endl;
string balanceBars = string(BALANCE_LENGTH, '=');
cout << "============================== ====== ===== === " << balanceBars << endl;
bool odd = true;
for (const auto& dataset : data.getNames()) {
auto color = odd ? Colors::CYAN() : Colors::BLUE();
cout << color << setw(30) << left << dataset << " ";
data.loadDataset(dataset);
auto nSamples = data.getNSamples(dataset);
cout << setw(6) << right << nSamples << " ";
cout << setw(5) << right << data.getFeatures(dataset).size() << " ";
cout << setw(3) << right << data.getNClasses(dataset) << " ";
stringstream oss;
string sep = "";
for (auto number : data.getClassesCounts(dataset)) {
oss << sep << setprecision(2) << fixed << (float)number / nSamples * 100.0 << "% (" << number << ")";
sep = " / ";
}
outputBalance(oss.str());
odd = !odd;
}
cout << Colors::RESET() << endl;
return 0;
}
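
A self-contained sketch of the separated facet used above, for reference (the printed value is illustrative):

#include <iostream>
#include <locale>
// Same facet as above: comma as decimal point, dot as thousands separator,
// digits grouped in threes ("\3" gives one group size, repeated).
struct separated : std::numpunct<char> {
    char do_decimal_point() const override { return ','; }
    char do_thousands_sep() const override { return '.'; }
    std::string do_grouping() const override { return "\3"; }
};
int main()
{
    std::cout.imbue(std::locale(std::cout.getloc(), new separated));
    std::cout << 1234567 << std::endl; // prints 1.234.567
    return 0;
}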

View File

@@ -1,40 +1,38 @@
 #include <iostream>
 #include <argparse/argparse.hpp>
-#include <nlohmann/json.hpp>
 #include "platformUtils.h"
 #include "Experiment.h"
 #include "Datasets.h"
 #include "DotEnv.h"
+#include "CrossValidation.h"
 #include "Models.h"
-#include "modelRegister.h"
-#include "Paths.h"
 using namespace std;
-using json = nlohmann::json;
+const string PATH_RESULTS = "results";
+const string PATH_DATASETS = "datasets";
 argparse::ArgumentParser manageArguments(int argc, char** argv)
 {
-    auto env = platform::DotEnv();
-    argparse::ArgumentParser program("main");
+    auto env = DotEnv();
+    argparse::ArgumentParser program("BayesNetSample");
     program.add_argument("-d", "--dataset").default_value("").help("Dataset file name");
-    program.add_argument("--hyperparameters").default_value("{}").help("Hyperparameters passed to the model in Experiment");
     program.add_argument("-p", "--path")
         .help("folder where the data files are located, default")
-        .default_value(string{ platform::Paths::datasets() });
+        .default_value(string{ PATH_DATASETS }
+    );
     program.add_argument("-m", "--model")
-        .help("Model to use " + platform::Models::instance()->toString())
+        .help("Model to use " + platform::Models::toString())
         .action([](const std::string& value) {
-            static const vector<string> choices = platform::Models::instance()->getNames();
+            static const vector<string> choices = platform::Models::getNames();
             if (find(choices.begin(), choices.end(), value) != choices.end()) {
                 return value;
             }
-            throw runtime_error("Model must be one of " + platform::Models::instance()->toString());
+            throw runtime_error("Model must be one of " + platform::Models::toString());
         }
     );
     program.add_argument("--title").default_value("").help("Experiment title");
     program.add_argument("--discretize").help("Discretize input dataset").default_value((bool)stoi(env.get("discretize"))).implicit_value(true);
-    program.add_argument("--save").help("Save result (always save if no dataset is supplied)").default_value(false).implicit_value(true);
     program.add_argument("--stratified").help("If Stratified KFold is to be done").default_value((bool)stoi(env.get("stratified"))).implicit_value(true);
     program.add_argument("-f", "--folds").help("Number of folds").default_value(stoi(env.get("n_folds"))).scan<'i', int>().action([](const string& value) {
         try {
@@ -63,8 +61,6 @@ argparse::ArgumentParser manageArguments(int argc, char** argv)
     auto seeds = program.get<vector<int>>("seeds");
     auto complete_file_name = path + file_name + ".arff";
     auto title = program.get<string>("title");
-    auto hyperparameters = program.get<string>("hyperparameters");
-    auto saveResults = program.get<bool>("save");
     if (title == "" && file_name == "") {
         throw runtime_error("title is mandatory if dataset is not provided");
     }
@@ -80,6 +76,7 @@ argparse::ArgumentParser manageArguments(int argc, char** argv)
 int main(int argc, char** argv)
 {
     auto program = manageArguments(argc, argv);
+    bool saveResults = false;
     auto file_name = program.get<string>("dataset");
     auto path = program.get<string>("path");
     auto model_name = program.get<string>("model");
@@ -87,11 +84,9 @@ int main(int argc, char** argv)
     auto stratified = program.get<bool>("stratified");
     auto n_folds = program.get<int>("folds");
     auto seeds = program.get<vector<int>>("seeds");
-    auto hyperparameters = program.get<string>("hyperparameters");
-    vector<string> filesToTest;
+    vector<string> filesToProcess;
     auto datasets = platform::Datasets(path, true, platform::ARFF);
     auto title = program.get<string>("title");
-    auto saveResults = program.get<bool>("save");
     if (file_name != "") {
         if (!datasets.isDataset(file_name)) {
             cerr << "Dataset " << file_name << " not found" << endl;
@@ -100,31 +95,42 @@ int main(int argc, char** argv)
         if (title == "") {
             title = "Test " + file_name + " " + model_name + " " + to_string(n_folds) + " folds";
         }
-        filesToTest.push_back(file_name);
+        filesToProcess.push_back(file_name);
     } else {
-        filesToTest = platform::Datasets(path, true, platform::ARFF).getNames();
-        saveResults = true;
+        filesToProcess = platform::Datasets(path, true, platform::ARFF).getNames();
+        saveResults = true; // Only save results if all datasets are processed
     }
     /*
    * Begin Processing
    */
-    auto env = platform::DotEnv();
     auto experiment = platform::Experiment();
-    experiment.setTitle(title).setLanguage("cpp").setLanguageVersion("14.0.3");
-    experiment.setDiscretized(discretize_dataset).setModel(model_name).setPlatform(env.get("platform"));
+    experiment.setTitle(title).setLanguage("cpp").setLanguageVersion("1.0.0");
+    experiment.setDiscretized(discretize_dataset).setModel(model_name).setPlatform("BayesNet");
     experiment.setStratified(stratified).setNFolds(n_folds).setScoreName("accuracy");
-    experiment.setHyperparameters(json::parse(hyperparameters));
     for (auto seed : seeds) {
         experiment.addRandomSeed(seed);
     }
     platform::Timer timer;
+    cout << "*** Starting experiment: " << title << " ***" << endl;
     timer.start();
-    experiment.go(filesToTest, path);
-    experiment.setDuration(timer.getDuration());
-    if (saveResults) {
-        experiment.save(platform::Paths::results());
+    auto validation = platform::CrossValidation(model_name, stratified, n_folds, seeds, datasets);
+    for (auto fileName : filesToProcess) {
+        cout << "- " << setw(20) << left << fileName << " " << right << flush;
+        auto [X, y] = datasets.getTensors(fileName);
+        auto features = datasets.getFeatures(fileName);
+        auto samples = datasets.getNSamples(fileName);
+        cout << " (" << setw(5) << samples << "," << setw(3) << features.size() << ") " << flush;
+        auto result = validation.crossValidate(fileName);
+        result.setDataset(fileName);
+        experiment.setModelVersion(result.getModelVersion());
+        experiment.addResult(result);
     }
-    experiment.report();
+    experiment.setDuration(timer.getDuration());
+    if (saveResults)
+        experiment.save(PATH_RESULTS);
+    else
+        experiment.show();
     cout << "Done!" << endl;
     return 0;
 }
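
The --hyperparameters option on the removed side of this hunk carries raw JSON into the experiment via json::parse; a minimal sketch of that parse step with nlohmann::json (the key and value are hypothetical model settings, not from the repo):

#include <nlohmann/json.hpp>
#include <iostream>
#include <string>
int main()
{
    // As received from argparse; "smoothing" is a made-up hyperparameter.
    std::string hyperparameters = R"({"smoothing": 1.0})";
    auto parsed = nlohmann::json::parse(hyperparameters); // throws json::parse_error on malformed input
    double smoothing = parsed.value("smoothing", 0.0);    // falls back to 0.0 if the key is absent
    std::cout << smoothing << std::endl;
    return 0;
}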

View File

@@ -1,41 +0,0 @@
#include <iostream>
#include <argparse/argparse.hpp>
#include "platformUtils.h"
#include "Paths.h"
#include "Results.h"
using namespace std;
argparse::ArgumentParser manageArguments(int argc, char** argv)
{
argparse::ArgumentParser program("manage");
program.add_argument("-n", "--number").default_value(0).help("Number of results to show (0 = all)").scan<'i', int>();
program.add_argument("-m", "--model").default_value("any").help("Filter results of the selected model)");
program.add_argument("-s", "--score").default_value("any").help("Filter results of the score name supplied");
try {
program.parse_args(argc, argv);
auto number = program.get<int>("number");
if (number < 0) {
throw runtime_error("Number of results must be greater than or equal to 0");
}
auto model = program.get<string>("model");
auto score = program.get<string>("score");
}
catch (const exception& err) {
cerr << err.what() << endl;
cerr << program;
exit(1);
}
return program;
}
int main(int argc, char** argv)
{
auto program = manageArguments(argc, argv);
auto number = program.get<int>("number");
auto model = program.get<string>("model");
auto score = program.get<string>("score");
auto results = platform::Results(platform::Paths::results(), number, model, score);
results.manage();
return 0;
}

View File

@@ -1,21 +0,0 @@
#ifndef MODEL_REGISTER_H
#define MODEL_REGISTER_H
static platform::Registrar registrarT("TAN",
[](void) -> bayesnet::BaseClassifier* { return new bayesnet::TAN();});
static platform::Registrar registrarTLD("TANLd",
[](void) -> bayesnet::BaseClassifier* { return new bayesnet::TANLd();});
static platform::Registrar registrarS("SPODE",
[](void) -> bayesnet::BaseClassifier* { return new bayesnet::SPODE(2);});
static platform::Registrar registrarSLD("SPODELd",
[](void) -> bayesnet::BaseClassifier* { return new bayesnet::SPODELd(2);});
static platform::Registrar registrarK("KDB",
[](void) -> bayesnet::BaseClassifier* { return new bayesnet::KDB(2);});
static platform::Registrar registrarKLD("KDBLd",
[](void) -> bayesnet::BaseClassifier* { return new bayesnet::KDBLd(2);});
static platform::Registrar registrarA("AODE",
[](void) -> bayesnet::BaseClassifier* { return new bayesnet::AODE();});
static platform::Registrar registrarALD("AODELd",
[](void) -> bayesnet::BaseClassifier* { return new bayesnet::AODELd();});
static platform::Registrar registrarBA("BoostAODE",
[](void) -> bayesnet::BaseClassifier* { return new bayesnet::BoostAODE();});
#endif
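
The Registrar and Models types referenced above are not part of this compare; the following is a plausible sketch of the self-registering-factory idiom these static objects imply (class shape and method names are assumptions, not the actual Platform code):

#include <functional>
#include <map>
#include <string>
namespace bayesnet { class BaseClassifier; } // forward declaration, enough for the sketch
class Models {
public:
    // Hypothetical singleton registry keyed by model name.
    static Models* instance() { static Models registry; return &registry; }
    void add(const std::string& name, std::function<bayesnet::BaseClassifier* (void)> factory)
    {
        factories[name] = factory;
    }
    bayesnet::BaseClassifier* create(const std::string& name) { return factories.at(name)(); }
private:
    std::map<std::string, std::function<bayesnet::BaseClassifier* (void)>> factories;
};
class Registrar {
public:
    // Each static Registrar above would run this during static initialization.
    Registrar(const std::string& name, std::function<bayesnet::BaseClassifier* (void)> factory)
    {
        Models::instance()->add(name, factory);
    }
};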

View File

@@ -1,19 +1,7 @@
#include "platformUtils.h" #include "platformUtils.h"
#include "Paths.h"
using namespace torch; using namespace torch;
vector<string> split(const string& text, char delimiter)
{
vector<string> result;
stringstream ss(text);
string token;
while (getline(ss, token, delimiter)) {
result.push_back(token);
}
return result;
}
pair<vector<mdlp::labels_t>, map<string, int>> discretize(vector<mdlp::samples_t>& X, mdlp::labels_t& y, vector<string> features) pair<vector<mdlp::labels_t>, map<string, int>> discretize(vector<mdlp::samples_t>& X, mdlp::labels_t& y, vector<string> features)
{ {
vector<mdlp::labels_t> Xd; vector<mdlp::labels_t> Xd;
@@ -40,7 +28,7 @@ vector<mdlp::labels_t> discretizeDataset(vector<mdlp::samples_t>& X, mdlp::label
return Xd; return Xd;
} }
bool file_exists(const string& name) bool file_exists(const std::string& name)
{ {
if (FILE* file = fopen(name.c_str(), "r")) { if (FILE* file = fopen(name.c_str(), "r")) {
fclose(file); fclose(file);
@@ -61,7 +49,7 @@ tuple<Tensor, Tensor, vector<string>, string, map<string, vector<int>>> loadData
auto className = handler.getClassName(); auto className = handler.getClassName();
vector<string> features; vector<string> features;
auto attributes = handler.getAttributes(); auto attributes = handler.getAttributes();
transform(attributes.begin(), attributes.end(), back_inserter(features), [](const auto& pair) { return pair.first; }); transform(attributes.begin(), attributes.end(), back_inserter(features), [](const auto& f) { return f.first; });
Tensor Xd; Tensor Xd;
auto states = map<string, vector<int>>(); auto states = map<string, vector<int>>();
if (discretize_dataset) { if (discretize_dataset) {
@@ -69,12 +57,11 @@ tuple<Tensor, Tensor, vector<string>, string, map<string, vector<int>>> loadData
Xd = torch::zeros({ static_cast<int>(Xr[0].size()), static_cast<int>(Xr.size()) }, torch::kInt32); Xd = torch::zeros({ static_cast<int>(Xr[0].size()), static_cast<int>(Xr.size()) }, torch::kInt32);
for (int i = 0; i < features.size(); ++i) { for (int i = 0; i < features.size(); ++i) {
states[features[i]] = vector<int>(*max_element(Xr[i].begin(), Xr[i].end()) + 1); states[features[i]] = vector<int>(*max_element(Xr[i].begin(), Xr[i].end()) + 1);
auto item = states.at(features[i]); iota(begin(states[features[i]]), end(states[features[i]]), 0);
iota(begin(item), end(item), 0);
Xd.index_put_({ "...", i }, torch::tensor(Xr[i], torch::kInt32)); Xd.index_put_({ "...", i }, torch::tensor(Xr[i], torch::kInt32));
} }
states[className] = vector<int>(*max_element(y.begin(), y.end()) + 1); states[className] = vector<int>(*max_element(y.begin(), y.end()) + 1);
iota(begin(states.at(className)), end(states.at(className)), 0); iota(begin(states[className]), end(states[className]), 0);
} else { } else {
Xd = torch::zeros({ static_cast<int>(X[0].size()), static_cast<int>(X.size()) }, torch::kFloat32); Xd = torch::zeros({ static_cast<int>(X[0].size()), static_cast<int>(X.size()) }, torch::kFloat32);
for (int i = 0; i < features.size(); ++i) { for (int i = 0; i < features.size(); ++i) {
@@ -87,7 +74,7 @@ tuple<Tensor, Tensor, vector<string>, string, map<string, vector<int>>> loadData
tuple<vector<vector<int>>, vector<int>, vector<string>, string, map<string, vector<int>>> loadFile(const string& name) tuple<vector<vector<int>>, vector<int>, vector<string>, string, map<string, vector<int>>> loadFile(const string& name)
{ {
auto handler = ArffFiles(); auto handler = ArffFiles();
handler.load(platform::Paths::datasets() + static_cast<string>(name) + ".arff"); handler.load(PATH + static_cast<string>(name) + ".arff");
// Get Dataset X, y // Get Dataset X, y
vector<mdlp::samples_t>& X = handler.getX(); vector<mdlp::samples_t>& X = handler.getX();
mdlp::labels_t& y = handler.getY(); mdlp::labels_t& y = handler.getY();
@@ -95,7 +82,7 @@ tuple<vector<vector<int>>, vector<int>, vector<string>, string, map<string, vect
auto className = handler.getClassName(); auto className = handler.getClassName();
vector<string> features; vector<string> features;
auto attributes = handler.getAttributes(); auto attributes = handler.getAttributes();
transform(attributes.begin(), attributes.end(), back_inserter(features), [](const auto& pair) { return pair.first; }); transform(attributes.begin(), attributes.end(), back_inserter(features), [](const auto& f) { return f.first; });
// Discretize Dataset // Discretize Dataset
vector<mdlp::labels_t> Xd; vector<mdlp::labels_t> Xd;
map<string, int> maxes; map<string, int> maxes;

View File

@@ -11,10 +11,9 @@ using namespace std;
 const string PATH = "../../data/";
 bool file_exists(const std::string& name);
-vector<string> split(const string& text, char delimiter);
 pair<vector<mdlp::labels_t>, map<string, int>> discretize(vector<mdlp::samples_t>& X, mdlp::labels_t& y, vector<string> features);
 vector<mdlp::labels_t> discretizeDataset(vector<mdlp::samples_t>& X, mdlp::labels_t& y);
-pair<torch::Tensor, map<string, vector<int>>> discretizeTorch(torch::Tensor& X, torch::Tensor& y, vector<string>& features, const string& className);
+// pair<torch::Tensor, map<string, vector<int>>> discretizeTorch(torch::Tensor& X, torch::Tensor& y, vector<string>& features, const string& className);
 tuple<vector<vector<int>>, vector<int>, vector<string>, string, map<string, vector<int>>> loadFile(const string& name);
 tuple<torch::Tensor, torch::Tensor, vector<string>, string, map<string, vector<int>>> loadDataset(const string& path, const string& name, bool class_last, bool discretize_dataset);
 map<string, vector<int>> get_states(vector<string>& features, string className, map<string, int>& maxes);

View File

@@ -9,21 +9,29 @@ TEST_CASE("Test Bayesian Network")
 {
     auto [Xd, y, features, className, states] = loadFile("iris");
+    SECTION("Test Update Nodes")
+    {
+        auto net = bayesnet::Network();
+        net.addNode("A", 3);
+        REQUIRE(net.getStates() == 3);
+        net.addNode("A", 5);
+        REQUIRE(net.getStates() == 5);
+    }
     SECTION("Test get features")
     {
         auto net = bayesnet::Network();
-        net.addNode("A");
-        net.addNode("B");
+        net.addNode("A", 3);
+        net.addNode("B", 5);
         REQUIRE(net.getFeatures() == vector<string>{"A", "B"});
-        net.addNode("C");
+        net.addNode("C", 2);
         REQUIRE(net.getFeatures() == vector<string>{"A", "B", "C"});
     }
     SECTION("Test get edges")
     {
         auto net = bayesnet::Network();
-        net.addNode("A");
-        net.addNode("B");
-        net.addNode("C");
+        net.addNode("A", 3);
+        net.addNode("B", 5);
+        net.addNode("C", 2);
         net.addEdge("A", "B");
         net.addEdge("B", "C");
         REQUIRE(net.getEdges() == vector<pair<string, string>>{ {"A", "B"}, { "B", "C" } });