Compare commits

...

10 Commits
cuda ... v1.0.6

SHA1 Message Date
d84adf6172 Add model to changelog 2024-11-23 19:13:54 +01:00
268a86cbe0 Update Changelog 2024-11-23 19:11:00 +01:00
fc4c93b299 Fix Mst test 2024-11-23 19:07:35 +01:00
86f2bc44fc libmdlp (#31) 2024-11-23 17:22:41 +00:00
    Add mdlp as library in lib/
    Fix tests to reach 99.1% of coverage
    Reviewed-on: #31
f0f3d9ad6e Fix CUDA and mdlp library issues 2024-11-20 21:02:56 +01:00
9a323cd7a3 Remove mdlp submodule 2024-11-20 20:15:49 +01:00
cb949ac7e5 Update dependencies versions 2024-09-29 13:17:44 +02:00
2c297ea15d Control optional doxygen dependency 2024-09-29 12:48:15 +02:00
4e4b6e67f4 Add env parallel variable to Makefile 2024-09-18 11:05:19 +02:00
82847774ee Update Dockerfile 2024-09-13 09:42:06 +02:00
36 changed files with 5227 additions and 750 deletions

View File

@@ -1,6 +1,6 @@
 FROM mcr.microsoft.com/devcontainers/cpp:ubuntu22.04
-ARG REINSTALL_CMAKE_VERSION_FROM_SOURCE="3.22.2"
+ARG REINSTALL_CMAKE_VERSION_FROM_SOURCE="3.29.3"
 # Optionally install the cmake for vcpkg
 COPY ./reinstall-cmake.sh /tmp/
@@ -23,7 +23,7 @@ RUN add-apt-repository ppa:ubuntu-toolchain-r/test
 RUN apt-get update
 # Install GCC 13.1
-RUN apt-get install -y gcc-13 g++-13
+RUN apt-get install -y gcc-13 g++-13 doxygen
 # Install lcov 2.1
 RUN wget --quiet https://github.com/linux-test-project/lcov/releases/download/v2.1/lcov-2.1.tar.gz && \

.gitmodules vendored (8 changes)
View File

@@ -1,8 +1,3 @@
-[submodule "lib/mdlp"]
-path = lib/mdlp
-url = https://github.com/rmontanana/mdlp
-main = main
-update = merge
 [submodule "lib/json"]
 path = lib/json
 url = https://github.com/nlohmann/json.git
@@ -21,3 +16,6 @@
 [submodule "tests/lib/Files"]
 path = tests/lib/Files
 url = https://github.com/rmontanana/ArffFiles
+[submodule "lib/mdlp"]
+path = lib/mdlp
+url = https://github.com/rmontanana/mdlp

.vscode/launch.json vendored (2 changes)
View File

@@ -16,7 +16,7 @@
 "name": "test",
 "program": "${workspaceFolder}/build_Debug/tests/TestBayesNet",
 "args": [
-"[Network]"
+"No features selected"
 ],
 "cwd": "${workspaceFolder}/build_Debug/tests"
 },

View File

@@ -7,6 +7,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ## [Unreleased]
+## [1.0.6] 2024-11-23
+### Fixed
+- Prevent existing edges from being added to the network in the `add_edge` method.
+- Don't allow nodes or edges to be added to an already fitted network.
+- Number of threads spawned.
+- Network class tests.
 ### Added
 - Library logo generated with <https://openart.ai> to README.md
@@ -14,15 +23,21 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - *convergence_best* hyperparameter to the BoostAODE class, to control the way the prior accuracy is computed if convergence is set. Default value is *false*.
 - SPnDE model.
 - A2DE model.
+- BoostA2DE model.
 - A2DE & SPnDE tests.
 - Add tests to reach 99% of coverage.
 - Add tests to check the correct version of the mdlp, folding and json libraries.
 - Library documentation generated with Doxygen.
 - Link to documentation in the README.md.
-- Three types of smoothing the Bayesian Network OLD_LAPLACE, LAPLACE and CESTNIK.
+- Three types of smoothing for the Bayesian Network: ORIGINAL, LAPLACE and CESTNIK.
 ### Internal
+- Fixed optional doxygen dependency.
+- Add env parallel variable to Makefile.
+- Add CountingSemaphore class to manage the number of threads spawned.
+- Ignore CUDA language in CMake CodeCoverage module.
+- Update mdlp library as a git submodule.
 - Create library ShuffleArffFile to limit the number of samples with a parameter and shuffle them.
 - Refactor catch2 library location to test/lib
 - Refactor loadDataset function in tests.
@@ -33,6 +48,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Add a Makefile target (doc) to generate the documentation.
 - Add a Makefile target (doc-install) to install the documentation.
+### Libraries versions
+- mdlp: 2.0.1
+- Folding: 1.1.0
+- json: 3.11
+- ArffFiles: 1.1.0
 ## [1.0.5] 2024-04-20
 ### Added

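The CountingSemaphore entry above pairs with the Network.cc hunk further down, where each worker thread calls `semaphore.release()` once its CPT is computed. As a rough illustration only, not the library's actual class, the same thread cap can be expressed with C++20's std::counting_semaphore:

```cpp
#include <semaphore>
#include <thread>
#include <vector>

int main() {
    // Illustration: bound the number of live worker threads to the hardware
    // concurrency, acquiring a slot before spawning and releasing it when done.
    std::counting_semaphore<> slots(std::thread::hardware_concurrency());
    std::vector<std::thread> workers;
    for (int node = 0; node < 32; ++node) {
        slots.acquire(); // blocks while all slots are taken
        workers.emplace_back([&slots] {
            // ... compute one node's CPT here ...
            slots.release(); // free the slot for the next node
        });
    }
    for (auto& w : workers) w.join();
}
```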
View File

@@ -49,11 +49,12 @@ if (CMAKE_BUILD_TYPE STREQUAL "Debug")
 set(CODE_COVERAGE ON)
 endif (CMAKE_BUILD_TYPE STREQUAL "Debug")
+get_property(LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES)
+message(STATUS "Languages=${LANGUAGES}")
 if (CODE_COVERAGE)
 enable_testing()
 include(CodeCoverage)
-MESSAGE("Code coverage enabled")
+MESSAGE(STATUS "Code coverage enabled")
 SET(GCC_COVERAGE_LINK_FLAGS " ${GCC_COVERAGE_LINK_FLAGS} -lgcov --coverage")
 endif (CODE_COVERAGE)
@@ -63,6 +64,7 @@ endif (ENABLE_CLANG_TIDY)
 # External libraries - dependencies of BayesNet
 # ---------------------------------------------
 # include(FetchContent)
 add_git_submodule("lib/json")
 add_git_submodule("lib/mdlp")
@@ -75,7 +77,7 @@ add_subdirectory(bayesnet)
 # Testing
 # -------
 if (ENABLE_TESTING)
-MESSAGE("Testing enabled")
+MESSAGE(STATUS "Testing enabled")
 add_subdirectory(tests/lib/catch2)
 include(CTest)
 add_subdirectory(tests)
@@ -93,10 +95,14 @@ install(FILES ${CMAKE_BINARY_DIR}/configured_files/include/bayesnet/config.h DES
 # Documentation
 # -------------
 find_package(Doxygen)
+if (Doxygen_FOUND)
 set(DOC_DIR ${CMAKE_CURRENT_SOURCE_DIR}/docs)
 set(doxyfile_in ${DOC_DIR}/Doxyfile.in)
 set(doxyfile ${DOC_DIR}/Doxyfile)
 configure_file(${doxyfile_in} ${doxyfile} @ONLY)
 doxygen_add_docs(doxygen
 WORKING_DIRECTORY ${DOC_DIR}
 CONFIG_FILE ${doxyfile})
+else (Doxygen_FOUND)
+MESSAGE("* Doxygen not found")
+endif (Doxygen_FOUND)

View File

@@ -43,7 +43,7 @@ setup: ## Install dependencies for tests and coverage
 fi
 @echo "* You should install plantuml & graphviz for the diagrams"
-diagrams: ## Create an UML class diagram & depnendency of the project (diagrams/BayesNet.png)
+diagrams: ## Create an UML class diagram & dependency of the project (diagrams/BayesNet.png)
 @which $(plantuml) || (echo ">>> Please install plantuml"; exit 1)
 @which $(dot) || (echo ">>> Please install graphviz"; exit 1)
 @which $(clang-uml) || (echo ">>> Please install clang-uml"; exit 1)
@@ -58,10 +58,10 @@ diagrams: ## Create an UML class diagram & dependency of the project (diagrams/
 @$(dot) -Tsvg $(f_debug)/dependency.dot.BayesNet -o $(f_diagrams)/dependency.svg
 buildd: ## Build the debug targets
-cmake --build $(f_debug) -t $(app_targets) --parallel
+cmake --build $(f_debug) -t $(app_targets) --parallel $(CMAKE_BUILD_PARALLEL_LEVEL)
 buildr: ## Build the release targets
-cmake --build $(f_release) -t $(app_targets) --parallel
+cmake --build $(f_release) -t $(app_targets) --parallel $(CMAKE_BUILD_PARALLEL_LEVEL)
 clean: ## Clean the tests info
 @echo ">>> Cleaning Debug BayesNet tests...";
@@ -105,7 +105,7 @@ opt = ""
 test: ## Run tests (opt="-s") to verbose output the tests, (opt="-c='Test Maximum Spanning Tree'") to run only that section
 @echo ">>> Running BayesNet tests...";
 @$(MAKE) clean
-@cmake --build $(f_debug) -t $(test_targets) --parallel
+@cmake --build $(f_debug) -t $(test_targets) --parallel $(CMAKE_BUILD_PARALLEL_LEVEL)
 @for t in $(test_targets); do \
 echo ">>> Running $$t...";\
 if [ -f $(f_debug)/tests/$$t ]; then \

View File

@@ -7,9 +7,9 @@
 [![Security Rating](https://sonarcloud.io/api/project_badges/measure?project=rmontanana_BayesNet&metric=security_rating)](https://sonarcloud.io/summary/new_code?id=rmontanana_BayesNet)
 [![Reliability Rating](https://sonarcloud.io/api/project_badges/measure?project=rmontanana_BayesNet&metric=reliability_rating)](https://sonarcloud.io/summary/new_code?id=rmontanana_BayesNet)
 ![Gitea Last Commit](https://img.shields.io/gitea/last-commit/rmontanana/bayesnet?gitea_url=https://gitea.rmontanana.es:3000&logo=gitea)
-[![Coverage Badge](https://img.shields.io/badge/Coverage-97,1%25-green)](html/index.html)
-Bayesian Network Classifiers using libtorch from scratch
+[![Coverage Badge](https://img.shields.io/badge/Coverage-99,1%25-green)](html/index.html)
+Bayesian Network Classifiers library
 ## Dependencies
@@ -71,6 +71,8 @@ make sample fname=tests/data/glass.arff
 #### - AODE
+#### - A2DE
 #### - [BoostAODE](docs/BoostAODE.md)
 #### - BoostA2DE

View File

@@ -9,4 +9,4 @@ include_directories(
 file(GLOB_RECURSE Sources "*.cc")
 add_library(BayesNet ${Sources})
-target_link_libraries(BayesNet mdlp "${TORCH_LIBRARIES}")
+target_link_libraries(BayesNet fimdlp "${TORCH_LIBRARIES}")

View File

@@ -59,6 +59,9 @@ namespace bayesnet {
 std::vector<int> featuresUsed;
 if (selectFeatures) {
 featuresUsed = initializeModels(smoothing);
+if (featuresUsed.size() == 0) {
+return;
+}
 auto ypred = predict(X_train);
 std::tie(weights_, alpha_t, finished) = update_weights(y_train, ypred, weights_);
 // Update significance of the models

View File

@@ -209,7 +209,7 @@ namespace bayesnet {
 pthread_setname_np(threadName.c_str());
 #endif
 double numStates = static_cast<double>(node.second->getNumStates());
-double smoothing_factor = 0.0;
+double smoothing_factor;
 switch (smoothing) {
 case Smoothing_t::ORIGINAL:
 smoothing_factor = 1.0 / n_samples;
@@ -221,7 +221,7 @@ namespace bayesnet {
 smoothing_factor = 1 / numStates;
 break;
 default:
-throw std::invalid_argument("Smoothing method not recognized " + std::to_string(static_cast<int>(smoothing)));
+smoothing_factor = 0.0; // No smoothing
 }
 node.second->computeCPT(samples, features, smoothing_factor, weights);
 semaphore.release();
@@ -234,16 +234,6 @@ namespace bayesnet {
 for (auto& thread : threads) {
 thread.join();
 }
-// std::fstream file;
-// file.open("cpt.txt", std::fstream::out | std::fstream::app);
-// file << std::string(80, '*') << std::endl;
-// for (const auto& item : graph("Test")) {
-// file << item << std::endl;
-// }
-// file << std::string(80, '-') << std::endl;
-// file << dump_cpt() << std::endl;
-// file << std::string(80, '=') << std::endl;
-// file.close();
 fitted = true;
 }
 torch::Tensor Network::predict_tensor(const torch::Tensor& samples, const bool proba)

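Read together with the CHANGELOG entry on the three smoothing types, the switch above now maps each `Smoothing_t` value to a factor and falls back to no smoothing instead of throwing. A condensed sketch of that mapping follows; only the ORIGINAL and 1/numStates branches are visible in the hunk, so LAPLACE = 1.0 and the attribution of 1/numStates to CESTNIK are assumptions:

```cpp
enum class Smoothing_t { NONE, ORIGINAL, LAPLACE, CESTNIK };

// Sketch of the factor selection after this change (assumptions noted above).
double smoothingFactor(Smoothing_t smoothing, double n_samples, double numStates) {
    switch (smoothing) {
        case Smoothing_t::ORIGINAL: return 1.0 / n_samples; // visible in the hunk
        case Smoothing_t::LAPLACE:  return 1.0;             // assumed: classic add-one
        case Smoothing_t::CESTNIK:  return 1.0 / numStates; // assumed mapping of the visible case
        default:                    return 0.0;             // no smoothing, the new default
    }
}
```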
View File

@@ -53,14 +53,14 @@ namespace bayesnet {
 }
 }
-void insertElement(std::list<int>& variables, int variable)
+void MST::insertElement(std::list<int>& variables, int variable)
 {
 if (std::find(variables.begin(), variables.end(), variable) == variables.end()) {
 variables.push_front(variable);
 }
 }
-std::vector<std::pair<int, int>> reorder(std::vector<std::pair<float, std::pair<int, int>>> T, int root_original)
+std::vector<std::pair<int, int>> MST::reorder(std::vector<std::pair<float, std::pair<int, int>>> T, int root_original)
 {
 // Create the edges of a DAG from the MST
 // replacing unordered_set with list because unordered_set cannot guarantee the order of the elements inserted

View File

@@ -14,6 +14,8 @@ namespace bayesnet {
 public:
 MST() = default;
 MST(const std::vector<std::string>& features, const torch::Tensor& weights, const int root);
+void insertElement(std::list<int>& variables, int variable);
+std::vector<std::pair<int, int>> reorder(std::vector<std::pair<float, std::pair<int, int>>> T, int root_original);
 std::vector<std::pair<int, int>> maximumSpanningTree();
 private:
 torch::Tensor weights;

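Exposing insertElement and reorder as public members is what makes the new TestMST.cc (added to the test executable below) possible. A minimal sketch of such a test, assuming Catch2 v3 and that the MST header is reachable on the include path (the exact path is not shown in this diff):

```cpp
#include <list>
#include <catch2/catch_test_macros.hpp>
#include "Mst.h" // assumed include; the real path is not visible in this diff

TEST_CASE("insertElement keeps variables unique", "[MST]")
{
    bayesnet::MST mst; // default constructor, added in this release
    std::list<int> variables;
    mst.insertElement(variables, 3);
    mst.insertElement(variables, 3); // duplicate insert is a no-op
    REQUIRE(variables == std::list<int>{ 3 });
}
```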
View File

@@ -137,7 +137,7 @@
 include(CMakeParseArguments)
-option(CODE_COVERAGE_VERBOSE "Verbose information" FALSE)
+option(CODE_COVERAGE_VERBOSE "Verbose information" TRUE)
 # Check prereqs
 find_program( GCOV_PATH gcov )
@@ -160,8 +160,12 @@ foreach(LANG ${LANGUAGES})
 endif()
 elseif(NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "GNU"
 AND NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(LLVM)?[Ff]lang")
+if ("${LANG}" MATCHES "CUDA")
+message(STATUS "Ignoring CUDA")
+else()
 message(FATAL_ERROR "Compiler is not GNU or Flang! Aborting...")
+endif()
 endif()
 endforeach()
 set(COVERAGE_COMPILER_FLAGS "-g --coverage"

View File

@@ -5,15 +5,21 @@ project(bayesnet_sample)
 set(CMAKE_CXX_STANDARD 17)
 find_package(Torch REQUIRED)
-find_library(BayesNet NAMES BayesNet.a libBayesNet.a REQUIRED)
+find_library(BayesNet NAMES libBayesNet BayesNet libBayesNet.a REQUIRED)
+find_path(Bayesnet_INCLUDE_DIRS REQUIRED NAMES bayesnet)
+find_library(FImdlp NAMES libfimdlp.a PATHS REQUIRED)
+message(STATUS "FImdlp=${FImdlp}")
+message(STATUS "FImdlp_INCLUDE_DIRS=${FImdlp_INCLUDE_DIRS}")
+message(STATUS "BayesNet=${BayesNet}")
+message(STATUS "Bayesnet_INCLUDE_DIRS=${Bayesnet_INCLUDE_DIRS}")
 include_directories(
 ../tests/lib/Files
-lib/mdlp
 lib/json/include
 /usr/local/include
+${FImdlp_INCLUDE_DIRS}
 )
-add_subdirectory(lib/mdlp)
 add_executable(bayesnet_sample sample.cc)
-target_link_libraries(bayesnet_sample mdlp "${TORCH_LIBRARIES}" "${BayesNet}")
+target_link_libraries(bayesnet_sample fimdlp "${TORCH_LIBRARIES}" "${BayesNet}")

View File

@@ -1,11 +0,0 @@
cmake_minimum_required(VERSION 3.20)
project(mdlp)
if (POLICY CMP0135)
cmake_policy(SET CMP0135 NEW)
endif ()
set(CMAKE_CXX_STANDARD 11)
add_library(mdlp CPPFImdlp.cpp Metrics.cpp)

View File

@@ -1,222 +0,0 @@
#include <numeric>
#include <algorithm>
#include <set>
#include <cmath>
#include "CPPFImdlp.h"
#include "Metrics.h"
namespace mdlp {
CPPFImdlp::CPPFImdlp(size_t min_length_, int max_depth_, float proposed) : min_length(min_length_),
max_depth(max_depth_),
proposed_cuts(proposed)
{
}
CPPFImdlp::CPPFImdlp() = default;
CPPFImdlp::~CPPFImdlp() = default;
size_t CPPFImdlp::compute_max_num_cut_points() const
{
// Set the actual maximum number of cut points as a number or as a percentage of the number of samples
if (proposed_cuts == 0) {
return numeric_limits<size_t>::max();
}
if (proposed_cuts < 0 || proposed_cuts > static_cast<float>(X.size())) {
throw invalid_argument("wrong proposed num_cuts value");
}
if (proposed_cuts < 1)
return static_cast<size_t>(round(static_cast<float>(X.size()) * proposed_cuts));
return static_cast<size_t>(proposed_cuts);
}
void CPPFImdlp::fit(samples_t& X_, labels_t& y_)
{
X = X_;
y = y_;
num_cut_points = compute_max_num_cut_points();
depth = 0;
discretizedData.clear();
cutPoints.clear();
if (X.size() != y.size()) {
throw invalid_argument("X and y must have the same size");
}
if (X.empty() || y.empty()) {
throw invalid_argument("X and y must have at least one element");
}
if (min_length < 3) {
throw invalid_argument("min_length must be greater than 2");
}
if (max_depth < 1) {
throw invalid_argument("max_depth must be greater than 0");
}
indices = sortIndices(X_, y_);
metrics.setData(y, indices);
computeCutPoints(0, X.size(), 1);
sort(cutPoints.begin(), cutPoints.end());
if (num_cut_points > 0) {
// Select the best (with lower entropy) cut points
while (cutPoints.size() > num_cut_points) {
resizeCutPoints();
}
}
}
pair<precision_t, size_t> CPPFImdlp::valueCutPoint(size_t start, size_t cut, size_t end)
{
size_t n;
size_t m;
size_t idxPrev = cut - 1 >= start ? cut - 1 : cut;
size_t idxNext = cut + 1 < end ? cut + 1 : cut;
bool backWall; // true if duplicates reach beginning of the interval
precision_t previous;
precision_t actual;
precision_t next;
previous = X[indices[idxPrev]];
actual = X[indices[cut]];
next = X[indices[idxNext]];
// definition 2 of the paper => X[t-1] < X[t]
// get the first equal value of X in the interval
while (idxPrev > start && actual == previous) {
previous = X[indices[--idxPrev]];
}
backWall = idxPrev == start && actual == previous;
// get the last equal value of X in the interval
while (idxNext < end - 1 && actual == next) {
next = X[indices[++idxNext]];
}
// # of duplicates before cutpoint
n = cut - 1 - idxPrev;
// # of duplicates after cutpoint
m = idxNext - cut - 1;
// Decide which values to use
cut = cut + (backWall ? m + 1 : -n);
actual = X[indices[cut]];
return { (actual + previous) / 2, cut };
}
void CPPFImdlp::computeCutPoints(size_t start, size_t end, int depth_)
{
size_t cut;
pair<precision_t, size_t> result;
// Check if the interval length and the depth are Ok
if (end - start < min_length || depth_ > max_depth)
return;
depth = depth_ > depth ? depth_ : depth;
cut = getCandidate(start, end);
if (cut == numeric_limits<size_t>::max())
return;
if (mdlp(start, cut, end)) {
result = valueCutPoint(start, cut, end);
cut = result.second;
cutPoints.push_back(result.first);
computeCutPoints(start, cut, depth_ + 1);
computeCutPoints(cut, end, depth_ + 1);
}
}
size_t CPPFImdlp::getCandidate(size_t start, size_t end)
{
/* Definition 1: A binary discretization for A is determined by selecting the cut point TA for which
E(A, TA; S) is minimal amongst all the candidate cut points. */
size_t candidate = numeric_limits<size_t>::max();
size_t elements = end - start;
bool sameValues = true;
precision_t entropy_left;
precision_t entropy_right;
precision_t minEntropy;
// Check if all the values of the variable in the interval are the same
for (size_t idx = start + 1; idx < end; idx++) {
if (X[indices[idx]] != X[indices[start]]) {
sameValues = false;
break;
}
}
if (sameValues)
return candidate;
minEntropy = metrics.entropy(start, end);
for (size_t idx = start + 1; idx < end; idx++) {
// Cutpoints are always on boundaries (definition 2)
if (y[indices[idx]] == y[indices[idx - 1]])
continue;
entropy_left = precision_t(idx - start) / static_cast<precision_t>(elements) * metrics.entropy(start, idx);
entropy_right = precision_t(end - idx) / static_cast<precision_t>(elements) * metrics.entropy(idx, end);
if (entropy_left + entropy_right < minEntropy) {
minEntropy = entropy_left + entropy_right;
candidate = idx;
}
}
return candidate;
}
bool CPPFImdlp::mdlp(size_t start, size_t cut, size_t end)
{
int k;
int k1;
int k2;
precision_t ig;
precision_t delta;
precision_t ent;
precision_t ent1;
precision_t ent2;
auto N = precision_t(end - start);
k = metrics.computeNumClasses(start, end);
k1 = metrics.computeNumClasses(start, cut);
k2 = metrics.computeNumClasses(cut, end);
ent = metrics.entropy(start, end);
ent1 = metrics.entropy(start, cut);
ent2 = metrics.entropy(cut, end);
ig = metrics.informationGain(start, cut, end);
delta = static_cast<precision_t>(log2(pow(3, precision_t(k)) - 2) -
(precision_t(k) * ent - precision_t(k1) * ent1 - precision_t(k2) * ent2));
precision_t term = 1 / N * (log2(N - 1) + delta);
return ig > term;
}
// Argsort from https://stackoverflow.com/questions/1577475/c-sorting-and-keeping-track-of-indexes
indices_t CPPFImdlp::sortIndices(samples_t& X_, labels_t& y_)
{
indices_t idx(X_.size());
iota(idx.begin(), idx.end(), 0);
stable_sort(idx.begin(), idx.end(), [&X_, &y_](size_t i1, size_t i2) {
if (X_[i1] == X_[i2])
return y_[i1] < y_[i2];
else
return X_[i1] < X_[i2];
});
return idx;
}
void CPPFImdlp::resizeCutPoints()
{
//Compute entropy of each of the whole cutpoint set and discards the biggest value
precision_t maxEntropy = 0;
precision_t entropy;
size_t maxEntropyIdx = 0;
size_t begin = 0;
size_t end;
for (size_t idx = 0; idx < cutPoints.size(); idx++) {
end = begin;
while (X[indices[end]] < cutPoints[idx] && end < X.size())
end++;
entropy = metrics.entropy(begin, end);
if (entropy > maxEntropy) {
maxEntropy = entropy;
maxEntropyIdx = idx;
}
begin = end;
}
cutPoints.erase(cutPoints.begin() + static_cast<long>(maxEntropyIdx));
}
labels_t& CPPFImdlp::transform(const samples_t& data)
{
discretizedData.clear();
discretizedData.reserve(data.size());
for (const precision_t& item : data) {
auto upper = upper_bound(cutPoints.begin(), cutPoints.end(), item);
discretizedData.push_back(upper - cutPoints.begin());
}
return discretizedData;
}
}

View File

@@ -1,51 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef CPPFIMDLP_H
#define CPPFIMDLP_H
#include "typesFImdlp.h"
#include "Metrics.h"
#include <limits>
#include <utility>
#include <string>
namespace mdlp {
class CPPFImdlp {
protected:
size_t min_length = 3;
int depth = 0;
int max_depth = numeric_limits<int>::max();
float proposed_cuts = 0;
indices_t indices = indices_t();
samples_t X = samples_t();
labels_t y = labels_t();
Metrics metrics = Metrics(y, indices);
cutPoints_t cutPoints;
size_t num_cut_points = numeric_limits<size_t>::max();
labels_t discretizedData = labels_t();
static indices_t sortIndices(samples_t&, labels_t&);
void computeCutPoints(size_t, size_t, int);
void resizeCutPoints();
bool mdlp(size_t, size_t, size_t);
size_t getCandidate(size_t, size_t);
size_t compute_max_num_cut_points() const;
pair<precision_t, size_t> valueCutPoint(size_t, size_t, size_t);
public:
CPPFImdlp();
CPPFImdlp(size_t, int, float);
~CPPFImdlp();
void fit(samples_t&, labels_t&);
inline cutPoints_t getCutPoints() const { return cutPoints; };
labels_t& transform(const samples_t&);
inline int get_depth() const { return depth; };
static inline string version() { return "1.1.2"; };
};
}
#endif

View File

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2022 Ricardo Montañana Gómez
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,78 +0,0 @@
#include "Metrics.h"
#include <set>
#include <cmath>
using namespace std;
namespace mdlp {
Metrics::Metrics(labels_t& y_, indices_t& indices_): y(y_), indices(indices_),
numClasses(computeNumClasses(0, indices.size()))
{
}
int Metrics::computeNumClasses(size_t start, size_t end)
{
set<int> nClasses;
for (auto i = start; i < end; ++i) {
nClasses.insert(y[indices[i]]);
}
return static_cast<int>(nClasses.size());
}
void Metrics::setData(const labels_t& y_, const indices_t& indices_)
{
indices = indices_;
y = y_;
numClasses = computeNumClasses(0, indices.size());
entropyCache.clear();
igCache.clear();
}
precision_t Metrics::entropy(size_t start, size_t end)
{
precision_t p;
precision_t ventropy = 0;
int nElements = 0;
labels_t counts(numClasses + 1, 0);
if (end - start < 2)
return 0;
if (entropyCache.find({ start, end }) != entropyCache.end()) {
return entropyCache[{start, end}];
}
for (auto i = &indices[start]; i != &indices[end]; ++i) {
counts[y[*i]]++;
nElements++;
}
for (auto count : counts) {
if (count > 0) {
p = static_cast<precision_t>(count) / static_cast<precision_t>(nElements);
ventropy -= p * log2(p);
}
}
entropyCache[{start, end}] = ventropy;
return ventropy;
}
precision_t Metrics::informationGain(size_t start, size_t cut, size_t end)
{
precision_t iGain;
precision_t entropyInterval;
precision_t entropyLeft;
precision_t entropyRight;
size_t nElementsLeft = cut - start;
size_t nElementsRight = end - cut;
size_t nElements = end - start;
if (igCache.find(make_tuple(start, cut, end)) != igCache.end()) {
return igCache[make_tuple(start, cut, end)];
}
entropyInterval = entropy(start, end);
entropyLeft = entropy(start, cut);
entropyRight = entropy(cut, end);
iGain = entropyInterval -
(static_cast<precision_t>(nElementsLeft) * entropyLeft +
static_cast<precision_t>(nElementsRight) * entropyRight) /
static_cast<precision_t>(nElements);
igCache[make_tuple(start, cut, end)] = iGain;
return iGain;
}
}

View File

@@ -1,28 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef CCMETRICS_H
#define CCMETRICS_H
#include "typesFImdlp.h"
namespace mdlp {
class Metrics {
protected:
labels_t& y;
indices_t& indices;
int numClasses;
cacheEnt_t entropyCache = cacheEnt_t();
cacheIg_t igCache = cacheIg_t();
public:
Metrics(labels_t&, indices_t&);
void setData(const labels_t&, const indices_t&);
int computeNumClasses(size_t, size_t);
precision_t entropy(size_t, size_t);
precision_t informationGain(size_t, size_t, size_t);
};
}
#endif

View File

@@ -1,41 +0,0 @@
[![Build](https://github.com/rmontanana/mdlp/actions/workflows/build.yml/badge.svg)](https://github.com/rmontanana/mdlp/actions/workflows/build.yml)
[![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=rmontanana_mdlp&metric=alert_status)](https://sonarcloud.io/summary/new_code?id=rmontanana_mdlp)
[![Reliability Rating](https://sonarcloud.io/api/project_badges/measure?project=rmontanana_mdlp&metric=reliability_rating)](https://sonarcloud.io/summary/new_code?id=rmontanana_mdlp)
# mdlp
Discretization algorithm based on the paper by Fayyad &amp; Irani [Multi-Interval Discretization of Continuous-Valued Attributes for Classification Learning](https://www.ijcai.org/Proceedings/93-2/Papers/022.pdf)
The implementation tries to mitigate the problem of different label values with the same value of the variable:
- Sorts the values of the variable using the label values as a tie-breaker
- Once found a valid candidate for the split, it checks if the previous value is the same as actual one, and tries to get previous one, or next if the former is not possible.
Other features:
- Intervals with the same value of the variable are not taken into account for cutpoints.
- Intervals have to have more than two examples to be evaluated.
The algorithm returns the cut points for the variable.
## Sample
To run the sample, just execute the following commands:
```bash
cd sample
cmake -B build
cd build
make
./sample -f iris -m 2
./sample -h
```
## Test
To run the tests and see coverage (llvm & gcovr have to be installed), execute the following commands:
```bash
cd tests
./test
```

View File

@@ -1,24 +0,0 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#ifndef TYPES_H
#define TYPES_H
#include <vector>
#include <map>
#include <stdexcept>
using namespace std;
namespace mdlp {
typedef float precision_t;
typedef vector<precision_t> samples_t;
typedef vector<int> labels_t;
typedef vector<size_t> indices_t;
typedef vector<precision_t> cutPoints_t;
typedef map<pair<int, int>, precision_t> cacheEnt_t;
typedef map<tuple<int, int, int>, precision_t> cacheIg_t;
}
#endif

View File

@@ -10,8 +10,8 @@ if(ENABLE_TESTING)
 file(GLOB_RECURSE BayesNet_SOURCES "${BayesNet_SOURCE_DIR}/bayesnet/*.cc")
 add_executable(TestBayesNet TestBayesNetwork.cc TestBayesNode.cc TestBayesClassifier.cc
 TestBayesModels.cc TestBayesMetrics.cc TestFeatureSelection.cc TestBoostAODE.cc TestA2DE.cc
-TestUtils.cc TestBayesEnsemble.cc TestModulesVersions.cc TestBoostA2DE.cc ${BayesNet_SOURCES})
+TestUtils.cc TestBayesEnsemble.cc TestModulesVersions.cc TestBoostA2DE.cc TestMST.cc ${BayesNet_SOURCES})
-target_link_libraries(TestBayesNet PUBLIC "${TORCH_LIBRARIES}" mdlp PRIVATE Catch2::Catch2WithMain)
+target_link_libraries(TestBayesNet PUBLIC "${TORCH_LIBRARIES}" fimdlp PRIVATE Catch2::Catch2WithMain)
 add_test(NAME BayesNetworkTest COMMAND TestBayesNet)
 add_test(NAME A2DE COMMAND TestBayesNet "[A2DE]")
 add_test(NAME BoostA2DE COMMAND TestBayesNet "[BoostA2DE]")
@@ -24,4 +24,5 @@ if(ENABLE_TESTING)
 add_test(NAME Modules COMMAND TestBayesNet "[Modules]")
 add_test(NAME Network COMMAND TestBayesNet "[Network]")
 add_test(NAME Node COMMAND TestBayesNet "[Node]")
+add_test(NAME MST COMMAND TestBayesNet "[MST]")
 endif(ENABLE_TESTING)

View File

@@ -45,5 +45,5 @@ TEST_CASE("Test graph", "[A2DE]")
 auto graph = clf.graph();
 REQUIRE(graph.size() == 78);
 REQUIRE(graph[0] == "digraph BayesNet {\nlabel=<BayesNet A2DE_0>\nfontsize=30\nfontcolor=blue\nlabelloc=t\nlayout=circo\n");
-REQUIRE(graph[1] == "class [shape=circle, fontcolor=red, fillcolor=lightblue, style=filled ] \n");
+REQUIRE(graph[1] == "\"class\" [shape=circle, fontcolor=red, fillcolor=lightblue, style=filled ] \n");
 }

View File

@@ -85,7 +85,7 @@ TEST_CASE("Dump_cpt", "[Classifier]")
 auto raw = RawDatasets("iris", true);
 model.fit(raw.Xt, raw.yt, raw.features, raw.className, raw.states, raw.smoothing);
 auto cpt = model.dump_cpt();
-REQUIRE(cpt.size() == 1713);
+REQUIRE(cpt.size() == 1718);
 }
 TEST_CASE("Not fitted model", "[Classifier]")
 {

View File

@@ -27,13 +27,13 @@ TEST_CASE("Test Bayesian Classifiers score & version", "[Models]")
 map <pair<std::string, std::string>, float> scores{
 // Diabetes
 {{"diabetes", "AODE"}, 0.82161}, {{"diabetes", "KDB"}, 0.852865}, {{"diabetes", "SPODE"}, 0.802083}, {{"diabetes", "TAN"}, 0.821615},
-{{"diabetes", "AODELd"}, 0.8138f}, {{"diabetes", "KDBLd"}, 0.80208f}, {{"diabetes", "SPODELd"}, 0.78646f}, {{"diabetes", "TANLd"}, 0.8099f}, {{"diabetes", "BoostAODE"}, 0.83984f},
+{{"diabetes", "AODELd"}, 0.8125f}, {{"diabetes", "KDBLd"}, 0.80208f}, {{"diabetes", "SPODELd"}, 0.7890625f}, {{"diabetes", "TANLd"}, 0.803385437f}, {{"diabetes", "BoostAODE"}, 0.83984f},
 // Ecoli
 {{"ecoli", "AODE"}, 0.889881}, {{"ecoli", "KDB"}, 0.889881}, {{"ecoli", "SPODE"}, 0.880952}, {{"ecoli", "TAN"}, 0.892857},
-{{"ecoli", "AODELd"}, 0.8869f}, {{"ecoli", "KDBLd"}, 0.875f}, {{"ecoli", "SPODELd"}, 0.84226f}, {{"ecoli", "TANLd"}, 0.86905f}, {{"ecoli", "BoostAODE"}, 0.89583f},
+{{"ecoli", "AODELd"}, 0.875f}, {{"ecoli", "KDBLd"}, 0.880952358f}, {{"ecoli", "SPODELd"}, 0.839285731f}, {{"ecoli", "TANLd"}, 0.848214269f}, {{"ecoli", "BoostAODE"}, 0.89583f},
 // Glass
 {{"glass", "AODE"}, 0.79439}, {{"glass", "KDB"}, 0.827103}, {{"glass", "SPODE"}, 0.775701}, {{"glass", "TAN"}, 0.827103},
-{{"glass", "AODELd"}, 0.79439f}, {{"glass", "KDBLd"}, 0.85047f}, {{"glass", "SPODELd"}, 0.79439f}, {{"glass", "TANLd"}, 0.86449f}, {{"glass", "BoostAODE"}, 0.84579f},
+{{"glass", "AODELd"}, 0.799065411f}, {{"glass", "KDBLd"}, 0.82710278f}, {{"glass", "SPODELd"}, 0.780373812f}, {{"glass", "TANLd"}, 0.869158864f}, {{"glass", "BoostAODE"}, 0.84579f},
 // Iris
 {{"iris", "AODE"}, 0.973333}, {{"iris", "KDB"}, 0.973333}, {{"iris", "SPODE"}, 0.973333}, {{"iris", "TAN"}, 0.973333},
 {{"iris", "AODELd"}, 0.973333}, {{"iris", "KDBLd"}, 0.973333}, {{"iris", "SPODELd"}, 0.96f}, {{"iris", "TANLd"}, 0.97333f}, {{"iris", "BoostAODE"}, 0.98f}
@@ -71,10 +71,10 @@ TEST_CASE("Test Bayesian Classifiers score & version", "[Models]")
 TEST_CASE("Models features & Graph", "[Models]")
 {
 auto graph = std::vector<std::string>({ "digraph BayesNet {\nlabel=<BayesNet Test>\nfontsize=30\nfontcolor=blue\nlabelloc=t\nlayout=circo\n",
-"class [shape=circle, fontcolor=red, fillcolor=lightblue, style=filled ] \n",
-"class -> sepallength", "class -> sepalwidth", "class -> petallength", "class -> petalwidth", "petallength [shape=circle] \n",
-"petallength -> sepallength", "petalwidth [shape=circle] \n", "sepallength [shape=circle] \n",
-"sepallength -> sepalwidth", "sepalwidth [shape=circle] \n", "sepalwidth -> petalwidth", "}\n"
+"\"class\" [shape=circle, fontcolor=red, fillcolor=lightblue, style=filled ] \n",
+"\"class\" -> \"sepallength\"", "\"class\" -> \"sepalwidth\"", "\"class\" -> \"petallength\"", "\"class\" -> \"petalwidth\"", "\"petallength\" [shape=circle] \n",
+"\"petallength\" -> \"sepallength\"", "\"petalwidth\" [shape=circle] \n", "\"sepallength\" [shape=circle] \n",
+"\"sepallength\" -> \"sepalwidth\"", "\"sepalwidth\" [shape=circle] \n", "\"sepalwidth\" -> \"petalwidth\"", "}\n"
 }
 );
 SECTION("Test TAN")
@@ -96,7 +96,7 @@ TEST_CASE("Models features & Graph", "[Models]")
 clf.fit(raw.Xt, raw.yt, raw.features, raw.className, raw.states, raw.smoothing);
 REQUIRE(clf.getNumberOfNodes() == 5);
 REQUIRE(clf.getNumberOfEdges() == 7);
-REQUIRE(clf.getNumberOfStates() == 19);
+REQUIRE(clf.getNumberOfStates() == 27);
 REQUIRE(clf.getClassNumStates() == 3);
 REQUIRE(clf.show() == std::vector<std::string>{"class -> sepallength, sepalwidth, petallength, petalwidth, ", "petallength -> sepallength, ", "petalwidth -> ", "sepallength -> sepalwidth, ", "sepalwidth -> petalwidth, "});
 REQUIRE(clf.graph("Test") == graph);

View File

@@ -186,11 +186,11 @@ TEST_CASE("Test Bayesian Network", "[Network]")
 auto str = net.graph("Test Graph");
 REQUIRE(str.size() == 7);
 REQUIRE(str[0] == "digraph BayesNet {\nlabel=<BayesNet Test Graph>\nfontsize=30\nfontcolor=blue\nlabelloc=t\nlayout=circo\n");
-REQUIRE(str[1] == "A [shape=circle] \n");
-REQUIRE(str[2] == "A -> B");
-REQUIRE(str[3] == "A -> C");
-REQUIRE(str[4] == "B [shape=circle] \n");
-REQUIRE(str[5] == "C [shape=circle] \n");
+REQUIRE(str[1] == "\"A\" [shape=circle] \n");
+REQUIRE(str[2] == "\"A\" -> \"B\"");
+REQUIRE(str[3] == "\"A\" -> \"C\"");
+REQUIRE(str[4] == "\"B\" [shape=circle] \n");
+REQUIRE(str[5] == "\"C\" [shape=circle] \n");
 REQUIRE(str[6] == "}\n");
 }
 SECTION("Test predict")
@@ -257,9 +257,9 @@ TEST_CASE("Test Bayesian Network", "[Network]")
 REQUIRE(node->getCPT().equal(node2->getCPT()));
 }
 }
-SECTION("Test oddities")
+SECTION("Network oddities")
 {
-INFO("Test oddities");
+INFO("Network oddities");
 buildModel(net, raw.features, raw.className);
 // predict without fitting
 std::vector<std::vector<int>> test = { {1, 2, 0, 1, 1}, {0, 1, 2, 0, 1}, {0, 0, 0, 0, 1}, {2, 2, 2, 2, 1} };
@@ -329,6 +329,14 @@ TEST_CASE("Test Bayesian Network", "[Network]")
 std::string invalid_state = "Feature sepallength not found in states";
 REQUIRE_THROWS_AS(net4.fit(raw.Xv, raw.yv, raw.weightsv, raw.features, raw.className, std::map<std::string, std::vector<int>>(), raw.smoothing), std::invalid_argument);
 REQUIRE_THROWS_WITH(net4.fit(raw.Xv, raw.yv, raw.weightsv, raw.features, raw.className, std::map<std::string, std::vector<int>>(), raw.smoothing), invalid_state);
+// Try to add node or edge to a fitted network
+auto net5 = bayesnet::Network();
+buildModel(net5, raw.features, raw.className);
+net5.fit(raw.Xv, raw.yv, raw.weightsv, raw.features, raw.className, raw.states, raw.smoothing);
+REQUIRE_THROWS_AS(net5.addNode("A"), std::logic_error);
+REQUIRE_THROWS_WITH(net5.addNode("A"), "Cannot add node to a fitted network. Initialize first.");
+REQUIRE_THROWS_AS(net5.addEdge("A", "B"), std::logic_error);
+REQUIRE_THROWS_WITH(net5.addEdge("A", "B"), "Cannot add edge to a fitted network. Initialize first.");
 }
 }
@@ -373,7 +381,7 @@ TEST_CASE("Dump CPT", "[Network]")
 0.3333
 0.3333
 0.3333
-[ CPUFloatType{3} ]
+[ CPUDoubleType{3} ]
 * petallength: (4) : [4, 3, 3]
 (1,.,.) =
 0.9388 0.1000 0.2000
@@ -394,7 +402,7 @@ TEST_CASE("Dump CPT", "[Network]")
 0.0204 0.1000 0.2000
 0.1250 0.0526 0.1667
 0.2000 0.0606 0.8235
-[ CPUFloatType{4,3,3} ]
+[ CPUDoubleType{4,3,3} ]
 * petalwidth: (3) : [3, 6, 3]
 (1,.,.) =
 0.5000 0.0417 0.0714
@@ -419,12 +427,12 @@ TEST_CASE("Dump CPT", "[Network]")
 0.1111 0.0909 0.8000
 0.0667 0.2000 0.8667
 0.0303 0.2500 0.7500
-[ CPUFloatType{3,6,3} ]
+[ CPUDoubleType{3,6,3} ]
 * sepallength: (3) : [3, 3]
 0.8679 0.1321 0.0377
 0.0943 0.3019 0.0566
 0.0377 0.5660 0.9057
-[ CPUFloatType{3,3} ]
+[ CPUDoubleType{3,3} ]
 * sepalwidth: (6) : [6, 3, 3]
 (1,.,.) =
 0.0392 0.5000 0.2857
@@ -455,7 +463,7 @@ TEST_CASE("Dump CPT", "[Network]")
 0.5098 0.0833 0.1429
 0.5000 0.0476 0.1250
 0.2857 0.0571 0.1132
-[ CPUFloatType{6,3,3} ]
+[ CPUDoubleType{6,3,3} ]
 )";
 REQUIRE(res == expected);
 }
@@ -525,6 +533,7 @@ TEST_CASE("Test Smoothing A", "[Network]")
 }
 }
 }
+
 TEST_CASE("Test Smoothing B", "[Network]")
 {
 auto net = bayesnet::Network();
@@ -549,19 +558,41 @@ TEST_CASE("Test Smoothing B", "[Network]")
 { "C", {0, 1} }
 };
 auto weights = std::vector<double>(C.size(), 1);
-// Simple
-std::cout << "LAPLACE\n";
+// See https://www.overleaf.com/read/tfnhpfysfkfx#2d576c example for calculations
+INFO("Test Smoothing B - Laplace");
 net.fit(Data, C, weights, { "X", "Y", "Z" }, "C", states, bayesnet::Smoothing_t::LAPLACE);
-std::cout << net.dump_cpt();
-std::cout << "Predict proba of {0, 1, 2} y {1, 2, 3} = " << net.predict_proba({ {0, 1}, {1, 2}, {2, 3} }) << std::endl;
-std::cout << "ORIGINAL\n";
+auto laplace_values = std::vector<std::vector<float>>({ {0.377418, 0.622582}, {0.217821, 0.782179} });
+auto laplace_score = net.predict_proba({ {0, 1}, {1, 2}, {2, 3} });
+for (auto i = 0; i < 2; ++i) {
+for (auto j = 0; j < 2; ++j) {
+REQUIRE(laplace_score.at(i).at(j) == Catch::Approx(laplace_values.at(i).at(j)).margin(threshold));
+}
+}
+INFO("Test Smoothing B - Original");
 net.fit(Data, C, weights, { "X", "Y", "Z" }, "C", states, bayesnet::Smoothing_t::ORIGINAL);
-std::cout << net.dump_cpt();
-std::cout << "Predict proba of {0, 1, 2} y {1, 2, 3} = " << net.predict_proba({ {0, 1}, {1, 2}, {2, 3} }) << std::endl;
-std::cout << "CESTNIK\n";
+auto original_values = std::vector<std::vector<float>>({ {0.344769, 0.655231}, {0.0421263, 0.957874} });
+auto original_score = net.predict_proba({ {0, 1}, {1, 2}, {2, 3} });
+for (auto i = 0; i < 2; ++i) {
+for (auto j = 0; j < 2; ++j) {
+REQUIRE(original_score.at(i).at(j) == Catch::Approx(original_values.at(i).at(j)).margin(threshold));
+}
+}
+INFO("Test Smoothing B - Cestnik");
 net.fit(Data, C, weights, { "X", "Y", "Z" }, "C", states, bayesnet::Smoothing_t::CESTNIK);
-std::cout << net.dump_cpt();
-std::cout << "Predict proba of {0, 1, 2} y {1, 2, 3} = " << net.predict_proba({ {0, 1}, {1, 2}, {2, 3} }) << std::endl;
+auto cestnik_values = std::vector<std::vector<float>>({ {0.353422, 0.646578}, {0.12364, 0.87636} });
+auto cestnik_score = net.predict_proba({ {0, 1}, {1, 2}, {2, 3} });
+for (auto i = 0; i < 2; ++i) {
+for (auto j = 0; j < 2; ++j) {
+REQUIRE(cestnik_score.at(i).at(j) == Catch::Approx(cestnik_values.at(i).at(j)).margin(threshold));
+}
+}
+INFO("Test Smoothing B - No smoothing");
+net.fit(Data, C, weights, { "X", "Y", "Z" }, "C", states, bayesnet::Smoothing_t::NONE);
+auto nosmooth_values = std::vector<std::vector<float>>({ {0.342465753, 0.65753424}, {0.0, 1.0} });
+auto nosmooth_score = net.predict_proba({ {0, 1}, {1, 2}, {2, 3} });
+for (auto i = 0; i < 2; ++i) {
+for (auto j = 0; j < 2; ++j) {
+REQUIRE(nosmooth_score.at(i).at(j) == Catch::Approx(nosmooth_values.at(i).at(j)).margin(threshold));
+}
+}
 }

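For reference, the Laplace expectations hard-coded above are plain add-one smoothing of the conditional counts (the linked Overleaf note walks through the exact numbers); the general estimate is:

```latex
\hat{P}(x = v \mid c) = \frac{N_{v,c} + \alpha}{N_{c} + \alpha k},
\qquad \alpha = 1 \text{ for LAPLACE}, \quad k = \text{number of states of } x
```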
View File

@@ -62,15 +62,17 @@ TEST_CASE("Test Node computeCPT", "[Node]")
 // Create a vector with the names of the classes
 auto className = std::string("Class");
 // weights
-auto weights = torch::tensor({ 1.0, 1.0, 1.0, 1.0 });
+auto weights = torch::tensor({ 1.0, 1.0, 1.0, 1.0 }, torch::kDouble);
 std::vector<bayesnet::Node> nodes;
 for (int i = 0; i < features.size(); i++) {
 auto node = bayesnet::Node(features[i]);
 node.setNumStates(states[i]);
 nodes.push_back(node);
 }
+// Create the class node with 2 states
 nodes.push_back(bayesnet::Node(className));
 nodes[features.size()].setNumStates(2);
+// The network is c -> f1, f2, f3 and f1 -> f2, f3
 for (int i = 0; i < features.size(); i++) {
 // Add class node as parent of all feature nodes
 nodes[i].addParent(&nodes[features.size()]);

View File

@@ -27,189 +27,192 @@ TEST_CASE("Build basic model", "[BoostA2DE]")
 auto score = clf.score(raw.Xv, raw.yv);
 REQUIRE(score == Catch::Approx(0.919271).epsilon(raw.epsilon));
 }
-// TEST_CASE("Feature_select IWSS", "[BoostAODE]")
-// {
-// auto raw = RawDatasets("glass", true);
-// auto clf = bayesnet::BoostAODE();
-// clf.setHyperparameters({ {"select_features", "IWSS"}, {"threshold", 0.5 } });
-// clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
-// REQUIRE(clf.getNumberOfNodes() == 90);
-// REQUIRE(clf.getNumberOfEdges() == 153);
-// REQUIRE(clf.getNotes().size() == 2);
-// REQUIRE(clf.getNotes()[0] == "Used features in initialization: 4 of 9 with IWSS");
-// REQUIRE(clf.getNotes()[1] == "Number of models: 9");
-// }
+TEST_CASE("Feature_select IWSS", "[BoostA2DE]")
+{
+auto raw = RawDatasets("glass", true);
+auto clf = bayesnet::BoostA2DE();
+clf.setHyperparameters({ {"select_features", "IWSS"}, {"threshold", 0.5 } });
+clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
+REQUIRE(clf.getNumberOfNodes() == 140);
+REQUIRE(clf.getNumberOfEdges() == 294);
+REQUIRE(clf.getNotes().size() == 4);
+REQUIRE(clf.getNotes()[0] == "Used features in initialization: 4 of 9 with IWSS");
+REQUIRE(clf.getNotes()[1] == "Convergence threshold reached & 15 models eliminated");
+REQUIRE(clf.getNotes()[2] == "Pairs not used in train: 2");
+REQUIRE(clf.getNotes()[3] == "Number of models: 14");
+}
-// TEST_CASE("Feature_select FCBF", "[BoostAODE]")
-// {
-// auto raw = RawDatasets("glass", true);
-// auto clf = bayesnet::BoostAODE();
-// clf.setHyperparameters({ {"select_features", "FCBF"}, {"threshold", 1e-7 } });
-// clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
-// REQUIRE(clf.getNumberOfNodes() == 90);
-// REQUIRE(clf.getNumberOfEdges() == 153);
-// REQUIRE(clf.getNotes().size() == 2);
-// REQUIRE(clf.getNotes()[0] == "Used features in initialization: 4 of 9 with FCBF");
-// REQUIRE(clf.getNotes()[1] == "Number of models: 9");
-// }
+TEST_CASE("Feature_select FCBF", "[BoostA2DE]")
+{
+auto raw = RawDatasets("glass", true);
+auto clf = bayesnet::BoostA2DE();
+clf.setHyperparameters({ {"select_features", "FCBF"}, {"threshold", 1e-7 } });
+clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
+REQUIRE(clf.getNumberOfNodes() == 110);
+REQUIRE(clf.getNumberOfEdges() == 231);
+REQUIRE(clf.getNotes()[0] == "Used features in initialization: 4 of 9 with FCBF");
+REQUIRE(clf.getNotes()[1] == "Convergence threshold reached & 15 models eliminated");
+REQUIRE(clf.getNotes()[2] == "Pairs not used in train: 2");
+REQUIRE(clf.getNotes()[3] == "Number of models: 11");
+}
-// TEST_CASE("Test used features in train note and score", "[BoostAODE]")
-// {
-// auto raw = RawDatasets("diabetes", true);
-// auto clf = bayesnet::BoostAODE(true);
-// clf.setHyperparameters({
-// {"order", "asc"},
-// {"convergence", true},
-// {"select_features","CFS"},
-// });
-// clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
-// REQUIRE(clf.getNumberOfNodes() == 72);
-// REQUIRE(clf.getNumberOfEdges() == 120);
-// REQUIRE(clf.getNotes().size() == 2);
-// REQUIRE(clf.getNotes()[0] == "Used features in initialization: 6 of 8 with CFS");
-// REQUIRE(clf.getNotes()[1] == "Number of models: 8");
-// auto score = clf.score(raw.Xv, raw.yv);
-// auto scoret = clf.score(raw.Xt, raw.yt);
-// REQUIRE(score == Catch::Approx(0.809895813).epsilon(raw.epsilon));
-// REQUIRE(scoret == Catch::Approx(0.809895813).epsilon(raw.epsilon));
-// }
+TEST_CASE("Test used features in train note and score", "[BoostA2DE]")
+{
+auto raw = RawDatasets("diabetes", true);
+auto clf = bayesnet::BoostA2DE(true);
+clf.setHyperparameters({
+{"order", "asc"},
+{"convergence", true},
+{"select_features","CFS"},
+});
+clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
+REQUIRE(clf.getNumberOfNodes() == 144);
+REQUIRE(clf.getNumberOfEdges() == 288);
+REQUIRE(clf.getNotes().size() == 2);
+REQUIRE(clf.getNotes()[0] == "Used features in initialization: 6 of 8 with CFS");
+REQUIRE(clf.getNotes()[1] == "Number of models: 16");
+auto score = clf.score(raw.Xv, raw.yv);
+auto scoret = clf.score(raw.Xt, raw.yt);
+REQUIRE(score == Catch::Approx(0.856771).epsilon(raw.epsilon));
+REQUIRE(scoret == Catch::Approx(0.856771).epsilon(raw.epsilon));
+}
-// TEST_CASE("Voting vs proba", "[BoostAODE]")
-// {
-// auto raw = RawDatasets("iris", true);
-// auto clf = bayesnet::BoostAODE(false);
-// clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
-// auto score_proba = clf.score(raw.Xv, raw.yv);
-// auto pred_proba = clf.predict_proba(raw.Xv);
-// clf.setHyperparameters({
-// {"predict_voting",true},
-// });
-// auto score_voting = clf.score(raw.Xv, raw.yv);
-// auto pred_voting = clf.predict_proba(raw.Xv);
-// REQUIRE(score_proba == Catch::Approx(0.97333).epsilon(raw.epsilon));
-// REQUIRE(score_voting == Catch::Approx(0.98).epsilon(raw.epsilon));
-// REQUIRE(pred_voting[83][2] == Catch::Approx(1.0).epsilon(raw.epsilon));
-// REQUIRE(pred_proba[83][2] == Catch::Approx(0.86121525).epsilon(raw.epsilon));
-// REQUIRE(clf.dump_cpt() == "");
-// REQUIRE(clf.topological_order() == std::vector<std::string>());
-// }
+TEST_CASE("Voting vs proba", "[BoostA2DE]")
+{
+auto raw = RawDatasets("iris", true);
+auto clf = bayesnet::BoostA2DE(false);
+clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
+auto score_proba = clf.score(raw.Xv, raw.yv);
+auto pred_proba = clf.predict_proba(raw.Xv);
+clf.setHyperparameters({
+{"predict_voting",true},
+});
+auto score_voting = clf.score(raw.Xv, raw.yv);
+auto pred_voting = clf.predict_proba(raw.Xv);
+REQUIRE(score_proba == Catch::Approx(0.98).epsilon(raw.epsilon));
+REQUIRE(score_voting == Catch::Approx(0.946667).epsilon(raw.epsilon));
+REQUIRE(pred_voting[83][2] == Catch::Approx(0.53508).epsilon(raw.epsilon));
+REQUIRE(pred_proba[83][2] == Catch::Approx(0.48394).epsilon(raw.epsilon));
+REQUIRE(clf.dump_cpt() == "");
+REQUIRE(clf.topological_order() == std::vector<std::string>());
+}
-// TEST_CASE("Order asc, desc & random", "[BoostAODE]")
-// {
-// auto raw = RawDatasets("glass", true);
-// std::map<std::string, double> scores{
-// {"asc", 0.83645f }, { "desc", 0.84579f }, { "rand", 0.84112 }
-// };
-// for (const std::string& order : { "asc", "desc", "rand" }) {
-// auto clf = bayesnet::BoostAODE();
-// clf.setHyperparameters({
-// {"order", order},
-// {"bisection", false},
-// {"maxTolerance", 1},
-// {"convergence", false},
-// });
-// clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
-// auto score = clf.score(raw.Xv, raw.yv);
-// auto scoret = clf.score(raw.Xt, raw.yt);
-// INFO("BoostAODE order: " + order);
-// REQUIRE(score == Catch::Approx(scores[order]).epsilon(raw.epsilon));
-// REQUIRE(scoret == Catch::Approx(scores[order]).epsilon(raw.epsilon));
-// }
-// }
+TEST_CASE("Order asc, desc & random", "[BoostA2DE]")
+{
+auto raw = RawDatasets("glass", true);
+std::map<std::string, double> scores{
+{"asc", 0.752336f }, { "desc", 0.813084f }, { "rand", 0.850467 }
+};
+for (const std::string& order : { "asc", "desc", "rand" }) {
+auto clf = bayesnet::BoostA2DE();
+clf.setHyperparameters({
+{"order", order},
+{"bisection", false},
+{"maxTolerance", 1},
+{"convergence", false},
+});
+clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
+auto score = clf.score(raw.Xv, raw.yv);
+auto scoret = clf.score(raw.Xt, raw.yt);
+INFO("BoostA2DE order: " + order);
+REQUIRE(score == Catch::Approx(scores[order]).epsilon(raw.epsilon));
+REQUIRE(scoret == Catch::Approx(scores[order]).epsilon(raw.epsilon));
+}
+}
-// TEST_CASE("Oddities", "[BoostAODE]")
-// {
-// auto clf = bayesnet::BoostAODE();
-// auto raw = RawDatasets("iris", true);
-// auto bad_hyper = nlohmann::json{
-// { { "order", "duck" } },
-// { { "select_features", "duck" } },
-// { { "maxTolerance", 0 } },
-// { { "maxTolerance", 5 } },
-// };
-// for (const auto& hyper : bad_hyper.items()) {
-// INFO("BoostAODE hyper: " + hyper.value().dump());
-// REQUIRE_THROWS_AS(clf.setHyperparameters(hyper.value()), std::invalid_argument);
-// }
-// REQUIRE_THROWS_AS(clf.setHyperparameters({ {"maxTolerance", 0 } }), std::invalid_argument);
-// auto bad_hyper_fit = nlohmann::json{
-// { { "select_features","IWSS" }, { "threshold", -0.01 } },
-// { { "select_features","IWSS" }, { "threshold", 0.51 } },
-// { { "select_features","FCBF" }, { "threshold", 1e-8 } },
-// { { "select_features","FCBF" }, { "threshold", 1.01 } },
-// };
-// for (const auto& hyper : bad_hyper_fit.items()) {
-// INFO("BoostAODE hyper: " + hyper.value().dump());
-// clf.setHyperparameters(hyper.value());
-// REQUIRE_THROWS_AS(clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing, std::invalid_argument);
-// }
-// }
+TEST_CASE("Oddities2", "[BoostA2DE]")
+{
+auto clf = bayesnet::BoostA2DE();
+auto raw = RawDatasets("iris", true);
+auto bad_hyper = nlohmann::json{
+{ { "order", "duck" } },
+{ { "select_features", "duck" } },
+{ { "maxTolerance", 0 } },
+{ { "maxTolerance", 5 } },
+};
+for (const auto& hyper : bad_hyper.items()) {
+INFO("BoostA2DE hyper: " + hyper.value().dump());
+REQUIRE_THROWS_AS(clf.setHyperparameters(hyper.value()), std::invalid_argument);
+}
+REQUIRE_THROWS_AS(clf.setHyperparameters({ {"maxTolerance", 0 } }), std::invalid_argument);
+auto bad_hyper_fit = nlohmann::json{
+{ { "select_features","IWSS" }, { "threshold", -0.01 } },
+{ { "select_features","IWSS" }, { "threshold", 0.51 } },
+{ { "select_features","FCBF" }, { "threshold", 1e-8 } },
+{ { "select_features","FCBF" }, { "threshold", 1.01 } },
+};
+for (const auto& hyper : bad_hyper_fit.items()) {
+INFO("BoostA2DE hyper: " + hyper.value().dump());
+clf.setHyperparameters(hyper.value());
+REQUIRE_THROWS_AS(clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing), std::invalid_argument);
+}
+}
+TEST_CASE("No features selected", "[BoostA2DE]")
+{
+// Check that the note "No features selected in initialization" is added
+//
+auto raw = RawDatasets("iris", true);
+auto clf = bayesnet::BoostA2DE();
+clf.setHyperparameters({ {"select_features","FCBF"}, {"threshold", 1 } });
+clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
+REQUIRE(clf.getNotes().size() == 1);
+REQUIRE(clf.getNotes()[0] == "No features selected in initialization");
+}
-// TEST_CASE("Bisection Best", "[BoostAODE]")
-// {
-// auto clf = bayesnet::BoostAODE();
-// auto raw = RawDatasets("kdd_JapaneseVowels", true, 1200, true, false);
-// clf.setHyperparameters({
-// {"bisection", true},
-// {"maxTolerance", 3},
-// {"convergence", true},
-// {"block_update", false},
-// {"convergence_best", false},
-// });
-// clf.fit(raw.X_train, raw.y_train, raw.features, raw.className, raw.states, raw.smoothing);
-// REQUIRE(clf.getNumberOfNodes() == 210);
-// REQUIRE(clf.getNumberOfEdges() == 378);
-// REQUIRE(clf.getNotes().size() == 1);
-// REQUIRE(clf.getNotes().at(0) == "Number of models: 14");
-// auto score = clf.score(raw.X_test, raw.y_test);
-// auto scoret = clf.score(raw.X_test, raw.y_test);
-// REQUIRE(score == Catch::Approx(0.991666675f).epsilon(raw.epsilon));
-// REQUIRE(scoret == Catch::Approx(0.991666675f).epsilon(raw.epsilon));
+TEST_CASE("Bisection Best", "[BoostA2DE]")
+{
+auto clf = bayesnet::BoostA2DE();
+auto raw = RawDatasets("kdd_JapaneseVowels", true, 1200, true, false);
+clf.setHyperparameters({
+{"bisection", true},
+{"maxTolerance", 3},
// } {"convergence", true},
// TEST_CASE("Bisection Best vs Last", "[BoostAODE]") {"block_update", false},
// { {"convergence_best", false},
// auto raw = RawDatasets("kdd_JapaneseVowels", true, 1500, true, false); });
// auto clf = bayesnet::BoostAODE(true); clf.fit(raw.X_train, raw.y_train, raw.features, raw.className, raw.states, raw.smoothing);
// auto hyperparameters = nlohmann::json{ REQUIRE(clf.getNumberOfNodes() == 480);
// {"bisection", true}, REQUIRE(clf.getNumberOfEdges() == 1152);
// {"maxTolerance", 3}, REQUIRE(clf.getNotes().size() == 3);
// {"convergence", true}, REQUIRE(clf.getNotes().at(0) == "Convergence threshold reached & 15 models eliminated");
// {"convergence_best", true}, REQUIRE(clf.getNotes().at(1) == "Pairs not used in train: 83");
// }; REQUIRE(clf.getNotes().at(2) == "Number of models: 32");
// clf.setHyperparameters(hyperparameters); auto score = clf.score(raw.X_test, raw.y_test);
// clf.fit(raw.X_train, raw.y_train, raw.features, raw.className, raw.states, raw.smoothing); auto scoret = clf.score(raw.X_test, raw.y_test);
// auto score_best = clf.score(raw.X_test, raw.y_test); REQUIRE(score == Catch::Approx(0.966667f).epsilon(raw.epsilon));
// REQUIRE(score_best == Catch::Approx(0.980000019f).epsilon(raw.epsilon)); REQUIRE(scoret == Catch::Approx(0.966667f).epsilon(raw.epsilon));
// // Now we will set the hyperparameter to use the last accuracy }
// hyperparameters["convergence_best"] = false; TEST_CASE("Block Update", "[BoostA2DE]")
// clf.setHyperparameters(hyperparameters); {
// clf.fit(raw.X_train, raw.y_train, raw.features, raw.className, raw.states, raw.smoothing); auto clf = bayesnet::BoostA2DE();
// auto score_last = clf.score(raw.X_test, raw.y_test); auto raw = RawDatasets("spambase", true, 500);
// REQUIRE(score_last == Catch::Approx(0.976666689f).epsilon(raw.epsilon)); clf.setHyperparameters({
// } {"bisection", true},
{"block_update", true},
// TEST_CASE("Block Update", "[BoostAODE]") {"maxTolerance", 3},
// { {"convergence", true},
// auto clf = bayesnet::BoostAODE(); });
// auto raw = RawDatasets("mfeat-factors", true, 500); clf.fit(raw.X_train, raw.y_train, raw.features, raw.className, raw.states, raw.smoothing);
// clf.setHyperparameters({ REQUIRE(clf.getNumberOfNodes() == 58);
// {"bisection", true}, REQUIRE(clf.getNumberOfEdges() == 165);
// {"block_update", true}, REQUIRE(clf.getNotes().size() == 3);
// {"maxTolerance", 3}, REQUIRE(clf.getNotes()[0] == "Convergence threshold reached & 15 models eliminated");
// {"convergence", true}, REQUIRE(clf.getNotes()[1] == "Pairs not used in train: 1588");
// }); REQUIRE(clf.getNotes()[2] == "Number of models: 1");
// clf.fit(raw.X_train, raw.y_train, raw.features, raw.className, raw.states, raw.smoothing); auto score = clf.score(raw.X_test, raw.y_test);
// REQUIRE(clf.getNumberOfNodes() == 868); auto scoret = clf.score(raw.X_test, raw.y_test);
// REQUIRE(clf.getNumberOfEdges() == 1724); REQUIRE(score == Catch::Approx(1.0f).epsilon(raw.epsilon));
// REQUIRE(clf.getNotes().size() == 3); REQUIRE(scoret == Catch::Approx(1.0f).epsilon(raw.epsilon));
// REQUIRE(clf.getNotes()[0] == "Convergence threshold reached & 15 models eliminated"); //
// REQUIRE(clf.getNotes()[1] == "Used features in train: 19 of 216"); // std::cout << "Number of nodes " << clf.getNumberOfNodes() << std::endl;
// REQUIRE(clf.getNotes()[2] == "Number of models: 4"); // std::cout << "Number of edges " << clf.getNumberOfEdges() << std::endl;
// auto score = clf.score(raw.X_test, raw.y_test); // std::cout << "Notes size " << clf.getNotes().size() << std::endl;
// auto scoret = clf.score(raw.X_test, raw.y_test); // for (auto note : clf.getNotes()) {
// REQUIRE(score == Catch::Approx(0.99f).epsilon(raw.epsilon)); // std::cout << note << std::endl;
// REQUIRE(scoret == Catch::Approx(0.99f).epsilon(raw.epsilon)); // }
// // // std::cout << "Score " << score << std::endl;
// // std::cout << "Number of nodes " << clf.getNumberOfNodes() << std::endl; }
// // std::cout << "Number of edges " << clf.getNumberOfEdges() << std::endl; TEST_CASE("Test graph b2a2de", "[BoostA2DE]")
// // std::cout << "Notes size " << clf.getNotes().size() << std::endl; {
// // for (auto note : clf.getNotes()) { auto raw = RawDatasets("iris", true);
// // std::cout << note << std::endl; auto clf = bayesnet::BoostA2DE();
// // } clf.fit(raw.Xv, raw.yv, raw.features, raw.className, raw.states, raw.smoothing);
// // std::cout << "Score " << score << std::endl; auto graph = clf.graph();
// } REQUIRE(graph.size() == 26);
REQUIRE(graph[0] == "digraph BayesNet {\nlabel=<BayesNet BoostA2DE_0>\nfontsize=30\nfontcolor=blue\nlabelloc=t\nlayout=circo\n");
REQUIRE(graph[1] == "\"class\" [shape=circle, fontcolor=red, fillcolor=lightblue, style=filled ] \n");
}

72
tests/TestMST.cc Normal file

@@ -0,0 +1,72 @@
// ***************************************************************
// SPDX-FileCopyrightText: Copyright 2024 Ricardo Montañana Gómez
// SPDX-FileType: SOURCE
// SPDX-License-Identifier: MIT
// ***************************************************************
#include <catch2/catch_test_macros.hpp>
#include <catch2/catch_approx.hpp>
#include <catch2/generators/catch_generators.hpp>
#include <catch2/matchers/catch_matchers.hpp>
#include <string>
#include <vector>
#include "TestUtils.h"
#include "bayesnet/utils/Mst.h"
TEST_CASE("MST::insertElement tests", "[MST]")
{
bayesnet::MST mst({}, torch::tensor({}), 0);
SECTION("Insert into an empty list")
{
std::list<int> variables;
mst.insertElement(variables, 5);
REQUIRE(variables == std::list<int>{5});
}
SECTION("Insert a non-duplicate element")
{
std::list<int> variables = { 1, 2, 3 };
mst.insertElement(variables, 4);
REQUIRE(variables == std::list<int>{4, 1, 2, 3});
}
SECTION("Insert a duplicate element")
{
std::list<int> variables = { 1, 2, 3 };
mst.insertElement(variables, 2);
REQUIRE(variables == std::list<int>{1, 2, 3});
}
}
TEST_CASE("MST::reorder tests", "[MST]")
{
bayesnet::MST mst({}, torch::tensor({}), 0);
SECTION("Reorder simple graph")
{
std::vector<std::pair<float, std::pair<int, int>>> T = { {2.0, {1, 2}}, {1.0, {0, 1}} };
auto result = mst.reorder(T, 0);
REQUIRE(result == std::vector<std::pair<int, int>>{{0, 1}, { 1, 2 }});
}
SECTION("Reorder with disconnected graph")
{
std::vector<std::pair<float, std::pair<int, int>>> T = { {2.0, {2, 3}}, {1.0, {0, 1}} };
auto result = mst.reorder(T, 0);
REQUIRE(result == std::vector<std::pair<int, int>>{{0, 1}, { 2, 3 }});
}
}
TEST_CASE("MST::maximumSpanningTree tests", "[MST]")
{
std::vector<std::string> features = { "A", "B", "C" };
auto weights = torch::tensor({
{0.0, 1.0, 2.0},
{1.0, 0.0, 3.0},
{2.0, 3.0, 0.0}
});
bayesnet::MST mst(features, weights, 0);
SECTION("MST of a complete graph")
{
auto result = mst.maximumSpanningTree();
REQUIRE(result.size() == 2); // Un MST para 3 nodos tiene 2 aristas
}
}
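The final section only asserts the edge count, which follows from the defining property of a spanning tree: for n connected nodes it always contains exactly n - 1 edges. For reference, a maximum spanning tree can be built with Kruskal's algorithm run on edges sorted by descending weight; the sketch below is an independent illustration of that technique under the same 3-node weights as the test, not the internals of bayesnet::MST.

#include <algorithm>
#include <iostream>
#include <numeric>
#include <tuple>
#include <vector>

// Find the root of x in the union-find forest, with path halving.
int findRoot(std::vector<int>& parent, int x)
{
    while (parent[x] != x)
        x = parent[x] = parent[parent[x]];
    return x;
}

// Kruskal's algorithm on edges sorted by descending weight yields a
// maximum spanning tree: n connected nodes -> exactly n - 1 edges kept.
std::vector<std::pair<int, int>> maximumSpanningTree(int n, std::vector<std::tuple<double, int, int>> edges)
{
    std::sort(edges.begin(), edges.end(),
        [](const auto& a, const auto& b) { return std::get<0>(a) > std::get<0>(b); });
    std::vector<int> parent(n);
    std::iota(parent.begin(), parent.end(), 0);
    std::vector<std::pair<int, int>> tree;
    for (const auto& [w, u, v] : edges) {
        int ru = findRoot(parent, u), rv = findRoot(parent, v);
        if (ru != rv) { // keep the edge only if it joins two components
            parent[ru] = rv;
            tree.emplace_back(u, v);
        }
    }
    return tree;
}

int main()
{
    // Same weights as the test above: the heaviest edges (B-C: 3, A-C: 2) win.
    auto tree = maximumSpanningTree(3, { {1.0, 0, 1}, {2.0, 0, 2}, {3.0, 1, 2} });
    for (auto [u, v] : tree)
        std::cout << u << " - " << v << '\n'; // prints 2 edges
    return 0;
}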


@@ -16,7 +16,7 @@
#include "TestUtils.h" #include "TestUtils.h"
std::map<std::string, std::string> modules = { std::map<std::string, std::string> modules = {
{ "mdlp", "2.0.0" }, { "mdlp", "2.0.1" },
{ "Folding", "1.1.0" }, { "Folding", "1.1.0" },
{ "json", "3.11" }, { "json", "3.11" },
{ "ArffFiles", "1.1.0" } { "ArffFiles", "1.1.0" }

4811
tests/data/spambase.arff Executable file

File diff suppressed because it is too large